pax_global_header00006660000000000000000000000064143737752750014536gustar00rootroot0000000000000052 comment=196dd43efe93bfae589eaa97223092f5a8efc942 capirca-2.0.9/000077500000000000000000000000001437377527500131505ustar00rootroot00000000000000capirca-2.0.9/.github/000077500000000000000000000000001437377527500145105ustar00rootroot00000000000000capirca-2.0.9/.github/workflows/000077500000000000000000000000001437377527500165455ustar00rootroot00000000000000capirca-2.0.9/.github/workflows/docker.yml000066400000000000000000000016601437377527500205420ustar00rootroot00000000000000name: Docker Publish on: push: branches: - master env: IMAGE_NAME: capirca jobs: build: name: "Build Capirca container" runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - name: Log into registry run: | echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin - name: Build and tag the image run: docker build . --tag $IMAGE_NAME - name: Push the image to the container registry run: | IMAGE_ID="docker.pkg.github.com/${{ github.repository }}/$IMAGE_NAME" # Change all uppercase to lowercase IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') VERSION=$(cat VERSION) docker tag $IMAGE_NAME $IMAGE_ID:latest docker tag $IMAGE_NAME $IMAGE_ID:$VERSION docker push $IMAGE_ID:latest docker push $IMAGE_ID:$VERSION capirca-2.0.9/.github/workflows/main.yml000066400000000000000000000070751437377527500202250ustar00rootroot00000000000000--- name: Python package on: push: branches: [ master ] pull_request: branches: [ master ] jobs: build: timeout-minutes: 20 strategy: fail-fast: true matrix: python-version: [3.6, 3.7, 3.8, 3.9] include: - os-version: ubuntu-latest - python-version: 3.6 os-version: ubuntu-20.04 runs-on: ${{ matrix.os-version }} steps: - name: Checkout branch with changes uses: actions/checkout@v2 with: path: current - name: Checkout master branch uses: actions/checkout@v2 with: path: master ref: master - name: Set up Node.js uses: actions/setup-node@v2 with: node-version: '14' - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Setup environment variables run: | mkdir -p artifacts echo "wfdt=$(date +'%Y%m%d_%H%M%S')" >> $GITHUB_ENV - name: Install dependencies run: | sudo apt update sudo apt install unzip zip python -m pip install --upgrade pip python -m pip install setuptools wheel python -m pip install flake8 pytest cd current if [ -f requirements.txt ]; then pip install -r requirements.txt; fi if [ -f test-requirements.txt ]; then pip install -r test-requirements.txt; fi - name: Lint with flake8 run: | cd current flake8 . --count --select=W291,W293,W391 --statistic flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics flake8 . --count --exit-zero --max-complexity=10 \ --max-line-length=127 --statistics - name: Test with pytest run: | cd current pytest - name: Perform end-to-end testing with current branch or pull request run: | cd current git status python setup.py sdist bdist_wheel python3 -m pip -v install dist/capirca*py3*.whl aclgen --output_directory ./output --logtostderr cd ./output/ && \ zip -r ../../artifacts/capirca_output_${{ env.wfdt }}.zip . cd .. 
python3 -m pip -v uninstall -y capirca - name: Perform end-to-end testing with master branch run: | cd master git status python setup.py sdist bdist_wheel python3 -m pip -v install dist/capirca*py3*.whl aclgen --output_directory ./output --logtostderr python3 -m pip -v uninstall -y capirca - name: Compare output files between the branches run: | mkdir -p artifacts-diff sudo npm install -g diff2html diff2html-cli diff2html --version diff -qr current/output master/output > \ ./artifacts-diff/policy_output.diff | true cat ./artifacts-diff/policy_output.diff | grep Files | grep differ \ | cut -d" " -f2 | cut -d "/" -f3 > ./artifacts-diff/files.list while read p; do diff -u master/output/$p current/output/$p | \ diff2html -i stdin --file ./artifacts-diff/$p.html | \ true; done < ./artifacts-diff/files.list sed -i '/Diff to HTML by/d' ./artifacts-diff/* - name: Upload generated policies uses: actions/upload-artifact@v2 with: name: capirca_output_${{ matrix.python-version }}_${{ env.wfdt }} path: ./artifacts/capirca_output_${{ env.wfdt }}.zip - name: Upload policy differences uses: actions/upload-artifact@v2 with: name: capirca_output_policy_diff path: ./artifacts-diff capirca-2.0.9/.github/workflows/pypi_release.yml000066400000000000000000000011301437377527500217440ustar00rootroot00000000000000name: Upload Python Package on: release: types: [created] jobs: deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v2 with: python-version: '3.x' - name: Install dependencies run: | python -m pip install --upgrade pip pip install setuptools wheel twine - name: Build and publish env: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.PYPI_APIKEY }} run: | python setup.py sdist bdist_wheel twine upload dist/* capirca-2.0.9/.github/workflows/windows.yml000066400000000000000000000032561437377527500207700ustar00rootroot00000000000000--- name: Python Package for Windows on: push: branches: [ master ] pull_request: branches: [ master ] jobs: build: runs-on: windows-latest timeout-minutes: 20 strategy: fail-fast: true matrix: python-version: [3.8, 3.9] steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - uses: actions/cache@v2 with: path: ~\AppData\Local\pip\Cache key: ${{ runner.os }}-${{ matrix.python-version}}-pip-test-${{ hashFiles('setup.py','requirements.txt','test-requirements.txt') }} restore-keys: | ${{ runner.os }}-${{ matrix.python-version}}-pip-test- ${{ runner.os }}-${{ matrix.python-version}}-pip- ${{ runner.os }}-${{ matrix.python-version}}- - name: Add msbuild to PATH uses: microsoft/setup-msbuild@v1.0.2 - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install setuptools wheel python -m pip install flake8 pytest python -m pip install -r test-requirements.txt python -m pip install -r requirements.txt - name: Build, install, and run run: | python setup.py sdist bdist_wheel python -m pip -v install --find-links=dist --no-index capirca aclgen --output_directory .\output --logtostderr powershell Compress-Archive -Force output\* output.zip - name: Upload generated policies uses: actions/upload-artifact@v2 with: name: capirca_output_${{ matrix.python-version }} path: ./output.zip capirca-2.0.9/.gitignore000066400000000000000000000042341437377527500151430ustar00rootroot00000000000000# Generated files filters/sample_* sample_* def/AUTOGEN.net tests/characterization_data/filters_actual 
tools/new_lint_errors.txt # Recommended python excludes # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # PEP 582; used by e.g. github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ output/ capirca-2.0.9/.pre-commit-config.yaml000066400000000000000000000005751437377527500174400ustar00rootroot00000000000000repos: - repo: https://gitlab.com/pycqa/flake8 rev: '3.8.3' hooks: - id: flake8 - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.1.0 hooks: - id: check-executables-have-shebangs - id: debug-statements - id: detect-private-key - id: end-of-file-fixer - id: requirements-txt-fixer - id: trailing-whitespace capirca-2.0.9/.travis.yml000066400000000000000000000004161437377527500152620ustar00rootroot00000000000000language: python python: - "3.6" - "3.7" # command to install dependencies install: "pip install -r requirements.txt -r test-requirements.txt . && pip install flake8" # command to run tests script: - pytest - flake8 . --count --select=W291,W293,W391 --statistic capirca-2.0.9/AUTHORS000066400000000000000000000004771437377527500142300ustar00rootroot00000000000000# This is the official list of Capirca authors for copyright purposes. # This file is distinct from the CONTRIBUTORS files. # See the latter for an explanation. # Names should be added to this file as: # Name or Organization # The email address is not required for organizations. Google Inc VMWare Inc capirca-2.0.9/CONTRIBUTING.md000066400000000000000000000017771437377527500154150ustar00rootroot00000000000000# Contributing Guidelines Before contributing to Capirca please take into mind the following. 
## Contributors License Agreements We require everyone who submits code to us to sign the [Contributors License Agreement](https://cla.developers.google.com/clas). Please take the time to sign this before sending us a Pull Request. ## Code Readability Please take time to become familiar with the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). We try to adhere to the rules in the guide as much as possible. To help with that you can use our [lint tool](https://github.com/google/capirca/blob/master/tools/run_lint.sh). This runs pylint over the files and will display new lint errors. This will not catch certain style choices but it will find a majority of problems that are easy to fix. If you feel a lint error is incorrect let us know and we can suppress it. ## Development Environment Run the following script to install `capirca` in development environment. ```bash dev-install ``` capirca-2.0.9/Dockerfile000066400000000000000000000002511437377527500151400ustar00rootroot00000000000000FROM python:3.6-alpine WORKDIR /app COPY requirements.txt . RUN pip install -r requirements.txt COPY . /app RUN pip install . WORKDIR /data ENTRYPOINT ["aclgen"] capirca-2.0.9/LICENSE000066400000000000000000000261361437377527500141650ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
capirca-2.0.9/MANIFEST.in000066400000000000000000000001161437377527500147040ustar00rootroot00000000000000include AUTHORS include LICENSE include README.md include aclcheck_cmdline.py capirca-2.0.9/README.md000066400000000000000000000750671437377527500144460ustar00rootroot00000000000000# capirca [![BuildStatus](https://travis-ci.org/google/capirca.svg?branch=master)](https://travis-ci.org/google/capirca) - [About](#about) - [The basics](#the-basics) - [Anatomy of a policy file](#anatomy-of-a-policy-file) - [Headers](#headers) - [Terms](#terms) - [Tokens](#tokens) - [Keywords](#keywords) - [Required](#required) - [Optional](#optional) - [Includes](#includes) - [Example](#example) - [Term Keywords By Generator](#term-keywords-by-generator) - [Term Examples](#term-examples) - [Example Policy File](#example-policy-file) - [Getting Started](#getting-started) - [Installation](#installation) - [From Source](#from-source) - [Package Manager](#package-manager) - [Basic Usage](#basic-usage) - [Python Package](#python-package) - [Running with Docker](#running-with-docker) - [Miscellaneous](#miscellaneous) ## About Capirca is designed to utilize common definitions of networks, services and high-level policy files to facilitate the development and manipulation of network access control lists (ACLs) for various platforms. It was developed by Google for internal use, and is now open source. Capirca consist of `capirca` Python package and the `capirca` tool. The typical usage workflow consists of the following steps: 1. Create **object definitions** containing "network" and "service" definitions 1. Create a **access control policy** defining the desired state of access control and referencing the **object definitions** together with desired firewall platforms 1. Generate ACL configurations by running `capirca` command referencing the access control policy and the object definitions. The command triggers a **generator** for each of the firewall platforms. ## The basics At a high-level capirca rationalizes objects (networks, services) against a security definition (.pol file) to generate a specific device configuration file via a platform specific **ACL generator**. Before getting started some objects must exist, the below table summarizes where objects are stored: path | description ----------------- | ----------------------------------------- /def/NETWORK.net | a list of **network objects** definitions /def/SERVICES.svc | a list of **service objects** definitions Each network or service definition file has a very simple structure. A token is defined, e.g. `GUEST_NET`, followed by an equal sign, then followed by a definition, e.g. `10.10.10.0/24`, and optional description field, e.g. `# guest network range`. ``` GUEST_NET = 10.10.10.0/24 # guest network range ``` The tool populates the **access control policy** from `.pol` files in a particular directory, e.g. [`policies/`](./policies/). The tool searches recursively for `.pol` files and add them to the policy, .e.g `.pol` files are located in [`policies/pol`](./policies/pol). Additionally, the `.pol` files MAY reference other policy definition files located outside of the directory by using `include` directive. Please see [Includes](#includes) section for documentation. ### Network Objects The files with `.net` extension contain the definitions of network objects, e.g. IP networks and hosts. 
The following definition creates `INTERNAL` and `RFC1918` network objects in the object definitions, where `INTERNAL` references the RFC 1918 IP ranges defined by `RFC1918`.

```
RFC1918 = 10.0.0.0/8      # non-public
          172.16.0.0/12   # non-public
          192.168.0.0/16  # non-public

INTERNAL = RFC1918
```

[Back to Top](#table-of-contents)

### Service Objects

The files with `.svc` extension contain the definitions of service objects, e.g. ports and protocols.

```
DNS = 53/tcp  # transfers
      53/udp  # queries
```

[Back to Top](#table-of-contents)

### Object Nesting

The nesting of tokens is permitted only when both tokens are of the same type. The referencing of a "network" object by a "service" object is not allowed, and vice versa. Examples of nesting network and service objects follow.

```
HTTP = 80/tcp        # common web
HTTPS = 443/tcp      # SSL web
HTTP_8080 = 8080/tcp # web on non-standard port
WEB_SERVICES = HTTP HTTP_8080 HTTPS  # all our web services
DB_SERVICES = 3306/tcp  # allow db access
              HTTPS     # and SSL access

NYC_NETWORK = 200.1.1.0/24  # New York office
ATL_NETWORK = 200.2.1.0/24  # Atlanta office
DEN_NETWORK = 200.5.1.0/24  # Denver office
REMOTE_OFFICES = NYC_NETWORK
                 ATL_NETWORK
                 DEN_NETWORK
```

The network objects may reference both IPv4 and IPv6 addresses at the same time.

```
LOOPBACK = 127.0.0.1/32  # loopback in IPv4
           ::1/128       # loopback in IPv6

LINKLOCAL = FE80::/10    # IPv6 link local address

NYC_NETWORK = 172.16.1.0/24     # NYC IPv4
              2620:0:10A1::/48  # NYC IPv6
```

[Back to Top](#table-of-contents)

### Anatomy of a policy file

A policy file (`policies/pol/something.pol`) contains the security policy written in the Capirca-specific meta-language and format. Specific sections (e.g. `header`) tell Capirca how to generate the output configuration of the security policy.

#### Headers

The header section defines:

* the **target** firewall platforms (which ACL generator to use)
* **additional arguments** passed to the generator responsible for that platform.

A single header may have many targets within a section. This results in multiple outputs being generated for that policy.

#### Terms

The **term** sections define the access control rules within an ACL. A term contains keywords, each followed by an object token (service or network) or a policy decision (the "action" keyword). The term section specifies the network flow metadata for ACL matching:

* Addresses
* Ports
* Protocols
* Action (allow/deny)

Inside a `term`, each mandatory keyword is followed by an object token used for rule evaluation.

#### Tokens

Tokens are the names of services and networks loaded from the object definitions. Example:

token_name    | definition
------------- | --------------
"HTTPS"       | 443
"NYC_NETWORK" | 192.168.0.0/24

### Keywords

| keyword  | description                                                      |
| -------- | ---------------------------------------------------------------- |
| required | **must be supported by all output policy generators**            |
| optional | available in a subset of the generators and intended to provide additional flexibility when developing policies for that specific target platform |

#### Required

* **action**
  * accept
  * deny
  * reject
  * next
  * reject-with-tcp-rst
* **comment**
  * a text comment enclosed in double-quotes. Comments may span multiple lines if desired.
* **destination-address**
  * one or more destination address tokens.
* **destination-exclude**
  * exclude one or more address tokens from the specified destination-address.
* **destination-port**
  * one or more service definition tokens.
* **icmp-type**
  * specific icmp-type code to match (IPv4/IPv6 types vary).
  * IPv4:
    * echo-reply
    * unreachable
    * source-quench
    * redirect
    * alternate-address
    * echo-request
    * router-advertisement
    * router-solicitation
    * time-exceeded
    * parameter-problem
    * timestamp-request
    * timestamp-reply
    * information-request
    * information-reply
    * mask-request
    * mask-reply
    * conversion-error
    * mobile-redirect
  * IPv6:
    * destination-unreachable
    * packet-too-big
    * time-exceeded
    * parameter-problem
    * echo-request
    * echo-reply
    * multicast-listener-query
    * multicast-listener-report
    * multicast-listener-done
    * router-solicit
    * router-advertisement
    * neighbor-solicit
    * neighbor-advertisement
    * redirect-message
    * router-renumbering
    * icmp-node-information-query
    * icmp-node-information-response
    * inverse-neighbor-discovery-solicitation
    * inverse-neighbor-discovery-advertisement
    * version-2-multicast-listener-report
    * home-agent-address-discovery-request
    * home-agent-address-discovery-reply
    * mobile-prefix-solicitation
    * mobile-prefix-advertisement
    * certification-path-solicitation
    * certification-path-advertisement
    * multicast-router-advertisement
    * multicast-router-solicitation
    * multicast-router-termination
* **option**
  * connection options.
  * **established**
    * only permit established connections; implements tcp-established flag if protocol is tcp only, otherwise adds 1024-65535 to required destination-ports.
  * **tcp-established**
    * only permit established tcp connections, usually checked based on TCP flag settings. If protocol UDP is included in the term, only adds 1024-65535 to required destination-ports.
  * **sample**
    * not supported by all generators. Samples traffic for netflow.
  * **initial**
    * currently only supported by the juniper generator. Appends tcp-initial to the term.
  * **rst**
    * currently only supported by the juniper generator. Appends "tcp-flags rst" to the term.
  * **first-fragment**
    * currently only supported by the juniper generator. Appends 'first-fragment' to the term.
* **protocol**
  * network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* **protocol-except**
  * network protocols that should be excluded from the protocol specification. This is rarely used.
* **source-address**
  * one or more source address tokens.
* **source-exclude**
  * exclude one or more address tokens from the specified source-address.
* **source-port**
  * one or more service definition tokens.
* **verbatim**
  * this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added.

#### Optional

WARNING: These terms may or may not function properly on all generators. Always refer to the generator-specific documentation and code base.

* **address**
  * one or more network address tokens; matches either source or destination.
* **counter**
  * (Juniper only) update the specified counter for packets matching this term.
* **destination-prefix**
  * (Juniper only) specify destination-prefix matching (e.g. destination-prefix:: configured-neighbors-only).
* **ether-type**
  * (Juniper only) specify matching ether-type (e.g. ether-type:: arp).
* **fragment-offset**
  * (Juniper only) specify a fragment offset of a fragmented packet.
* **logging**
  * (Juniper, speedway/iptables) specify that this packet should be logged via syslog.
* **loss-priority**
  * (Juniper only) specify loss priority.
* **packet-length**
  * (Juniper only) specify packet length.
* **policer**
  * (Juniper only) specify which policer to apply to matching packets.
* **precedence**
  * (Juniper only) specify precedence.
* **qos**
  * (Juniper only) apply quality of service classification to matching packets (e.g. qos:: af4).
* **source-interface**
  * (iptables, speedway only) specify a specific interface a term should apply to (e.g. source-interface:: eth3).
* **source-prefix**
  * (Juniper only) specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only).
* **traffic-type**
  * (Juniper only) specify traffic-type.

### Includes

Policy files support the use of `#include` statements. An include may be used to avoid duplication of commonly used text, such as a group of terms that permit or block specific types of traffic. An include directive will result in the contents of the included file being injected into the current policy file in the exact location of the `#include` directive.

An example include:

```
#include 'includes/untrusted-networks-blocking.inc'
```

NOTE: Includes are only read from the subdirectories of your base_directory; all other directories will error out. The `.inc` file extension and the `includes/` folder are not required, but using them is recommended as a best practice and for easier readability.

### Example

WARNING: Not all generators have the same configuration options or feature set.

```
header {
  target:: paloalto from-zone internal to-zone external
}

term ping-gdns {
  source-address:: INTERNAL
  destination-address:: GOOGLE_DNS
  protocol:: icmp
  action:: accept
}
```

The above example tells Capirca to use paloalto.py to generate a platform-specific configuration for Palo Alto. The security policy is written within the term section using the meta-language:

* name/description: ping-gdns
* source: any INTERNAL network (check /def/NETWORK.net for the definition of 'INTERNAL')
* destination: the network object named GOOGLE_DNS
* protocol: icmp
* action: accept

The above ACL controls traffic in one direction only (outbound towards the service); there should be another header and term to control the traffic in the opposite direction, unless the target generator can automatically create a bi-directional configuration from a single ACL term. Always check the documentation of the generator, or validate the generated output, to confirm the final configuration and policy interpretation.
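For illustration, the opposite direction could be covered by a second header and term in the same policy file, as in the sketch below. The zone names and the term name are assumptions made for this example, and whether such a term is needed at all depends on the target platform: a stateful firewall such as Palo Alto typically permits return traffic for established sessions on its own, so an explicit reverse term only matters for connections initiated from the other side.

```
header {
  # assumed zone names for this sketch; reverse of the example above
  target:: paloalto from-zone external to-zone internal
}

term ping-gdns-replies {
  source-address:: GOOGLE_DNS
  destination-address:: INTERNAL
  protocol:: icmp
  action:: accept
}
```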
#### Term Keywords By Generator

The following list contains links to the documentation of the individual policy generators:

* [`arista`](./doc/generators/arista.md): Arista
* [`aruba`](./doc/generators/aruba.md): Aruba
* [`brocade`](./doc/generators/brocade.md): Brocade
* [`cisco`](./doc/generators/cisco.md): Cisco
* [`ciscoasa`](./doc/generators/ciscoasa.md): Cisco ASA
* [`cisconx`](./doc/generators/cisconx.md): Cisco NX
* [`ciscoxr`](./doc/generators/ciscoxr.md): Cisco XR
* [`cloudarmor`](./doc/generators/cloudarmor.md): cloudarmor
* [`gce`](./doc/generators/gce.md): GCE
* `gcp_hf`
* [`ipset`](./doc/generators/ipset.md): ipset
* [`iptables`](./doc/generators/iptables.md): iptables
* [`juniper`](./doc/generators/juniper.md): Juniper
* [`juniperevo`](./doc/generators/juniperevo.md): Juniper EVO
* [`junipermsmpc`](./doc/generators/junipermsmpc.md): Juniper
* [`junipersrx`](./doc/generators/junipersrx.md): Juniper SRX
* [`k8s`](./doc/generators/k8s.md): Kubernetes NetworkPolicy
* [`nftables`](./doc/generators/nftables.md): nftables
* [`nsxv`](./doc/generators/nsxv.md): NSX
* [`packetfilter`](./doc/generators/packetfilter.md): PacketFilter
* [`paloaltofw`](./doc/generators/paloaltofw.md): Palo Alto PANOS
* [`pcap`](./doc/generators/pcap.md): PcapFilter
* [`sonic`](./doc/generators/sonic.md): SONiC ACLs in config_db.json format
* [`speedway`](./doc/generators/speedway.md): Speedway
* [`srxlo`](./doc/generators/srxlo.md): Stateless Juniper ACL
* [`windows_advfirewall`](./doc/generators/windows_advfirewall.md): Windows Advanced Firewall

[Back to Top](#table-of-contents)

#### Term Examples

The following are examples of how to construct a term; they assume that the naming definition tokens used have been defined in the definitions files.

* Block incoming bogons and spoofed traffic

```
term block-bogons {
  source-address:: BOGONS RFC1918
  source-address:: COMPANY_INTERNAL
  action:: deny
}
```

* Permit Public to Web Servers

```
term permit-to-web-servers {
  destination-address:: WEB_SERVERS
  destination-port:: HTTP
  protocol:: tcp
  action:: accept
}
```

* Permit Replies to DNS Servers From Primaries

```
term permit-dns-tcp-replies {
  source-address:: DNS_PRIMARIES
  destination-address:: DNS_SECONDARIES
  source-port:: DNS
  protocol:: tcp
  option:: tcp-established
  action:: accept
}
```

* Permit All Corporate Networks, Except New York, to FTP Server

This will "subtract" the `CORP_NYC_NETBLOCK` from the `CORP_NETBLOCKS` token. For example, assume `CORP_NETBLOCKS` includes `200.0.0.0/20`, and `CORP_NYC_NETBLOCK` is defined as `200.2.0.0/24`. The `source-exclude` will remove the NYC netblock from the permitted source addresses. If the excluded address is not contained within the source address, nothing is changed.

```
term allow-inbound-ftp-from-corp {
  source-address:: CORP_NETBLOCKS
  source-exclude:: CORP_NYC_NETBLOCK
  destination-port:: FTP
  protocol:: tcp
  action:: accept
}
```

[Back to Top](#table-of-contents)

#### Example Policy File

Below is an example policy file for a Juniper target platform. It contains two filters, each with a handful of terms. This example assumes that the network and service naming definition tokens have been defined.

```
header {
  comment:: "edge input filter for sample network."
target:: juniper edge-inbound } term discard-spoofs { source-address:: RFC1918 action:: deny } term permit-ipsec-access { source-address:: REMOTE_OFFICES destination-address:: VPN_HUB protocol:: 50 action:: accept } term permit-ike-access { source-address:: REMOTE_OFFICES destination-address:: VPN_HUB protocol:: udp destination-port:: IKE action:: accept } term permit-public-web-access { destination-address:: WEB_SERVERS destination-port:: HTTP HTTPS HTTP_8080 protocol:: tcp action:: accept } term permit-tcp-replies { option:: tcp-established action:: accept } term default-deny { action:: deny } header { comment:: "edge output filter for sample network." target:: juniper edge-outbound } term drop-internal-sourced-outbound { destination-address:: INTERNAL destination-address:: RESERVED action:: deny } term reject-internal { source-address:: INTERNAL action:: reject } term default-accept { action:: accept } ``` [Back to Top](#table-of-contents) ## Getting Started ### Installation #### From Source If `setuptools` Python package is not installed on your system, install it: For example, the following commands installs the package with `apt` package manager. ```bash sudo apt-get install python3-pip python3-setuptools ``` Next, to install `capirca` from source, clone the `capirca` repository and run its installer: ```bash git clone https://github.com/google/capirca.git cd capirca/ python3 setup.py install --user ``` Typically, when provided `--user` argument, the installer creates the following files, where `3.8` is Python version and `2.0.0` is the version of `capirca`: * `~/.local/bin/capirca` * `~/.local/lib/python3.8/site-packages/capirca-2.0.0-py3.8.egg` If necessary, remove build files: ```bash rm -rf build capirca.egg-info dist ``` Next, test `capirca` by generating sample output filters for Cisco, Juniper, iptables, and other firewall platforms. ```bash ~/.local/bin/capirca ``` The generation of sample output while in the `capirca`'s source code directory does not require command line parameters, because `capirca` inherits default settings from the following configuration (see `capirca/utils/config.py`). ```json { 'base_directory': './policies', 'definitions_directory': './def', 'policy_file': None, 'output_directory': './filters', 'optimize': False, 'recursive': True, 'debug': False, 'verbose': False, 'ignore_directories': ['DEPRECATED', 'def'], 'max_renderers': 10, 'shade_check': False, 'exp_info': 2 } ``` Although the `policy_file` is `None`, the tool processes all policies located in `base_directory`, i.e. `./policies`. The tool loads network and service definitions from `definitions_directory`. The tool output generated ACLs to the root of the source directory because `output_directory` is `./filters`. [Back to Top](#table-of-contents) #### Package Manager Currently, the PyPI is out of date. Nevertheless, a user can install an older version of `capirca` with `pip`: ```py pip install capirca --user ``` [Back to Top](#table-of-contents) ### Basic Usage There are a number of command-line arguments that can be used with `capirca`. ``` $ ~/.local/bin/capirca --helpfull USAGE: capirca [flags] flags: absl.app: -?,--[no]help: show this help (default: 'false') --[no]helpfull: show full help (default: 'false') --[no]helpshort: show this help (default: 'false') --[no]helpxml: like --helpfull, but generates XML output (default: 'false') --[no]only_check_args: Set to true to validate args and exit. (default: 'false') --[no]pdb: Alias for --pdb_post_mortem. 
(default: 'false') --[no]pdb_post_mortem: Set to true to handle uncaught exceptions with PDB post mortem. (default: 'false') --profile_file: Dump profile information to a file (for python -m pstats). Implies --run_with_profiling. --[no]run_with_pdb: Set to true for PDB debug mode (default: 'false') --[no]run_with_profiling: Set to true for profiling the script. Execution will be slower, and the output format might change over time. (default: 'false') --[no]use_cprofile_for_profiling: Use cProfile instead of the profile module for profiling. This has no effect unless --run_with_profiling is set. (default: 'true') absl.logging: --[no]alsologtostderr: also log to stderr? (default: 'false') --log_dir: directory to write logfiles into (default: '') --logger_levels: Specify log level of loggers. The format is a CSV list of `name:level`. Where `name` is the logger name used with `logging.getLogger()`, and `level` is a level name (INFO, DEBUG, etc). e.g. `myapp.foo:INFO,other.logger:DEBUG` (default: '') --[no]logtostderr: Should only log to stderr? (default: 'false') --[no]showprefixforinfo: If False, do not prepend prefix to info messages when it's logged to stderr, --verbosity is set to INFO level, and python logging is used. (default: 'true') --stderrthreshold: log messages at this level, or more severe, to stderr in addition to the logfile. Possible values are 'debug', 'info', 'warning', 'error', and 'fatal'. Obsoletes --alsologtostderr. Using --alsologtostderr cancels the effect of this flag. Please also note that this flag is subject to --verbosity and requires logfile not be stderr. (default: 'fatal') -v,--verbosity: Logging verbosity level. Messages logged at this level or lower will be included. Set to 1 for debug logging. If the flag was not set or supplied, the value will be changed from the default of -1 (warning) to 0 (info) after flags are parsed. (default: '-1') (an integer) capirca.capirca: --base_directory: The base directory to look for acls; typically where you'd find ./corp and ./prod (default: './policies') --config_file: A yaml file with the configuration options for capirca; repeat this option to specify a list of values --[no]debug: Debug messages (default: 'false') --definitions_directory: Directory where the definitions can be found. (default: './def') --exp_info: Print a info message when a term is set to expire in that many weeks. (default: '2') (an integer) --ignore_directories: Don't descend into directories that look like this string (default: 'DEPRECATED,def') (a comma separated list) --max_renderers: Max number of rendering processes to use. (default: '10') (an integer) -o,--[no]optimize: Turn on optimization. (default: 'False') --output_directory: Directory to output the rendered acls. (default: './filters') --policy_file: Individual policy file to generate. --[no]recursive: Descend recursively from the base directory rendering acls (default: 'true') --[no]shade_check: Raise an error when a term is completely shaded by a prior term. (default: 'false') --[no]verbose: Verbose messages (default: 'false') absl.flags: --flagfile: Insert flag definitions from the given file into the command line. (default: '') --undefok: comma-separated list of flag names that it is okay to specify on the command line even if the program does not define a flag with that name. IMPORTANT: flags in this list that have arguments MUST use the --flag=value format. (default: '') ``` Notably, the `--config_file PATH` argument allows passing one or more yaml configuration files. 
These files will be prioritized from left to right, i.e. any duplicate configurations will be overriden, not merged. The command line arguments take precendence over any settings passed via the configuration files. The default `capirca` configurations in a YAML format follows: ```yaml --- base_directory: ./policies definitions_directory: ./def output_directory: ./ optimize: false recursive: true debug: false verbose: false ignore_directories: - DEPRECATED - def max_renderers: 10 shade_check: true exp_info: 2 ``` [Back to Top](#table-of-contents) ### Python Package The `capirca` tool uses `capirca` Python package. Therefore, there is a way to access `capirca` programmatically. * `policies/sample_paloalto.pol` * `def/SERVICES.svc` * `def/NETWORK.net` Provided you have the following files in your directory, the following code snippets create a `naming` definitions object, policy object, and render generator filter output. **NOTE**: Paste the code snippets one line at a time. First, start Python interpreter: ``` $ python3 Python 3.8.7 (default, Dec 22 2020, 10:37:26) [GCC 10.2.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> ``` Next, import `naming` library and create `naming` object from definitions files in `./def` directory. ```py from pprint import pprint from capirca.lib import naming defs = naming.Naming('./def') pprint(defs) ``` The `defs` object follows: ``` ``` The `Naming` object has various methods. The `GetServiceNames` method returns the service name tokens. ``` >>> dir(defs) ['GetIpParents', 'GetNet', 'GetNetAddr', 'GetNetChildren', 'GetServiceNames', <...intentionally omitted ..> 'unseen_networks', 'unseen_services'] >>> >>> pprint(defs.GetServiceNames()) ['WHOIS', 'SSH', <...intentionally omitted ..> 'TRACEROUTE'] >>> ``` Then, import `policy` library, read in the policy configuration data from `./policies/sample_paloalto.pol`, and create a policy object. ```py from capirca.lib import policy conf = open('./policies/sample_paloalto.pol').read() pol = policy.ParsePolicy(conf, defs, optimize=True) ``` The policy object follows: ``` >>> pprint(pol) Policy: {Target[paloalto], Comments [], Apply groups: [], except: []:[ name: ping-gdns source_address: [IPv4('10.0.0.0/8'), IPv4('172.16.0.0/12'), IPv4('192.168.0.0/16')] destination_address: [IPv4('8.8.4.4/32'), IPv4('8.8.8.8/32'), IPv6('2001:4860:4860::8844/128'), IPv6('2001:4860:4860::8888/128')] protocol: ['icmp'] action: ['accept'], name: dns-gdns source_address: [IPv4('10.0.0.0/8'), IPv4('172.16.0.0/12'), IPv4('192.168.0.0/16')] destination_address: [IPv4('8.8.4.4/32'), IPv4('8.8.8.8/32'), IPv6('2001:4860:4860::8844/128'), IPv6('2001:4860:4860::8888/128')] protocol: ['tcp'] destination_port: [(53, 53)] action: ['accept'], name: allow-web-outbound source_address: [IPv4('10.0.0.0/8'), IPv4('172.16.0.0/12'), IPv4('192.168.0.0/16')] protocol: ['tcp'] destination_port: [(80, 80), (443, 443)] action: ['accept']], Target[paloalto], Comments [], Apply groups: [], except: []:[ name: allow-icmp protocol: ['icmp'] action: ['accept'], name: allow-only-pan-app action: ['accept'] pan_application: ['http'], name: allow-web-inbound destination_address: [IPv4('200.1.1.1/32'), IPv4('200.1.1.2/32')] protocol: ['tcp'] destination_port: [(80, 80), (443, 443)] action: ['accept'] pan_application: ['ssl', 'http']]} >>> ``` Next, import a generator library (here `paloaltofw` for Palo Alto firewalls) and output a policy in the desired format. 
```py from capirca.lib import paloaltofw for header in pol.headers: if 'paloalto' in header.platforms: jcl = True if jcl: output = paloaltofw.PaloAltoFW(pol, 1) print(output) ``` The following code initiates Palo Alto firewall ACL model with the default expiration of 1 week. ``` paloaltofw.PaloAltoFW(pol, 1) ``` [Back to Top](#table-of-contents) ### Running with Docker If your use case is to just use the CLI and you don't want to go through the process of installing `capirca`, you can use the dockerized version of the tool. When using `docker`, mount your working directory to the `/data` directory of the container and pass command-line arguments in the following way. ```bash docker run -v "${PWD}:/data" docker.pkg.github.com/google/capirca/capirca:latest docker run -v "${PWD}:/data" docker.pkg.github.com/google/capirca/capirca:latest --helpfull docker run -v "${PWD}:/data" docker.pkg.github.com/google/capirca/capirca:latest --config_file /data/path/to/config ``` [Back to Top](#table-of-contents) ## Miscellaneous ### Security considerations The Capirca threat model assumes some control and verification of policy definitions (in .pol files). This is either through human user verification, or that policies are generated by upstream systems that enforce correctness. It is recommended that the ACL generated by Capirca is always tested for correctness before being applied to production. Not all generators support every feature, configuration option or term keywords. When something is unsupported, Capirca will error out. But due to the sensitive nature of network ACLs, it is always recommended to test any new generator being used, or new policies being generated. ### Additional documentation * [aclcheck library](./doc/wiki/AclCheck-library.md) * [policy reader library](./doc/wiki/PolicyReader-library.md) * [policy library](./doc/wiki/Policy-library.md) * [naming library](./doc/wiki/Naming-library.md) * [capirca design doc](./doc/wiki/Capirca-design.md) External links, resources and references: * [Brief Overview (4 slides):](https://docs.google.com/present/embed?id=dhtc9k26_13cz9fphfb&autoStart=true&loop=true&size=1) * [Nanog49; Enterprise QoS](http://www.nanog.org/meetings/nanog49/presentations/Tuesday/Chung-EnterpriseQoS-final.pdf) * [Capirca Slack at NetworkToCode](https://networktocode.slack.com/) [Back to Top](#table-of-contents) capirca-2.0.9/VERSION000066400000000000000000000000061437377527500142140ustar00rootroot000000000000002.0.9 capirca-2.0.9/aclcheck_cmdline.py000066400000000000000000000042041437377527500167520ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """Command line interface to aclcheck library.""" from optparse import OptionParser from capirca.lib import aclcheck from capirca.lib import naming from capirca.lib import policy def main(): # TODO(robankeny): Lets move this to gflags usage = 'usage: %prog [options] arg' _parser = OptionParser(usage) _parser.add_option('--definitions-directory', dest='definitions', help='definitions directory', default='./def') _parser.add_option('-p', '--policy-file', dest='pol', help='policy file', default='./policies/sample.pol') _parser.add_option('-d', '--destination', dest='dst', help='destination IP', default='200.1.1.1') _parser.add_option('-s', '--source', dest='src', help='source IP', default='any') _parser.add_option('--proto', '--protocol', dest='proto', help='Protocol (tcp, udp, icmp, etc.)', default='tcp') _parser.add_option('--dport', '--destination-port', dest='dport', help='destination port', default='80') _parser.add_option('--sport', '--source-port', dest='sport', help='source port', default='1025') (FLAGS, unused_args) = _parser.parse_args() defs = naming.Naming(FLAGS.definitions) policy_obj = policy.ParsePolicy(open(FLAGS.pol).read(), defs) check = aclcheck.AclCheck(policy_obj, src=FLAGS.src, dst=FLAGS.dst, sport=FLAGS.sport, dport=FLAGS.dport, proto=FLAGS.proto) print(str(check)) if __name__ == '__main__': main() capirca-2.0.9/capirca/000077500000000000000000000000001437377527500145525ustar00rootroot00000000000000capirca-2.0.9/capirca/__init__.py000066400000000000000000000000171437377527500166610ustar00rootroot00000000000000"""Capirca.""" capirca-2.0.9/capirca/aclgen.py000066400000000000000000000545201437377527500163630ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """Renders policy source files into actual Access Control Lists.""" import copy import multiprocessing import pathlib import sys from typing import Iterator, List, Tuple, cast from absl import app from absl import flags from absl import logging from capirca.lib import aclgenerator from capirca.lib import arista from capirca.lib import arista_tp from capirca.lib import aruba from capirca.lib import brocade from capirca.lib import cisco from capirca.lib import ciscoasa from capirca.lib import cisconx from capirca.lib import ciscoxr from capirca.lib import cloudarmor from capirca.lib import gce from capirca.lib import gce_vpc_tf from capirca.lib import gcp_hf from capirca.lib import ipset from capirca.lib import iptables from capirca.lib import juniper from capirca.lib import juniperevo from capirca.lib import junipermsmpc from capirca.lib import junipersrx from capirca.lib import k8s from capirca.lib import naming from capirca.lib import nftables from capirca.lib import nsxv from capirca.lib import openconfig from capirca.lib import packetfilter from capirca.lib import paloaltofw from capirca.lib import pcap from capirca.lib import policy from capirca.lib import sonic from capirca.lib import speedway from capirca.lib import srxlo from capirca.lib import windows_advfirewall from capirca.utils import config FLAGS = flags.FLAGS WriteList = List[Tuple[pathlib.Path, str]] def SetupFlags(): """Read in configuration from CLI flags.""" flags.DEFINE_string( 'base_directory', None, 'The base directory to look for acls; ' 'typically where you\'d find ./corp and ./prod\n(default: \'%s\')' % config.defaults['base_directory']) flags.DEFINE_string( 'definitions_directory', None, 'Directory where the definitions can be found.\n(default: \'%s\')' % config.defaults['definitions_directory']) flags.DEFINE_string('policy_file', None, 'Individual policy file to generate.') flags.DEFINE_string( 'output_directory', None, 'Directory to output the rendered acls.\n(default: \'%s\')' % config.defaults['output_directory']) flags.DEFINE_boolean( 'optimize', None, 'Turn on optimization.\n(default: \'%s\')' % config.defaults['optimize'], short_name='o') flags.DEFINE_boolean( 'recursive', None, 'Descend recursively from the base directory rendering acls\n(default: \'%s\')' % str(config.defaults['recursive']).lower()) flags.DEFINE_boolean( 'debug', None, 'Debug messages\n(default: \'%s\')' % str(config.defaults['debug']).lower()) flags.DEFINE_boolean( 'verbose', None, 'Verbose messages\n(default: \'%s\')' % str(config.defaults['verbose']).lower()) flags.DEFINE_list( 'ignore_directories', None, 'Don\'t descend into directories that look like this string\n(default: \'%s\')' % ','.join(config.defaults['ignore_directories'])) flags.DEFINE_integer( 'max_renderers', None, 'Max number of rendering processes to use.\n(default: \'%s\')' % config.defaults['max_renderers']) flags.DEFINE_boolean( 'shade_check', None, 'Raise an error when a term is completely shaded by a prior term.\n(default: \'%s\')' % str(config.defaults['shade_check']).lower()) flags.DEFINE_integer( 'exp_info', None, 'Print a info message when a term is set to expire in that many weeks.\n(default: \'%s\')' % str(config.defaults['exp_info'])) flags.DEFINE_multi_string( 'config_file', None, 'A yaml file with the configuration options for capirca') class Error(Exception): """Base Error class.""" class P4WriteFileError(Error): """Error when there are issues p4 editing the destination.""" class ACLGeneratorError(Error): """Raised when an ACL generator has errors.""" 
class ACLParserError(Error): """Raised when the ACL parser fails.""" def SkipLines(text, skip_line_func=False): """Apply skip_line_func to the given text. Args: text: list of the first text to scan skip_line_func: function to use to check if we should skip a line Returns: ret_text: text(list) minus the skipped lines """ if not skip_line_func: return text return [x for x in text if not skip_line_func(x)] def RenderFile(base_directory: str, input_file: pathlib.Path, output_directory: pathlib.Path, definitions: naming.Naming, exp_info: int, optimize: bool, shade_check: bool, write_files: WriteList): """Render a single file. Args: base_directory: The base directory to look for acls. input_file: the name of the input policy file. output_directory: the directory in which we place the rendered file. definitions: the definitions from naming.Naming(). exp_info: print a info message when a term is set to expire in that many weeks. optimize: a boolean indicating if we should turn on optimization or not. shade_check: should we raise an error if a term is completely shaded write_files: a list of file tuples, (output_file, acl_text), to write """ output_relative = input_file.relative_to(base_directory).parent.parent output_directory = output_directory / output_relative logging.debug('rendering file: %s into %s', input_file, output_directory) pol = None jcl = False evojcl = False acl = False atp = False asacl = False aacl = False bacl = False eacl = False gca = False gcefw = False gcphf = False ips = False ipt = False msmpc = False spd = False nsx = False oc = False pcap_accept = False pcap_deny = False pf = False srx = False jsl = False nft = False win_afw = False nxacl = False xacl = False paloalto = False sonic_pol = False k8s_pol = False gce_vpc_tf_pol = False try: with open(input_file) as f: conf = f.read() logging.debug('opened and read %s', input_file) except IOError as e: logging.warning('bad file: \n%s', e) raise try: pol = policy.ParsePolicy( conf, definitions, optimize=optimize, base_dir=base_directory, shade_check=shade_check) except policy.ShadingError as e: logging.warning('shading errors for %s:\n%s', input_file, e) return except (policy.Error, naming.Error): raise ACLParserError('Error parsing policy file %s:\n%s%s' % (input_file, sys.exc_info()[0], sys.exc_info()[1])) platforms = set() for header in pol.headers: platforms.update(header.platforms) if 'juniper' in platforms: jcl = copy.deepcopy(pol) if 'juniperevo' in platforms: evojcl = copy.deepcopy(pol) if 'cisco' in platforms: acl = copy.deepcopy(pol) if 'ciscoasa' in platforms: asacl = copy.deepcopy(pol) if 'brocade' in platforms: bacl = copy.deepcopy(pol) if 'arista' in platforms: eacl = copy.deepcopy(pol) if 'arista_tp' in platforms: atp = copy.deepcopy(pol) if 'aruba' in platforms: aacl = copy.deepcopy(pol) if 'ipset' in platforms: ips = copy.deepcopy(pol) if 'iptables' in platforms: ipt = copy.deepcopy(pol) if 'msmpc' in platforms: msmpc = copy.deepcopy(pol) if 'nsxv' in platforms: nsx = copy.deepcopy(pol) if 'openconfig' in platforms: oc = copy.deepcopy(pol) if 'packetfilter' in platforms: pf = copy.deepcopy(pol) if 'pcap' in platforms: pcap_accept = copy.deepcopy(pol) pcap_deny = copy.deepcopy(pol) if 'speedway' in platforms: spd = copy.deepcopy(pol) if 'srx' in platforms: srx = copy.deepcopy(pol) if 'srxlo' in platforms: jsl = copy.deepcopy(pol) if 'windows_advfirewall' in platforms: win_afw = copy.deepcopy(pol) if 'cisconx' in platforms: nxacl = copy.deepcopy(pol) if 'ciscoxr' in platforms: xacl = copy.deepcopy(pol) if 
'nftables' in platforms: nft = copy.deepcopy(pol) if 'gce' in platforms: gcefw = copy.deepcopy(pol) if 'gce_vpc_tf' in platforms: gce_vpc_tf_pol = copy.deepcopy(pol) if 'gcp_hf' in platforms: gcphf = copy.deepcopy(pol) if 'paloalto' in platforms: paloalto = copy.deepcopy(pol) if 'sonic' in platforms: sonic_pol = copy.deepcopy(pol) if 'cloudarmor' in platforms: gca = copy.deepcopy(pol) if 'k8s' in platforms: k8s_pol = copy.deepcopy(pol) acl_obj: aclgenerator.ACLGenerator try: if jcl: acl_obj = juniper.Juniper(jcl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if evojcl: acl_obj = juniperevo.JuniperEvo(evojcl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if msmpc: acl_obj = junipermsmpc.JuniperMSMPC(msmpc, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if srx: acl_obj = junipersrx.JuniperSRX(srx, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if acl: acl_obj = cisco.Cisco(acl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if asacl: acl_obj = ciscoasa.CiscoASA(asacl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if aacl: acl_obj = aruba.Aruba(aacl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if bacl: acl_obj = brocade.Brocade(bacl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if eacl: acl_obj = arista.Arista(eacl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if atp: acl_obj = arista_tp.AristaTrafficPolicy(atp, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if ips: acl_obj = ipset.Ipset(ips, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if ipt: acl_obj = iptables.Iptables(ipt, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if nsx: acl_obj = nsxv.Nsxv(nsx, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if oc: acl_obj = openconfig.OpenConfig(oc, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if spd: acl_obj = speedway.Speedway(spd, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if pcap_accept: acl_obj = pcap.PcapFilter(pcap_accept, exp_info) RenderACL( str(acl_obj), '-accept' + acl_obj.SUFFIX, output_directory, input_file, write_files) if pcap_deny: acl_obj = pcap.PcapFilter(pcap_deny, exp_info, invert=True) RenderACL( str(acl_obj), '-deny' + acl_obj.SUFFIX, output_directory, input_file, write_files) if pf: acl_obj = packetfilter.PacketFilter(pf, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if win_afw: acl_obj = windows_advfirewall.WindowsAdvFirewall(win_afw, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if jsl: acl_obj = srxlo.SRXlo(jsl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if nxacl: acl_obj = cisconx.CiscoNX(nxacl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if xacl: acl_obj = ciscoxr.CiscoXR(xacl, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, 
write_files) if nft: acl_obj = nftables.Nftables(nft, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if gcefw: acl_obj = gce.GCE(gcefw, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if gce_vpc_tf_pol: acl_obj = gce_vpc_tf.TerraformGCE(gce_vpc_tf_pol, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if gcphf: acl_obj = gcp_hf.HierarchicalFirewall(gcphf, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if paloalto: acl_obj = paloaltofw.PaloAltoFW(paloalto, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if sonic_pol: acl_obj = sonic.Sonic(sonic_pol, exp_info) RenderACL( str(acl_obj), '.json', output_directory, input_file, write_files, True) if gca: acl_obj = cloudarmor.CloudArmor(gca, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) if k8s_pol: acl_obj = k8s.K8s(k8s_pol, exp_info) RenderACL( str(acl_obj), acl_obj.SUFFIX, output_directory, input_file, write_files) # TODO(robankeny) add additional errors. except ( juniper.Error, junipermsmpc.Error, junipersrx.Error, cisco.Error, ipset.Error, iptables.Error, speedway.Error, pcap.Error, sonic.Error, aclgenerator.Error, aruba.Error, nftables.Error, gce.Error, gce_vpc_tf.Error, cloudarmor.Error, k8s.Error) as e: raise ACLGeneratorError('Error generating target ACL for %s:\n%s' % (input_file, e)) def RenderACL(acl_text: str, acl_suffix: str, output_directory: pathlib.Path, input_file: pathlib.Path, write_files: List[Tuple[pathlib.Path, str]], binary: bool = False): """Write the ACL string out to file if appropriate. Args: acl_text: Rendered output of an ACL Generator. acl_suffix: File suffix to append to output filename. output_directory: The directory to write the output file. input_file: The name of the policy file that was used to render ACL. write_files: A list of file tuples, (output_file, acl_text), to write. binary: Boolean if the rendered ACL is in binary format. """ input_filename = input_file.with_suffix(acl_suffix).name output_file = output_directory / input_filename if FilesUpdated(output_file, acl_text, binary): logging.info('file changed: %s', output_file) write_files.append((output_file, acl_text)) else: logging.debug('file not changed: %s', output_file) def FilesUpdated(file_name: pathlib.Path, new_text: str, binary: bool) -> bool: """Diff the rendered acl with what's already on disk. Args: file_name: Name of file on disk to check against. new_text: Text of newly generated ACL. binary: True if file is a binary format. Returns: Boolean if config does not equal new text. """ if binary: readmode = 'rb' else: readmode = 'r' try: with open(file_name, readmode) as f: conf: str = str(f.read()) except IOError: return True if not binary: p4_id = '$I d:'.replace(' ', '') p4_date = '$Da te:'.replace(' ', '') p4_revision = '$Rev ision:'.replace(' ', '') def P4Tags(text: str) -> bool: return not (p4_id in text or p4_date in text or p4_revision in text) filtered_conf = filter(P4Tags, conf.split('\n')) filtered_text = filter(P4Tags, new_text.split('\n')) return list(filtered_conf) != list(filtered_text) return conf != new_text def DescendDirectory(input_dirname: str, ignore_directories: List[str]) -> List[pathlib.Path]: """Descend from input_dirname looking for policy files to render. Args: input_dirname: the base directory. 
ignore_directories: directories to ignore while traversing. Returns: a list of input file paths """ input_dir = pathlib.Path(input_dirname) policy_files: List[pathlib.Path] = [] policy_directories: Iterator[pathlib.Path] = filter( lambda path: path.is_dir(), input_dir.glob('**/pol')) for ignored_directory in ignore_directories: def Filtering(path, ignored=ignored_directory): return not path.match('%s/**/pol' % ignored) and not path.match( '%s/pol' % ignored) policy_directories = filter(Filtering, policy_directories) for directory in policy_directories: directory_policies = list(directory.glob('*.pol')) depth = len(directory.parents) - 1 logging.warning('-' * (2 * depth) + '> %s (%d pol files found)' % (directory, len(directory_policies))) policy_files.extend(filter(lambda path: path.is_file(), directory_policies)) return policy_files def WriteFiles(write_files: WriteList): """Writes files to disk. Args: write_files: List of file names and strings. """ if write_files: logging.info('writing %d files to disk...', len(write_files)) else: logging.info('no files changed, not writing to disk') for output_file, file_contents in write_files: _WriteFile(output_file, file_contents) def _WriteFile(output_file: pathlib.Path, file_contents: str): """Inner file writing function. Args: output_file: Path to write to file_contents: Data to write """ try: parent_path = pathlib.Path(output_file).parent if not parent_path.is_dir(): parent_path.mkdir(parents=True, exist_ok=True) with open(output_file, 'w') as output: logging.info('writing file: %s', output_file) output.write(file_contents) except IOError: logging.warning('error while writing file: %s', output_file) raise def Run(base_directory: str, definitions_directory: str, policy_file: str, output_directory: str, exp_info: int, max_renderers: int, ignore_directories: List[str], optimize: bool, shade_check: bool, context: multiprocessing.context.BaseContext): """Generate ACLs. Args: base_directory: directory containing policy files. definitions_directory: directory containing NETWORK and SERVICES definition files. policy_file: path to a single policy file to render. output_directory: directory in which rendered files are placed. exp_info: print a info message when a term is set to expire in that many weeks. max_renderers: the number of renderers to run in parallel. ignore_directories: directories to ignore when searching for policy files. optimize: a boolean indicating if we should turn on optimization or not. shade_check: should we raise an error if a term is completely shaded. 
context: multiprocessing context """ definitions = None try: definitions = naming.Naming(definitions_directory) except naming.NoDefinitionsError: err_msg = 'bad definitions directory: %s' % definitions_directory logging.fatal(err_msg) return # static type analyzer can't detect that logging.fatal exits program # thead-safe list for storing files to write manager: multiprocessing.managers.SyncManager = context.Manager() write_files: WriteList = cast(WriteList, manager.list()) with_errors = False logging.info('finding policies...') if policy_file: # render just one file logging.info('rendering one file') RenderFile(base_directory, pathlib.Path(policy_file), pathlib.Path(output_directory), definitions, exp_info, optimize, shade_check, write_files) elif max_renderers == 1: # If only one process, run it sequentially policies = DescendDirectory(base_directory, ignore_directories) for pol in policies: RenderFile(base_directory, pol, pathlib.Path(output_directory), definitions, exp_info, optimize, shade_check, write_files) else: # render all files in parallel policies = DescendDirectory(base_directory, ignore_directories) pool = context.Pool(processes=max_renderers) results: List[multiprocessing.pool.AsyncResult] = [] for pol in policies: results.append( pool.apply_async( RenderFile, args=(base_directory, pol, output_directory, definitions, exp_info, optimize, shade_check, write_files))) pool.close() pool.join() for result in results: try: result.get() except (ACLParserError, ACLGeneratorError) as e: with_errors = True logging.warning('\n\nerror encountered in rendering process:\n%s\n\n', e) # actually write files to disk WriteFiles(write_files) if with_errors: logging.warning('done, with errors.') sys.exit(1) else: logging.info('done.') def main(argv): del argv # Unused. configs = config.generate_configs(FLAGS) if configs['verbose']: logging.set_verbosity(logging.INFO) if configs['debug']: logging.set_verbosity(logging.DEBUG) logging.debug( 'binary: %s\noptimize: %d\nbase_directory: %s\n' 'policy_file: %s\nrendered_acl_directory: %s', str(sys.argv[0]), int(configs['optimize']), str(configs['base_directory']), str(configs['policy_file']), str(configs['output_directory'])) logging.debug('capirca configurations: %s', configs) context = multiprocessing.get_context() Run(configs['base_directory'], configs['definitions_directory'], configs['policy_file'], configs['output_directory'], configs['exp_info'], configs['max_renderers'], configs['ignore_directories'], configs['optimize'], configs['shade_check'], context) def EntryPoint(): """Read in flags and call main().""" SetupFlags() app.run(main) if __name__ == '__main__': EntryPoint() capirca-2.0.9/capirca/lib/000077500000000000000000000000001437377527500153205ustar00rootroot00000000000000capirca-2.0.9/capirca/lib/COPYING000066400000000000000000000261361437377527500163630ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. capirca-2.0.9/capirca/lib/__init__.py000066400000000000000000000000351437377527500174270ustar00rootroot00000000000000"""Libraries for Capirca.""" capirca-2.0.9/capirca/lib/aclcheck.py000066400000000000000000000215261437377527500174350ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
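# --- Illustrative sketch (not part of the original source) -----------------
# Typical use of the AclCheck class defined below, given an already parsed
# policy object `pol` (see policy.ParsePolicy in aclgen.py); the addresses
# and ports here are only examples:
#
#   from capirca.lib import aclcheck
#
#   check = aclcheck.AclCheck(pol, src='10.1.1.1', dst='200.1.1.1',
#                             sport='1025', dport='80', proto='tcp')
#   for match in check.Matches():       # every term this packet could hit
#     print(match)
#   print(check.ExactMatches())         # terms that definitively match
#   print(check.ActionMatch('accept'))  # definite matches with this action
# ----------------------------------------------------------------------------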
# """Check where hosts, ports and protocols are matched in a capirca policy.""" import logging from capirca.lib import nacaddr from capirca.lib import policy from capirca.lib import port class Error(Exception): """Base error class.""" class AddressError(Error): """Incorrect IP address or format.""" class BadPolicyError(Error): """Item is not a valid policy object.""" class NoTargetError(Error): """Specified target platform not available in specified policy.""" class AclCheck: """Check where hosts, ports and protocols match in a NAC policy. Attributes: pol_obj: policy.Policy object. pol: policy.Policy object. src: A string for the source address. dst: A string for the destination address. sport: A string for the source port. dport: A string for the destination port. proto: A string for the protocol. matches: A list of term-related matches. exact_matches: A list of exact matches. Returns: An AclCheck Object Raises: port.BarPortValue: An invalid source port is used port.BadPortRange: A port is outside of the acceptable range 0-65535 AddressError: Incorrect ip address or format """ def __init__(self, pol, src='any', dst='any', sport='any', dport='any', proto='any', ): logging.debug('aclcheck __init__') self.pol_obj = pol self.proto = proto # validate source port if sport == 'any': self.sport = sport else: self.sport = port.Port(sport) # validate destination port if dport == 'any': self.dport = dport else: self.dport = port.Port(dport) # validate source address if src == 'any': self.src = src else: try: self.src = nacaddr.IP(src) except ValueError: raise AddressError('bad source address: %s\n' % src) # validate destination address if dst == 'any': self.dst = dst else: try: self.dst = nacaddr.IP(dst) except ValueError: raise AddressError('bad destination address: %s\n' % dst) if not isinstance(self.pol_obj, (policy.Policy)): raise BadPolicyError('Policy object is not valid.') self.matches = [] self.exact_matches = [] for header, terms in self.pol_obj.filters: filtername = header.target[0].options[0] for term in terms: possible = [] logging.debug('checking term: %s', term.name) if not self._AddrInside(self.src, term.source_address): logging.debug('srcaddr does not match') continue logging.debug('srcaddr matches: %s', self.src) if not self._AddrInside(self.dst, term.destination_address): logging.debug('dstaddr does not match') continue logging.debug('dstaddr matches: %s', self.dst) if (self.sport != 'any' and term.source_port and not self._PortInside(self.sport, term.source_port)): logging.debug('sport does not match') continue logging.debug('sport matches: %s', self.sport) if (self.dport != 'any' and term.destination_port and not self._PortInside(self.dport, term.destination_port)): logging.debug('dport does not match') continue logging.debug('dport matches: %s', self.dport) if (self.proto != 'any' and term.protocol and self.proto not in term.protocol): logging.debug('proto does not match') continue logging.debug('proto matches: %s', self.proto) if term.protocol_except and self.proto in term.protocol_except: logging.debug('protocol excepted by term, no match.') continue logging.debug('proto not excepted: %s', self.proto) if not term.action: # avoid any verbatim logging.debug('term had no action (verbatim?), no match.') continue logging.debug('term has an action') possible = self._PossibleMatch(term) self.matches.append(Match(filtername, term.name, possible, term.action, term.qos)) if possible: logging.debug('term has options: %s, not treating as exact match', possible) continue # if we get here 
then we have a match, and if the action isn't next and # there are no possibles, then this is a "definite" match and we needn't # look for any further matches (i.e. later terms may match, but since # we'll never get there we shouldn't report them) if 'next' not in term.action: self.exact_matches.append(Match(filtername, term.name, [], term.action, term.qos)) break def Matches(self): """Return list of matched terms.""" return self.matches def ExactMatches(self): """Return matched terms, but not terms with possibles or action next.""" return self.exact_matches def ActionMatch(self, action='any'): """Return list of matched terms with specified actions.""" match_list = [] for match in self.matches: if match.action: if not match.possibles: if action == 'any' or action in match.action: match_list.append(match) return match_list def DescribeMatches(self): """Provide sentence descriptions of matches. Returns: ret_str: text sentences describing matches """ ret_str = [] for match in self.matches: text = str(match) ret_str.append(text) return '\n'.join(ret_str) def __str__(self): text = [] last_filter = '' for match in self.matches: if match.filter != last_filter: last_filter = match.filter text.append(' filter: ' + match.filter) if match.possibles: text.append(' ' * 10 + 'term: ' + str(match.term) + ' (possible match)') else: text.append(' ' * 10 + 'term: ' + str(match.term)) if match.possibles: text.append(' ' * 16 + match.action + ' if ' + str(match.possibles)) else: text.append(' ' * 16 + match.action) return '\n'.join(text) def _PossibleMatch(self, term): """Ignore some options and keywords that are edge cases. Args: term: term object to examine for edge-cases Returns: ret_str: a list of reasons this term may possible match """ ret_str = [] if 'first-fragment' in term.option: ret_str.append('first-frag') if term.fragment_offset: ret_str.append('frag-offset') if term.packet_length: ret_str.append('packet-length') if 'established' in term.option: ret_str.append('est') if 'tcp-established' in term.option and 'tcp' in term.protocol: ret_str.append('tcp-est') return ret_str def _AddrInside(self, addr, addresses): """Check if address is matched in another address or group of addresses. Args: addr: An ipaddr network or host address or text 'any' addresses: A list of ipaddr network or host addresses Returns: bool: True of false """ if addr == 'any': return True # always true if we match for any addr if not addresses: return True # always true if term has nothing to match for ip in addresses: # ipaddr can incorrectly report ipv4 as contained with ipv6 addrs if addr.subnet_of(ip): return True return False def _PortInside(self, myport, port_list): """Check if port matches in a port or group of ports. 
Args: myport: port number port_list: list of ports Returns: bool: True of false """ if myport == 'any': return True if [x for x in port_list if x[0] <= myport <= x[1]]: return True return False class Match: """A matching term and its associate values.""" def __init__(self, filtername, term, possibles, action, qos=None): self.filter = filtername self.term = term self.possibles = possibles self.action = action[0] self.qos = qos def __str__(self): text = '' if self.possibles: text += 'possible ' + self.action else: text += self.action text += ' in term ' + self.term + ' of filter ' + self.filter if self.possibles: text += ' with factors: ' + str(', '.join(self.possibles)) return text def main(): pass if __name__ == '__main__': main() capirca-2.0.9/capirca/lib/aclgenerator.py000066400000000000000000000537271437377527500203560ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ACL Generator base class.""" import copy import logging import re import string from capirca.lib import policy import six import hashlib # generic error class class Error(Exception): """Base error class.""" class NoPlatformPolicyError(Error): """Raised when a policy is received that doesn't support this platform.""" class UnknownIcmpTypeError(Error): """Raised when we see an unknown icmp-type.""" class MismatchIcmpInetError(Error): """Raised when mistmatch between icmp/icmpv6 and inet/inet6.""" class EstablishedError(Error): """Raised when a term has established option with inappropriate protocol.""" class UnsupportedAFError(Error): """Raised when provided an unsupported address family.""" class DuplicateTermError(Error): """Raised when duplication of term names are detected.""" class UnsupportedFilterError(Error): """Raised when we see an inappropriate filter.""" class UnsupportedTargetOptionError(Error): """Raised when a filter has an impermissible default action specified.""" class TermNameTooLongError(Error): """Raised when term named can not be abbreviated.""" class Term: """Generic framework for a generator Term.""" ICMP_TYPE = policy.Term.ICMP_TYPE # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml PROTO_MAP = {'hopopt': 0, 'icmp': 1, 'igmp': 2, 'ggp': 3, 'ipip': 4, 'tcp': 6, 'egp': 8, 'igp': 9, 'udp': 17, 'rdp': 27, 'ipv6': 41, 'ipv6-route': 43, 'fragment': 44, 'rsvp': 46, 'gre': 47, 'esp': 50, 'ah': 51, 'icmpv6': 58, 'ipv6-nonxt': 59, 'ipv6-opts': 60, 'ospf': 89, 'pim': 103, 'vrrp': 112, 'l2tp': 115, 'sctp': 132, 'udplite': 136, 'all': -1, # Used for GCE default deny, do not use in pol file. } AF_MAP = {'inet': 4, 'inet6': 6, 'bridge': 4 # if this doesn't exist, output includes v4 & v6 } # These protos are always expressed as numbers instead of name # due to inconsistencies on the end platform's name-to-number # mapping. 
ALWAYS_PROTO_NUM = ['ipip'] # provide flipped key/value dicts PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()]) AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()]) NO_AF_LOG_ADDR = string.Template('Term $term will not be rendered, as it has' ' $direction address match specified but no' ' $direction addresses of $af address family' ' are present.') NO_AF_LOG_PROTO = string.Template('Term $term will not be rendered, as it has' ' $proto match specified but the ACL is of' ' $af address family.') def __init__(self, term): if term.protocol: for protocol in term.protocol: if (protocol not in self.PROTO_MAP and str(protocol) not in [str(p) for p in self.PROTO_MAP_BY_NUMBER]): raise UnsupportedFilterError('Protocol(s) %s are not supported.' % str(term.protocol)) term.protocol = ProtocolNameToNumber(term.protocol, self.ALWAYS_PROTO_NUM, self.PROTO_MAP) self.term = term def NormalizeAddressFamily(self, af): """Convert (if necessary) address family name to numeric value. Args: af: Address family, can be either numeric or string (e.g. 4 or 'inet') Returns: af: Numeric address family value Raises: UnsupportedAFError: Address family not in keys or values of our AF_MAP. """ # ensure address family (af) is valid if af in self.AF_MAP_BY_NUMBER: return af elif af in self.AF_MAP: # convert AF name to number (e.g. 'inet' becomes 4, 'inet6' becomes 6) af = self.AF_MAP[af] else: raise UnsupportedAFError('Address family %s is not supported, ' 'term %s.' % (af, self.term.name)) return af def NormalizeIcmpTypes(self, icmp_types, protocols, af): """Return verified list of appropriate icmp-types. Args: icmp_types: list of icmp_types protocols: list of protocols af: address family of this term, either numeric or text (see self.AF_MAP) Returns: sorted list of numeric icmp-type codes. Raises: UnsupportedFilterError: icmp-types specified with non-icmp protocol. MismatchIcmpInetError: mismatch between icmp protocol and address family. UnknownIcmpTypeError: unknown icmp-type specified """ if not icmp_types: return [''] # only protocols icmp or icmpv6 can be used with icmp-types if protocols != ['icmp'] and protocols != ['icmpv6']: raise UnsupportedFilterError('%s %s' % ( 'icmp-types specified for non-icmp protocols in term: ', self.term.name)) # make sure we have a numeric address family (4 or 6) af = self.NormalizeAddressFamily(af) # check that addr family and protocl are appropriate if ((af != 4 and protocols == ['icmp']) or (af != 6 and protocols == ['icmpv6'])): raise MismatchIcmpInetError('%s %s, %s: %s, %s: %s' % ( 'ICMP/ICMPv6 mismatch with address family IPv4/IPv6 in term', self.term.name, 'address family', af, 'protocols', ','.join(protocols))) # ensure all icmp types are valid for icmptype in icmp_types: if icmptype not in self.ICMP_TYPE[af]: raise UnknownIcmpTypeError('%s %s %s %s' % ( '\nUnrecognized ICMP-type (', icmptype, ') specified in term ', self.term.name)) rval = [] rval.extend([self.ICMP_TYPE[af][x] for x in icmp_types]) rval.sort() return rval class ACLGenerator: """Generates platform specific filters and terms from a policy object. This class takes a policy object and renders the output into a syntax which is understood by a specific platform (eg. iptables, cisco, etc). """ _PLATFORM = None # Default protocol to apply when no protocol is specified. _DEFAULT_PROTOCOL = 'ip' # Unsupported protocols by address family. _SUPPORTED_AF = {'inet', 'inet6'} # Commonly misspelled protocols that the generator should reject. 
_FILTER_BLACKLIST = {} # Only warn if these tokens are not implemented by a platform. These are not # meant to be overridden in subclasses like supported tokens/sub tokens. WARN_IF_UNSUPPORTED = { 'restrict_address_family', 'counter', 'destination_tag', 'filter_term', 'logging', 'loss_priority', 'owner', 'qos', 'routing_instance', 'policer', 'source_tag' } # Abbreviation table used to automatically abbreviate terms that exceed # specified limit. We use uppercase for abbreviations to distinguish # from lowercase names. This is order list - we try the ones in the # top of the list before the ones later in the list. Prefer clear # or very-space-saving abbreviations by putting them early in the # list. Abbreviations may be regular expressions or fixed terms; # prefer fixed terms unless there's a clear benefit to regular # expressions. _ABBREVIATION_TABLE = [ # Service abbreviations first. ('experiment', 'EXP'), ('wifi-radius', 'W-R'), ('customer', 'CUST'), ('server', 'SRV'), # Next, common routing terms ('global', 'GBL'), ('google', 'GOOG'), ('service', 'SVC'), ('router', 'RTR'), ('internal', 'INT'), ('external', 'EXT'), ('transit', 'TRNS'), ('management', 'MGMT'), # State info ('established', 'EST'), ('unreachable', 'UNR'), ('fragment', 'FRAG'), ('accept', 'ACC'), ('discard', 'DISC'), ('reject', 'REJ'), ('replies', 'RPL'), ('request', 'REQ'), ] # Maximum term length. Can be overridden by generator to enforce # platform specific restrictions. _TERM_MAX_LENGTH = 62 def __init__(self, pol, exp_info): """Initialise an ACLGenerator. Store policy structure for processing.""" supported_tokens, supported_sub_tokens = self._GetSupportedTokens() self.policy = pol all_err = [] all_warn = [] for header, terms in pol.filters: if self._PLATFORM in header.platforms: # Verify valid keywords # error on unsupported optional keywords that could result # in dangerous or unexpected results for term in terms: if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue # Only verify optional keywords if the term is active on the platform. err = [] warn = [] for el, val in term.__dict__.items(): # Private attributes do not need to be valid keywords. if (val and el not in supported_tokens and not el.startswith('flatten')): if val and el not in self.WARN_IF_UNSUPPORTED: err.append(el) else: warn.append(el) # ignore Liskov's rule. if (val and isinstance(val, list) and el in supported_sub_tokens): ns = set(val) - supported_sub_tokens[el] # hack support for ArbitraryOptions in junos. todo, add the # junos options into the lexer, then we can nuke .* # shenanigans. if ns and '.*' not in supported_sub_tokens[el]: err.append(' '.join(ns)) if err: all_err.append(('%s contains unsupported keywords (%s) for target ' '%s in policy %s') % (term.name, ' '.join(err), self._PLATFORM, pol.filename)) if warn: all_warn.append( ('%s contains unimplemented keywords (%s) for ' 'target %s in policy %s') % (term.name, ' '.join(warn), self._PLATFORM, pol.filename)) continue if all_err: raise UnsupportedFilterError('\n %s' % '\n'.join(all_err)) if all_warn: logging.debug('\n %s', '\n'.join(all_warn)) self._TranslatePolicy(pol, exp_info) def _TranslatePolicy(self, pol, exp_info): # pylint: disable=unused-argument """Translate policy contents to platform specific data structures.""" raise Error('%s does not implement _TranslatePolicies()' % self._PLATFORM) def _BuildTokens(self): """Provide a default for supported tokens and sub tokens. 
Returns: tuple containing both supported tokens and sub tokens """ # Set of supported keywords for a given platform. Values should be in # undercase form, eg, icmp_type (not icmp-type) supported_tokens = {'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'stateless_reply', 'name', # obj attribute, not token 'option', 'protocol', 'platform', 'platform_exclude', 'source_address', 'source_address_exclude', 'source_port', 'translated', # obj attribute, not token 'verbatim', } # These keys must be also listed in supported_tokens. # Keys should be in undercase form, eg, icmp_type (not icmp-type). Values # should be in dash form, icmp-type (not icmp_type) supported_sub_tokens = { 'option': { 'established', 'first-fragment', 'is-fragment', 'initial', 'rst', 'sample', 'tcp-established', }, 'action': { 'accept', 'deny', 'next', 'reject', 'reject-with-tcp-rst', }, 'icmp_type': set(list(Term.ICMP_TYPE[4].keys()) + list(Term.ICMP_TYPE[6].keys())) } return supported_tokens, supported_sub_tokens def _GetSupportedTokens(self): """Build our supported tokens and sub tokens. Returns: tuple containing the supported tokens and sub tokens. Raises: UnsupportedFilterError: Raised when token is not supported. """ supported_tokens, supported_sub_tokens = self._BuildTokens() # make sure we don't have subtokens that are not listed. This should not # occur unless a platform's tokens/subtokens are changed. undefined_st = set(supported_sub_tokens) - supported_tokens if undefined_st: raise UnsupportedFilterError( 'Found undefined sub tokens missing from the supported token list! ' 'These must match. (%s)' % ' '.join(undefined_st)) # all good. return supported_tokens, supported_sub_tokens # TODO(robankeny) Fix this function, it no longer does what it says. def FixHighPorts(self, term, af='inet', all_protocols_stateful=False): """Evaluate protocol and ports of term, return sane version of term. Args: term: Term object to be checked af: String presenting the address family, inet, inet6 all_protocols_stateful: Boolean suggesting if protocols are all stateful. Returns: Copy of term that has been fixed Raises: UnsupportedAFError: Address family provided but unsupported. UnsupportedFilter: Protocols do not match the address family. EstablishedError: Established option used with inappropriate protocol. UnsupportedFilterError: Filter does not support protocols with AF. """ mod = term # Determine which protocols this term applies to. if term.protocol: protocols = set(term.protocol) else: protocols = set((self._DEFAULT_PROTOCOL,)) # Check that the address family matches the protocols. if af not in self._SUPPORTED_AF: raise UnsupportedAFError( '\nAddress family %s, found in %s, unsupported ' 'by %s' % (af, term.name, self._PLATFORM)) if af in self._FILTER_BLACKLIST: unsupported_protocols = self._FILTER_BLACKLIST[af].intersection(protocols) if unsupported_protocols: raise UnsupportedFilterError( '\n%s targets do not support protocol(s) %s ' 'with address family %s (in %s)' % (self._PLATFORM, unsupported_protocols, af, term.name)) # Many renders expect high ports for terms with the established option. for opt in [str(x) for x in term.option]: if opt.find('established') == 0: unstateful_protocols = protocols.difference(set(('tcp', 'udp'))) if not unstateful_protocols: # TCP/UDP: add in high ports then collapse to eliminate overlaps. 
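          # (illustrative note, not in the original source: when the platform
          # is not all-protocols-stateful, an 'established' term with
          # destination_port [(1000, 2000)] gains (1024, 65535), and
          # CollapsePortList merges the overlapping ranges into
          # [(1000, 65535)]; a disjoint port such as (22, 22) is simply kept
          # alongside the added high-port range.)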
mod = copy.deepcopy(term) if not all_protocols_stateful: mod.destination_port.append((1024, 65535)) mod.destination_port = mod.CollapsePortList(mod.destination_port) elif not all_protocols_stateful: errmsg = 'Established option supplied with inappropriate protocol(s)' raise EstablishedError('%s %s %s %s' % (errmsg, unstateful_protocols, 'in term', term.name)) break return mod def FixTermLength(self, term_name, abbreviate=False, truncate=False, override_max_length=None): """Return a term name which is equal or shorter than _TERM_MAX_LENGTH. New term is obtained in two steps. First, if allowed, automatic abbreviation is performed using hardcoded abbreviation table. Second, if allowed, term name is truncated to specified limit. Args: term_name: Name to abbreviate if necessary. abbreviate: Whether to allow abbreviations to shorten the length. truncate: Whether to allow truncation to shorten the length. override_max_length: Override the _TERM_MAX_LENGTH to a different value. Returns: A string based on term_name, that is equal or shorter than _TERM_MAX_LENGTH abbreviated and truncated as necessary. Raises: TermNameTooLongError: term_name cannot be abbreviated to be shorter than _TERM_MAX_LENGTH, or truncation is disabled. """ new_term = term_name if override_max_length is None: override_max_length = self._TERM_MAX_LENGTH if abbreviate: for word, abbrev in self._ABBREVIATION_TABLE: if len(new_term) <= override_max_length: return new_term new_term = re.sub(word, abbrev, new_term) if truncate: new_term = new_term[:override_max_length] if len(new_term) <= override_max_length: return new_term raise TermNameTooLongError('Term %s (originally %s) is ' 'too long. Limit is %d characters (vs. %d) ' 'and no abbreviations remain or abbreviations ' 'disabled.' % (new_term, term_name, override_max_length, len(new_term))) def HexDigest(self, name, truncation_length=None): """Return a hexadecimal digest of the name object. Args: name: Name to hash. truncation_length: Truncation to shorten the digest length if necessary. Returns: A hexadecimal digest of the input name. Raises: N/A """ if truncation_length is None: truncation_length = 64 name_bytes = name.encode('UTF-8') return hashlib.sha256(name_bytes).hexdigest()[:truncation_length] def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map): """Convert a protocol name to a numeric value. Args: protocols: list of protocol names to inspect proto_to_num: list of protocol names that should be converted to numbers name_to_num_map: map of protocol names to protocol numbers Returns: return_proto: list of protocol names, converted if applicable """ return_proto = [] for protocol in protocols: if protocol in proto_to_num: return_proto.append(name_to_num_map[protocol]) else: return_proto.append(protocol) return return_proto def AddRepositoryTags(prefix='', rid=True, date=True, revision=True, wrap=False): """Add repository tagging into the output. Args: prefix: comment delimiter, if needed, to appear before tags rid: bool; True includes the revision Id: repository tag. date: bool; True includes the Date: repository tag. revision: bool; True includes the Revision: repository tag. wrap: bool; True wraps the tag in double quotes. Returns: list of text lines containing revision data """ tags = [] wrapper = '"' if wrap else '' # Format print the '$' into the RCS tags in order prevent the tags from # being interpolated here. 
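  # (illustrative note, not in the original source: with the defaults this
  # builds the literal strings '$Id:$', '$Date:$' and '$Revision:$', so a
  # call such as AddRepositoryTags('! ') returns
  # ['! $Id:$', '! $Date:$', '! $Revision:$']; wrap=True additionally puts
  # double quotes around each tag.)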
p4_id = '%s%sId:%s%s' % (wrapper, '$', '$', wrapper) p4_date = '%s%sDate:%s%s' % (wrapper, '$', '$', wrapper) p4_revision = '%s%sRevision:%s%s' % (wrapper, '$', '$', wrapper) if rid: tags.append('%s%s' % (prefix, p4_id)) if date: tags.append('%s%s' % (prefix, p4_date)) if revision: tags.append('%s%s' % (prefix, p4_revision)) return tags def WrapWords(textlist, size, joiner='\n'): r"""Insert breaks into the listed strings at specified width. Args: textlist: a list of text strings size: width of reformated strings joiner: text to insert at break. eg. '\n ' to add an indent. Returns: list of strings """ # \S{%d}(?!\s|\Z) collets the max size for words that are larger than the max # (?<=\S{%d})\S+ collects the remaining text for overflow words in their own # line # \S.{1,%d}(?=\s|\Z)) collects all words and spaces up to max size, breaking # at the last space rval = [] linelength_re = re.compile( r'(\S{%d}(?!\s|\Z)|(?<=\S{%d})\S+|\S.{1,%d}(?=\s|\Z))' % (size, size, size - 1)) for index in range(len(textlist)): if len(textlist[index]) > size: # insert joiner into the string at appropriate places. textlist[index] = joiner.join(linelength_re.findall(textlist[index])) # avoid empty comment lines rval.extend(x.strip() for x in textlist[index].strip().split(joiner) if x) return rval def TruncateWords(input_text, char_limit): """Shorten text strings to not exceed a specified limit. This function also removes any line breaks. Args: input_text: list or individual string values. char_limit: size limit of resulting string element. Returns: truncated single string element within double quotes. """ CHAR_LIMIT = 126 if isinstance(input_text, list): # handle multiple comments. Remove newline. sanitized_list = [] for i in input_text: sanitized_list.append(i.replace('\n', '')) comment = ' '.join(sanitized_list) else: comment = input_text return '"' + comment[:char_limit] + '"' capirca-2.0.9/capirca/lib/arista.py000066400000000000000000000047151437377527500171640ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Arista generator.""" from capirca.lib import cisco class Error(Exception): """Base error class.""" class UnsupportedEosAccessListError(Error): """When a filter type is not supported in an EOS policy target.""" class Arista(cisco.Cisco): """An Arista policy object. EOS devices differ slightly from Cisco, omitting the extended argument to ACLs for example. There are other items such as 'tracked' we may want to add in the future. """ _PLATFORM = 'arista' SUFFIX = '.eacl' # Protocols should be emitted as they were in the policy (names). _PROTO_INT = False # Arista omits the "extended" access-list argument. def _AppendTargetByFilterType(self, filter_name, filter_type): """Takes in the filter name and type and appends headers. Args: filter_name: Name of the current filter filter_type: Type of current filter Returns: list of strings Raises: UnsupportedEosAccessListError: When unknown filter type is used. 
""" target = [] if filter_type == 'standard': if filter_name.isdigit(): target.append('no access-list %s' % filter_name) else: target.append('no ip access-list standard %s' % filter_name) target.append('ip access-list standard %s' % filter_name) elif filter_type == 'extended': target.append('no ip access-list %s' % filter_name) target.append('ip access-list %s' % filter_name) elif filter_type == 'object-group': target.append('no ip access-list %s' % filter_name) target.append('ip access-list %s' % filter_name) elif filter_type == 'inet6': target.append('no ipv6 access-list %s' % filter_name) target.append('ipv6 access-list %s' % filter_name) else: raise UnsupportedEosAccessListError( 'access list type %s not supported by %s' % ( filter_type, self._PLATFORM)) return target capirca-2.0.9/capirca/lib/arista_tp.py000066400000000000000000000774151437377527500176760ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """arista traffic-policy generator.""" import copy import datetime import re import textwrap from absl import logging from capirca.lib import aclgenerator import six # 1 2 3 # 123456789012345678901234567890123456789 # traffic-policies # traffic-policy foo # match dos-attaqrs-source-ip ipv4 << TERM_INDENT # 1 2 3 4 5 6 # 123456789012345678901234567890123456789012345678901234567890123456789 # !! i am a comment, hear me rawr << MATCH_INDENT # !! # source prefix field-set << MATCH_INDENT # ! # actions # counter edge-dos-attaqrs-source-ip-count << ACTION_INDENT # drop # ! # # 1 2 3 # 123456789012345678901234567890123456789 # traffic-policies # field-set ipv4 prefix dst-hjjqurby6yftqk6fa3xx4fas << TERM_INDENT # 0.0.0.0/0 << MATCH_INDENT # except 34.64.0.0/26 # ! # field-set ipv4 prefix dst-hjjqurby6yftqk6fa3xx4fas # various indentation constants - see above INDENT_STR = " " * 3 # 3 spaces TERM_INDENT = 2 * INDENT_STR MATCH_INDENT = 3 * INDENT_STR ACTION_INDENT = 4 * INDENT_STR MAX_COMMENT_LENGTH = 60 # generic error class class Error(Exception): pass class TcpEstablishedWithNonTcpError(Error): pass class AristaTpFragmentInV6Error(Error): pass class Config: """config allows a configuration to be assembled easily. when appending to the configuration object, the element should be indented according to the arista traffic-policy style. a text representation of the config can be extracted with str(). attributes: indent: The number of leading spaces on the current line. lines: the text lines of the configuration. """ def __init__(self): self.lines = [] def __str__(self): return "\n".join(self.lines) def Append(self, line_indent, line, verbatim=False): """append one line to the configuration. Args: line_indent: config specific spaces prepended to the line line: the configuratoin string to append to the config. verbatim: append line without adjusting indentation. Default False. 
""" if verbatim: self.lines.append(line) return self.lines.append(line_indent + line.strip()) class Term(aclgenerator.Term): """represents an individual AristaTrafficPolicy term. useful for the __str__() method. where literally, everything interesting happens. attributes: term: the term object from the policy. term_type: string indicating type of term, inet, inet6 etc. noverbose: boolean to disable verbosity. """ _PLATFORM = "arista_tp" _ACTIONS = { "accept": "", "deny": "drop", "reject": "drop", # unsupported action, convert to drop "reject-with-tcp-rst": "drop", # ibid # "next": "continue", } AF_MAP = { "inet": 4, "inet6": 6, } # the following lookup table is used to map between the various types of # filters the generator can render. as new differences are # encountered, they should be added to this table. Accessing members # of this table looks like: # self._TERM_TYPE('inet').get('saddr') -> 'source-address' # # it's critical that the members of each filter type be the same, that is # to say that if _TERM_TYPE.get('inet').get('foo') returns something, # _TERM_TYPE.get('inet6').get('foo') must return the inet6 equivalent. _TERM_TYPE = { "inet": { "addr_fam": "ipv4", }, "inet6": { "addr_fam": "ipv6", }, } def __init__(self, term, term_type, noverbose): super().__init__(term) self.term = term self.term_type = term_type # drives the address-family self.noverbose = noverbose if term_type not in self._TERM_TYPE: raise ValueError("unknown filter type: %s" % term_type) def __str__(self): # verify platform specific terms. skip the whole term if the platform # does not match. if (self.term.platform and self._PLATFORM not in self.term.platform): return "" if (self.term.platform_exclude and self._PLATFORM in self.term.platform_exclude): return "" config = Config() # a LoL which will be appended to the config at the end of this method # elements will be of the form [indentation, string, verbatim] by # default verbatim = False term_block = [] # don't render icmpv6 protocol terms under inet, or icmp under inet6 if (self.term_type == "inet6" and "icmp" in self.term.protocol) or (self.term_type == "inet" and "icmpv6" in self.term.protocol): logging.debug( self.NO_AF_LOG_PROTO.substitute( term=self.term.name, proto=", ".join(self.term.protocol), af=self.term_type, )) return "" # term verbatim output - this will skip over normal term creation # code. warning generated from policy.py if appropriate. if self.term.verbatim: for line in self.term.verbatim: if line[0] == self._PLATFORM: # pass MATCH_INDENT, but this should be ignored in the # rendering term_block.append([MATCH_INDENT, str(line[1]), True]) # we return immediately, there's no action to be formed for i, s, v in term_block: config.Append(i, s, verbatim=v) return str(config) # option processing flags = [] misc_options = [] if self.term.option: flags, misc_options = self._processTermOptions(self.term, self.term.option) # helper for per-address-family keywords. family_keywords = self._TERM_TYPE.get(self.term_type) term_block.append([ TERM_INDENT, "match %s %s" % (self.term.name, family_keywords["addr_fam"]), False ]) term_af = self.AF_MAP.get(self.term_type) if self.term.owner and not self.noverbose: self.term.comment.append("owner: %s" % self.term.owner) if self.term.comment and not self.noverbose: reflowed_comments = self._reflowComments(self.term.comment, MAX_COMMENT_LENGTH) for line in reflowed_comments: term_block.append([MATCH_INDENT, "!! 
" + line, False]) has_match_criteria = ( self.term.destination_address or self.term.destination_address_exclude or self.term.destination_port or self.term.destination_prefix or self.term.fragment_offset or self.term.hop_limit or self.term.port or self.term.protocol or self.term.protocol_except or self.term.source_address or self.term.source_address_exclude or self.term.source_port or self.term.source_prefix or self.term.ttl) # if the term name is default-* we will render this into the # appropriate default term name to be used in this filter. is_default_term = re.match(r"^ipv(4|6)\-default\-.*", self.term.name, re.IGNORECASE) if (not has_match_criteria and not is_default_term): # this term doesn't match on anything and isn't a default-term logging.warning( "WARNING: term %s has no valid match criteria and " "will not be rendered.", self.term.name, ) return "" else: # source address src_addr = self.term.GetAddressOfVersion("source_address", term_af) src_addr_ex = self.term.GetAddressOfVersion("source_address_exclude", term_af) if src_addr: src_str = "source prefix" if src_addr_ex: # this should correspond to the generated field set src_str += " field-set src-%s" % self.term.name else: for addr in src_addr: src_str += " %s" % addr term_block.append([MATCH_INDENT, src_str, False]) elif self.term.source_address: logging.debug( self.NO_AF_LOG_ADDR.substitute( term=self.term.name, direction="source", af=self.term_type)) return "" # destination address dst_addr = self.term.GetAddressOfVersion("destination_address", term_af) dst_addr_ex = self.term.GetAddressOfVersion("destination_address_exclude", term_af) if dst_addr: dst_str = "destination prefix" if dst_addr_ex: # this should correspond to the generated field set dst_str += " field-set dst-%s" % self.term.name else: for addr in dst_addr: dst_str += " %s" % addr term_block.append([MATCH_INDENT, dst_str, False]) elif self.term.destination_address: logging.debug( self.NO_AF_LOG_ADDR.substitute( term=self.term.name, direction="destination", af=self.term_type)) return "" if self.term.source_prefix: src_pfx_str = "source prefix field-set" for pfx in self.term.source_prefix: src_pfx_str += " %s" % pfx term_block.append([MATCH_INDENT, " %s" % src_pfx_str, False]) if self.term.destination_prefix: dst_pfx_str = "destination prefix field-set" for pfx in self.term.destination_prefix: dst_pfx_str += " %s" % pfx term_block.append([MATCH_INDENT, " %s" % dst_pfx_str, False]) # PROTOCOL MATCHES protocol_str = "" if self.term.protocol: protocol_str = self._processProtocol(self.term_type, self.term, flags) # protocol-except handling if self.term.protocol_except: protocol_str = self._processProtocolExcept(self.term_type, self.term, flags) # tcp/udp port generation port_str = self._processPorts(self.term) if port_str: protocol_str += port_str # icmp[v6] handling icmp_type_str = "" icmp_code_str = "" if self.term.protocol == ["icmp"] or \ self.term.protocol == ["icmpv6"]: icmp_type_str, icmp_code_str = self._processICMP(self.term) if self.term.icmp_type: protocol_str += icmp_type_str if self.term.icmp_code: protocol_str += icmp_code_str # don't render empty protocol strings. 
if protocol_str: term_block.append([MATCH_INDENT, protocol_str, False]) # ADDITIONAL SUPPORTED MATCH OPTIONS ------------------------------ # packet length if self.term.packet_length: term_block.append( [MATCH_INDENT, "ip length %s" % self.term.packet_length, False]) # fragment offset if self.term.fragment_offset: term_block.append([ MATCH_INDENT, "fragment offset %s" % self.term.fragment_offset, False ]) if self.term.hop_limit: term_block.append([MATCH_INDENT, "ttl %s" % self.term.hop_limit, False]) if self.term.ttl: term_block.append([MATCH_INDENT, "ttl %s" % self.term.ttl, False]) if misc_options: for mopt in misc_options: term_block.append([MATCH_INDENT, mopt, False]) # ACTION HANDLING # if there's no action, then this is an implicit permit current_action = self._ACTIONS.get(self.term.action[0]) # non-permit/drop actions should be added here has_extra_actions = ( self.term.logging or self.term.counter or self.term.dscp_set) # if !accept - generate an action statement # if accept and there are extra actions generate an actions statement # if accept and no extra actions don't generate an actions statement if self.term.action != ["accept"]: term_block.append([MATCH_INDENT, "actions", False]) term_block.append([ACTION_INDENT, "%s" % current_action, False]) elif self.term.action == ["accept"] and has_extra_actions: term_block.append([MATCH_INDENT, "actions", False]) if has_extra_actions: # logging - only supported on deny actions if self.term.logging and self.term.action != ["accept"]: term_block.append([ACTION_INDENT, "log", False]) elif self.term.logging and self.term.action == ["accept"]: logging.warning( "WARNING: term %s uses logging option but is not a deny " "action. logging will not be added.", self.term.name, ) # counters if self.term.counter: term_block.append( [ACTION_INDENT, "count %s" % self.term.counter, False]) term_block.append([MATCH_INDENT, "!", False]) # end of actions term_block.append([TERM_INDENT, "!", False]) # end of match entry for tindent, tstr, tverb in term_block: config.Append(tindent, tstr, verbatim=tverb) return str(config) def _reflowComments(self, comments, max_length): """reflows capirca comments to stay within max_length. Args: comments (list): list of comment strings max_length (int): Returns: type: list containing the reflowed text. 
if a comment list entry is > max_length it will be reflowed and appended to the returned comment list """ flowed_comments = [] for comment in comments: lines = comment.split("\n") for line in lines: if len(line) > max_length: line = textwrap.wrap(line, max_length) flowed_comments.extend(line) else: flowed_comments.append(line) return flowed_comments def _processPorts(self, term): port_str = "" # source port generation if term.source_port: port_str += " source port %s" % self._Group(term.source_port) # destination port if term.destination_port: port_str += (" destination port %s" % self._Group(term.destination_port)) return port_str def _processICMP(self, term): icmp_types = [""] icmp_code_str = "" icmp_type_str = " type " if term.icmp_type: icmp_types = self.NormalizeIcmpTypes(term.icmp_type, term.protocol, self.term_type) if icmp_types != [""]: for t in icmp_types: icmp_type_str += "%s," % t if icmp_type_str.endswith(","): icmp_type_str = icmp_type_str[:-1] # chomp trailing ',' if not term.icmp_code: icmp_type_str += " code all" if self.term.icmp_code and len(icmp_types) <= 1: icmp_codes = self._Group(self.term.icmp_code) icmp_codes = re.sub(r" ", ",", icmp_codes) icmp_code_str += " code %s" % icmp_codes return icmp_type_str, icmp_code_str def _processProtocol(self, term_type, term, flags): anet_proto_map = { "inet": { # <1-255> protocol values(s) or range(s) of protocol values "ahp": "", "bgp": "", "icmp": "", "igmp": "", "ospf": "", "pim": "", "rsvp": "", "tcp": "", "udp": "", "vrrp": "", }, "inet6": { # <0-255> protocol values(s) or range(s) of protocol values "bgp": "", # BGP "icmpv6": "", # ICMPv6 (58) "ospf": "", # OSPF routing protocol (89) "pim": "", # Protocol Independent Multicast (PIM) (103) "rsvp": "", # Resource Reservation Protocol (RSVP) (46) "tcp": "", # TCP "udp": "", # UDP "vrrp": "", # VRRP (112) } } protocol_str = "" prots = [] # if there are dirty prots we'll need to convert the protocol list to # all numbers and generate the list of protocols to match on. EOS # doesn't support commingling named protocols w/numeric protocol-ids dirty_prots = False for p in term.protocol: if p not in anet_proto_map[term_type].keys(): dirty_prots = True prots.append(p) if dirty_prots: num_prots = [] for p in prots: try: num_prots.append(str(self.PROTO_MAP[p])) except KeyError: num_prots.append(str(p)) protocol_str += "protocol %s" % ",".join(num_prots) else: protocol_str += "protocol %s" % self._Group(prots) if prots == ["tcp"] and flags: protocol_str += " flags " + " ".join(flags) return protocol_str def _processProtocolExcept(self, term_type, term, flags): # EOS does not have a protocol-except keyword. it does, however, support # lists of protocol-ids. given a term this function will generate the # appropriate list of protocol-id's which *will* be permited. within the # supported range of addaress family protocols. 
protocol_range = { "inet": 1, "inet6": 0, } protocol_str = "" except_list = set() for p in term.protocol_except: if p in self.PROTO_MAP.keys(): except_list.add(self.PROTO_MAP[p]) else: except_list.add(int(p)) except_list = sorted(except_list) ex_str = "" ptr = protocol_range[term_type] for p in except_list: if 255 > p > ptr: if (p - 1) == ptr: ex_str += str(ptr) + "," else: ex_str += str(ptr) + "-" + str(p - 1) + "," ptr = p + 1 elif p == ptr: ptr = p + 1 ex_str += str(ptr) + "-" + "255" protocol_str = "protocol " + ex_str return protocol_str def _processTermOptions(self, term, options): flags = [] misc_options = [] for opt in [str(x) for x in options]: # note: traffic policies support additional tcp flags. for now, # only handle the required elements # # only append tcp-established for option established when # tcp is the only protocol if opt.startswith("established"): if self.term.protocol == ["tcp"] and "established" not in flags: flags.append("established") # if tcp-established specified, but more than just tcp is # included in the protocols, raise an error elif opt.startswith("tcp-established"): if (self.term.protocol == ["tcp"] and "established" not in flags): flags.append("established") if (len(self.term.protocol) > 1 or self.term.protocol != ["tcp"]): raise TcpEstablishedWithNonTcpError( "tcp-established can only be used with tcp " "protocol in term %s" % self.term.name) elif (opt.startswith("initial") and self.term.protocol == ["tcp"]): flags.append("initial") elif opt.startswith("rst") and self.term.protocol == ["tcp"]: flags.append("rst") elif "fragment" in opt: # handles the is-fragment and first-fragment options misc_options.append("fragment") return flags, misc_options def _Group(self, group, lc=True): """If 1 item return it, else return [item1 item2]. Args: group: a list. could be a list of strings(protocols) or a list of tuples(ports) lc: return a lower cased result for text. Default is True. Returns: string: surrounded by '[' and '];' if len(group) > 1, or with just ';' appended if len(group) == 1 """ def _FormattedGroup(el, lc=True): """Return the actual formatting of an individual element. Args: el: either a string(protocol) or a tuple(ports) lc: return lower cased result for text. Default is True. Returns: string: either the lower()'ed string or the ports, hyphenated if they're a range, or by itself if it's not. """ if isinstance(el, str): if lc: return el else: return el.lower() elif isinstance(el, int): return str(el) # type is a tuple below here elif el[0] == el[1]: return "%d" % el[0] else: return "%d-%d" % (el[0], el[1]) if len(group) > 1: rval = " ".join([_FormattedGroup(x, lc) for x in group]) else: rval = _FormattedGroup(group[0]) return rval class AristaTrafficPolicy(aclgenerator.ACLGenerator): """arista traffic-policy rendering class. takes a policy object and renders the output into a syntax which is understood by arista switches. 
Attributes: pol: policy.Policy object """ _AF_MAP = {"inet": 4, "inet6": 6} _DEFAULT_PROTOCOL = "ip" _PLATFORM = "arista_tp" _SUPPORTED_AF = frozenset(("inet", "inet6", "mixed")) _TERM = Term _LOGGING = set() SUFFIX = ".atp" def _BuildTokens(self): """returns: tuple of supported tokens and sub tokens.""" supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= { "action", "comment", "counter", "destination_address", "destination_address_exclude", "destination_port", "destination_prefix", "dscp_set", "expiration", "fragment_offset", "hop_limit", "icmp_code", "icmp_type", "logging", "name", "option", "owner", "packet_length", "platform", "platform_exclude", "port", "protocol", "protocol_except", "source_address", "source_address_exclude", "source_port", "source_prefix", "ttl", "verbatim" } supported_sub_tokens.update({ "option": { "established", "is-fragment", ".*", # accept arbitrary options "tcp-established", "tcp-initial", } }) return supported_tokens, supported_sub_tokens def _MinimizePrefixes(self, include, exclude): """Calculate a minimal set of prefixes for match conditions. Args: include: Iterable of nacaddr objects, prefixes to match. exclude: Iterable of nacaddr objects, prefixes to exclude. Returns: A tuple (I,E) where I and E are lists containing the minimized versions of include and exclude, respectively. The order of each input list is preserved. """ # Remove any included prefixes that have EXACT matches in the # excluded list. Excluded prefixes take precedence on the router # regardless of the order in which the include/exclude are applied. exclude_set = set(exclude) include_result = [ip for ip in include if ip not in exclude_set] # Every address match condition on a AristaTp firewall filter # contains an implicit "0/0 except" or "0::0/0 except". If an # excluded prefix is not contained within any less-specific prefix # in the included set, we can elide it. In other words, if the # next-less-specific prefix is the implicit "default except", # there is no need to configure the more specific "except". exclude_result = [] for exclude_prefix in exclude: for include_prefix in include_result: if exclude_prefix.subnet_of(include_prefix): exclude_result.append(exclude_prefix) break return include_result, exclude_result def _GenPrefixFieldset(self, direction, name, pfxs, ex_pfxs, af): field_list = "" for p in pfxs: field_list += (" " * 6) + "%s\n" % p for p in ex_pfxs: field_list += (" " * 6) + "except %s\n" % p fieldset_hdr = ("field-set " + af + " prefix " + direction + "-" + ("%s" % name) + "\n") field_set = fieldset_hdr + field_list return field_set def _TranslatePolicy(self, pol, exp_info): self.arista_traffic_policies = [] af_map_txt = {"inet": "ipv4", "inet6": "ipv6"} current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) noverbose = "noverbose" in filter_options[1:] term_names = set() new_terms = [] # list of generated terms policy_field_sets = [] # list of generated field-sets policy_counters = set() # set of the counters in the policy # default to mixed policies filter_type = "mixed" if len(filter_options) > 1: filter_type = filter_options[1] # if the filter_type is mixed, we need to iterate through the # supported address families. 
treat the incoming policy term # (pol_term) as a template for the term and override the necessary # elements of the term for the inet6 evaluation. # # only make a copy of the pol_term if filter_type = "mixed" ftypes = [] if filter_type == "mixed": ftypes = ["inet", "inet6"] else: ftypes = [filter_type] for pol_term in terms: for ft in ftypes: if filter_type == "mixed": term = copy.deepcopy(pol_term) else: term = pol_term # if the term name is default-* we will render this into the # appropriate default term name to be used in this filter. default_term = re.match(r"^default\-.*", term.name, re.IGNORECASE) # TODO(sulrich): if term names become unique to address # families, this can be removed. if (filter_type == "mixed" and ft == "inet6"): term.name = af_map_txt[ft] + "-" + term.name if default_term: term.name = af_map_txt[ft] + "-default-all" if term.name in term_names: raise aclgenerator.DuplicateTermError("multiple terms named: %s" % term.name) term_names.add(term.name) term = self.FixHighPorts(term, af=ft) if not term: continue if term.expiration: if term.expiration <= exp_info_date: logging.info( "INFO: term %s in policy %s expires " "in less than two weeks.", term.name, filter_name, ) if term.expiration <= current_date: logging.warning( "WARNING: term %s in policy %s is expired and " "will not be rendered.", term.name, filter_name, ) continue # emit warnings for unsupported options / terms if term.option: unsupported_opts = [] for opt in [str(x) for x in term.option]: if opt.startswith("sample") or \ opt.startswith("first-fragment"): unsupported_opts.append(opt) # unsupported options are in use and should be skipped if unsupported_opts: logging.warning( "WARNING: term %s in policy %s uses an " "unsupported option (%s) and will not be " "rendered.", term.name, filter_name, " ".join(unsupported_opts), ) continue has_unsupported_match_criteria = ( term.dscp_except or term.dscp_match or term.ether_type or term.flexible_match_range or term.forwarding_class or term.forwarding_class_except or term.next_ip or term.port or term.traffic_type) if has_unsupported_match_criteria: logging.warning( "WARNING: term %s in policy %s uses an " "unsupported match criteria and will not " "be rendered.", term.name, filter_name, ) continue if (("is-fragment" in term.option or "fragment" in term.option) and filter_type == "inet6"): raise AristaTpFragmentInV6Error("the term %s uses is-fragment but " "is a v6 policy." % term.name) # this should error out more gracefully in mixed configs if (("is-fragment" in term.option or "fragment" in term.option) and ft == "inet6"): logging.warning( "WARNING: term %s in mixed policy %s uses " "fragment the ipv6 version of the term will not be " "rendered.", term.name, filter_name, ) continue # check for traffic-policy specific feature interactions if (("is-fragment" in term.option or "fragment" in term.option) and (term.source_port or term.destination_port)): logging.warning( "WARNING: term %s uses fragment as well as src/dst " "port matches. traffic-policies currently do not " "support this match combination. the term will not " "be rendered", term.name, ) continue # check for common unsupported actions (e.g.: next) if term.action == ["next"]: logging.warning( "WARNING: term %s uses an unsupported action " "(%s) and will not be rendered", term.name, " ".join(term.action), ) continue # generate the prefix sets when there are inline addres # exclusions in a term. 
these will be referenced within the # term if term.source_address_exclude: src_addr = term.GetAddressOfVersion("source_address", self._AF_MAP[ft]) src_addr_ex = term.GetAddressOfVersion("source_address_exclude", self._AF_MAP[ft]) src_addr, src_addr_ex = self._MinimizePrefixes( src_addr, src_addr_ex) if src_addr_ex: fs = self._GenPrefixFieldset("src", "%s" % term.name, src_addr, src_addr_ex, af_map_txt[ft]) policy_field_sets.append(fs) if term.destination_address_exclude: dst_addr = term.GetAddressOfVersion("destination_address", self._AF_MAP[ft]) dst_addr_ex = term.GetAddressOfVersion( "destination_address_exclude", self._AF_MAP[ft]) dst_addr, dst_addr_ex = self._MinimizePrefixes( dst_addr, dst_addr_ex) if dst_addr_ex: fs = self._GenPrefixFieldset("dst", "%s" % term.name, dst_addr, dst_addr_ex, af_map_txt[ft]) policy_field_sets.append(fs) # generate the unique list of named counters if term.counter: # we can't have '.' in counter names term.counter = re.sub(r"\.", "-", str(term.counter)) policy_counters.add(term.counter) new_terms.append(self._TERM(term, ft, noverbose)) self.arista_traffic_policies.append( (header, filter_name, filter_type, new_terms, policy_counters, policy_field_sets)) def __str__(self): config = Config() for ( _, filter_name, _, terms, counters, field_sets, ) in self.arista_traffic_policies: # add the header information config.Append("", "traffic-policies") if field_sets: for fs in field_sets: config.Append(" ", fs) config.Append(" ", "!") config.Append(" ", "no traffic-policy %s" % filter_name) config.Append(" ", "traffic-policy %s" % filter_name) # if there are counters, export the list of counters if counters: str_counters = " ".join(counters) config.Append(" ", "counter %s" % str_counters) for term in terms: term_str = str(term) if term_str: config.Append("", term_str, verbatim=True) return str(config) + "\n" capirca-2.0.9/capirca/lib/aruba.py000066400000000000000000000230461437377527500167710ustar00rootroot00000000000000# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Aruba generator.""" import datetime import logging from capirca.lib import aclgenerator _PLATFORM = 'aruba' _COMMENT_MARKER = '#' _TERMINATOR_MARKER = '!' class Error(Exception): """Base error class.""" class Term(aclgenerator.Term): """A single Aruba ACL term, mostly used for the __str__() method. Args: term: policy.Term object. filter_type: IP address version number. 
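
  Illustrative example (hypothetical term name): a term named 'Allow-Web'
  that carries a source_address is rendered with an alias reference such as
  'alias allow-web_src', backed by a generated 'netdestination allow-web_src'
  block listing the term's source networks.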
""" _ANY_STR = 'any' _ALIAS_STR = 'alias' _IPV6_START_STR = 'ipv6' _NET_DEST_STR = 'netdestination' _NEGATOR = 'no' _SRC_NETDEST_SUF = '_src' _DST_NETDEST_SUF = '_dst' _NETWORK_STRING = 'network' _HOST_STRING = 'host' _USER_STR = 'user' _SOURCE_IS_USER_OPT_STR = 'source-is-user' _DESTINATION_IS_USER_OPT_STR = 'destination-is-user' _NEGATE_OPT_STR = 'negate' _IDENT = ' ' _COMMENT_LINE_LENGTH = 70 _ACTIONS = { 'accept': 'permit', 'deny': 'deny', } _PROTOCOL_MAP = { 'icmp': 1, 'gre': 47, 'esp': 50, } def __init__(self, term, filter_type, verbose=True): super().__init__(term) self.term = term self.filter_type = filter_type self.netdestinations = [] self.verbose = verbose def __str__(self): netdestinations = [] ret_str = [] term_af = self.AF_MAP.get(self.filter_type) if self.term.verbatim: for next_verbatim in self.term.verbatim: if next_verbatim[0] == _PLATFORM and next_verbatim[1]: ret_str.append('%s%s' % (self._IDENT, next_verbatim[1])) return '\n'.join(t for t in ret_str if t) if self.verbose: comments = self.term.comment[:] if self.term.owner: comments.append('Owner: %s' % self.term.owner) if comments: for line in aclgenerator.WrapWords(comments, self._COMMENT_LINE_LENGTH): ret_str.append('%s%s %s' % (self._IDENT, _COMMENT_MARKER, line)) src_addr_token = '' dst_addr_token = '' if self._SOURCE_IS_USER_OPT_STR in self.term.option: src_addr_token = self._USER_STR else: if self.term.source_address: src_addr = self.term.GetAddressOfVersion('source_address', term_af) if not src_addr: return '' src_netdest_id = '%s%s' % (self.term.name.lower(), self._SRC_NETDEST_SUF) src_addr_token = '%s %s' % (self._ALIAS_STR, src_netdest_id) netdestinations.append(self._GenerateNetdest(src_netdest_id, src_addr, term_af)) else: src_addr_token = self._ANY_STR if self._DESTINATION_IS_USER_OPT_STR in self.term.option: dst_addr_token = self._USER_STR else: if self.term.destination_address: dst_addr = self.term.GetAddressOfVersion('destination_address', term_af) if not dst_addr: return '' dst_netdest_id = '%s%s' % (self.term.name.lower(), self._DST_NETDEST_SUF) dst_addr_token = '%s %s' % (self._ALIAS_STR, dst_netdest_id) netdestinations.append(self._GenerateNetdest(dst_netdest_id, dst_addr, term_af)) else: dst_addr_token = self._ANY_STR dst_protocol_list = [] if self.term.protocol: dst_protocol_list = self._GeneratePortTokens(self.term.protocol, self.term.destination_port) else: dst_protocol_list = [self._ANY_STR] for dst_port in dst_protocol_list: str_tok = [' '] if self._NEGATE_OPT_STR in self.term.option: str_tok.append(self._NEGATOR) if term_af == 6: str_tok.append(self._IPV6_START_STR) str_tok.append(src_addr_token) str_tok.append(dst_addr_token) str_tok.append(dst_port) str_tok.append(self._ACTIONS.get(self.term.action[0])) ret_str.append(' '.join(t for t in str_tok if t)) self.netdestinations = netdestinations return '\n'.join(t for t in ret_str if t) def _GenerateNetdest(self, addr_netdestid, addresses, af): """Generates the netdestinations text block. Args: addr_netdestid: netdestinations identifier. addresses: IP addresses. af: address family. Returns: A text block suitable for netdestinations in Aruba ACLs. """ ret_str = [] # Aruba does not use IP version identifier for IPv4. 
addr_family = '6' if af == 6 else '' ret_str.append('%s %s' % (self._NET_DEST_STR + addr_family, addr_netdestid)) for address in addresses: ret_str.append('%s%s' % (self._IDENT, self._GenerateNetworkOrHostTokens(address))) ret_str.append('%s\n' % _TERMINATOR_MARKER) return '\n'.join(t for t in ret_str if t) def _GenerateNetworkOrHostTokens(self, address): """Generates the text block host or network identifier for netdestinations. Args: address: IP address. Returns: A string line using either 'host' or 'network', properly formatted for Aruba ACLs. """ if address.num_addresses == 1: return '%s %s' % (self._HOST_STRING, address.network_address) if address.version == 6: return '%s %s/%s' % (self._NETWORK_STRING, address.network_address, address.prefixlen) return '%s %s %s' % (self._NETWORK_STRING, address.network_address, address.netmask) def _GeneratePortTokens(self, protocols, ports): """Generates string tokens for ports. Args: protocols: protocol to use (e.g. tcp, udp, etc.) ports: port number. Returns: A list of strings to be used as the port selector in Aruba ACLs. """ ret_ports = [] for protocol in protocols: if protocol in self._PROTOCOL_MAP: return [str(self._PROTOCOL_MAP[protocol])] for start_port, end_port in ports: ret_ports.append('%s %s' % (protocol.lower(), ' '.join( str(x) for x in set([start_port, end_port])))) return ret_ports class Aruba(aclgenerator.ACLGenerator): """An Aruba policy object. This class takes a policy object and renders the output (via __str__ method) into a syntax which is understood by Aruba devices. Args: pol: policy.Policy object. """ SUFFIX = '.aacl' _ACL_LINE_HEADER = 'ip access-list session' def _BuildTokens(self): """Build supported tokens for platform. Returns: Tuple containing both supported tokens and sub tokens. 
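
      Illustrative note: for this generator the returned sub tokens include
      the Aruba-specific options 'source-is-user', 'destination-is-user' and
      'negate', while tokens such as 'source_port' and 'icmp_type' are
      removed from the inherited set.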
""" supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens -= { 'destination_address_exclude', 'icmp_type', 'source_port', 'source_address_exclude', 'platform', 'platform_exclude', } supported_sub_tokens.update({ 'action': { 'accept', 'deny', }, 'option': { 'source-is-user', 'destination-is-user', 'negate', }, }) del supported_sub_tokens['icmp_type'] return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.aruba_policies = [] current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: filter_name = header.FilterName(_PLATFORM) filter_options = header.FilterOptions(_PLATFORM) verbose = True if 'noverbose' in filter_options: filter_options.remove('noverbose') verbose = False filter_type = 'inet' if 'inet6' in filter_options: filter_type += '6' new_terms = [] for term in terms: if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue new_terms.append(Term(term, filter_type, verbose)) self.aruba_policies.append((filter_name, new_terms, filter_type)) def __str__(self): target = [] target.extend(aclgenerator.AddRepositoryTags('%s ' % _COMMENT_MARKER)) for filter_name, terms, _ in self.aruba_policies: netdestinations = [] term_strings = [] for term in terms: term_strings.append(str(term)) netdestinations.extend(term.netdestinations) target.extend(netdestinations) target.append('%s %s' % (self._ACL_LINE_HEADER, filter_name)) target.extend(term_strings) target.extend(_TERMINATOR_MARKER) if target: target.append('') return '\n'.join(target) capirca-2.0.9/capirca/lib/brocade.py000066400000000000000000000020151437377527500172670ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Brocade generator.""" from capirca.lib import cisco class Brocade(cisco.Cisco): """A brocade policy object. Brocade devices do not like protocol numbers. Revert the protocol numbers to names just before emitting acl lines to minimize difference from Cisco logic. """ _PLATFORM = 'brocade' SUFFIX = '.bacl' # Protocols should be emitted as they were in the policy (names). _PROTO_INT = False _TERM_REMARK = False capirca-2.0.9/capirca/lib/cisco.py000066400000000000000000001153011437377527500167730ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Cisco generator.""" import datetime import ipaddress from typing import cast, Union from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import summarizer _ACTION_TABLE = { 'accept': 'permit', 'deny': 'deny', 'reject': 'deny', 'next': '! next', 'reject-with-tcp-rst': 'deny', # tcp rst not supported } _COMMENT_MAX_WIDTH = 70 # generic error class class Error(Exception): """Generic error class.""" class CiscoDuplicateTermError(Error): """Raised on duplicate term names.""" class CiscoNextIpError(Error): """Raised when next-ip is misconfigured.""" class UnsupportedCiscoAccessListError(Error): """Raised when we're give a non named access list.""" class StandardAclTermError(Error): """Raised when there is a problem in a standard access list.""" class ExtendedACLTermError(Error): """Raised when there is a problem in an extended access list.""" class TermStandard: """A single standard ACL Term.""" def __init__(self, term, filter_name, platform='cisco', verbose=True): self.term = term self.filter_name = filter_name self.platform = platform self.options = [] self.logstring = '' self.dscpstring = '' self.verbose = verbose # sanity checking for standard acls if self.term.protocol: raise StandardAclTermError( 'Standard ACLs cannot specify protocols') if self.term.icmp_type: raise StandardAclTermError( 'ICMP Type specifications are not permissible in standard ACLs') if (self.term.source_address or self.term.source_address_exclude or self.term.destination_address or self.term.destination_address_exclude): raise StandardAclTermError( 'Standard ACLs cannot use source or destination addresses') if self.term.option: raise StandardAclTermError( 'Standard ACLs prohibit use of options') if self.term.source_port or self.term.destination_port: raise StandardAclTermError( 'Standard ACLs prohibit use of port numbers') if self.term.logging: logging.warning( 'WARNING: Standard ACL logging is set in filter %s, term %s and ' 'may not implemented on all IOS versions', self.filter_name, self.term.name) self.logstring = ' log' if self.term.dscp_match: logging.warning( 'WARNING: dscp-match is set in filter %s, term %s and may not be ' 'implemented on all IOS version', self.filter_name, self.term.name) self.dscpstring = ' dscp' + self.term.dscp_match def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self.platform not in self.term.platform: return '' if self.term.platform_exclude: if self.platform in self.term.platform_exclude: return '' ret_str = [] # Term verbatim output - this will skip over normal term creation # code by returning early. Warnings provided in policy.py. 
if self.term.verbatim: for next_verbatim in self.term.verbatim: if next_verbatim[0] == self.platform: ret_str.append(str(next_verbatim[1])) return '\n'.join(ret_str) v4_addresses = [x for x in self.term.address if not isinstance(x, nacaddr.IPv6)] if self.filter_name.isdigit(): if self.verbose: ret_str.append('access-list %s remark %s' % (self.filter_name, self.term.name)) comments = aclgenerator.WrapWords(self.term.comment, _COMMENT_MAX_WIDTH) for comment in comments: ret_str.append('access-list %s remark %s' % (self.filter_name, comment)) action = _ACTION_TABLE.get(str(self.term.action[0])) if v4_addresses: for addr in v4_addresses: if addr.prefixlen == 32: ret_str.append('access-list %s %s %s%s%s' % (self.filter_name, action, addr.network_address, self.logstring, self.dscpstring)) else: ret_str.append('access-list %s %s %s %s%s%s' % ( self.filter_name, action, addr.network_address, addr.hostmask, self.logstring, self.dscpstring)) else: ret_str.append('access-list %s %s %s%s%s' % (self.filter_name, action, 'any', self.logstring, self.dscpstring)) else: if self.verbose: ret_str.append(' remark ' + self.term.name) comments = aclgenerator.WrapWords(self.term.comment, _COMMENT_MAX_WIDTH) if comments and comments[0]: for comment in comments: ret_str.append(' remark ' + str(comment)) action = _ACTION_TABLE.get(str(self.term.action[0])) if v4_addresses: for addr in v4_addresses: if addr.prefixlen == 32: ret_str.append(' %s host %s%s%s' % (action, addr.network_address, self.logstring, self.dscpstring)) elif self.platform == 'arista': ret_str.append(' %s %s/%s%s%s' % (action, addr.network_address, addr.prefixlen, self.logstring, self.dscpstring)) else: ret_str.append(' %s %s %s%s%s' % (action, addr.network_address, addr.hostmask, self.logstring, self.dscpstring)) else: ret_str.append(' %s %s%s%s' % (action, 'any', self.logstring, self.dscpstring)) return '\n'.join(ret_str) class ObjectGroup: """Used for printing out the object group definitions. since the ports don't store the token name information, we have to fudge their names. ports will be written out like object-group port - range exit where as the addressess can be written as object-group address ipv4 first-term-source-address 172.16.0.0 172.20.0.0 255.255.0.0 172.22.0.0 255.128.0.0 172.24.0.0 172.28.0.0 exit """ def __init__(self): self.filter_name = '' self.terms = [] @property def valid(self): return bool(self.terms) def AddTerm(self, term): self.terms.append(term) def AddName(self, filter_name): self.filter_name = filter_name def __str__(self): ret_str = ['\n'] # netgroups will contain two-tuples of group name string and family int. netgroups = set() ports = {} for term in self.terms: # I don't have an easy way get the token name used in the pol file # w/o reading the pol file twice (with some other library) or doing # some other ugly hackery. Instead, the entire block of source and dest # addresses for a given term is given a unique, computable name which # is not related to the NETWORK.net token name. that's what you get # for using cisco, which has decided to implement its own meta language. # Create network object-groups addr_type = ('source_address', 'destination_address') addr_family = (4, 6) for source_or_dest in addr_type: for family in addr_family: addrs = term.GetAddressOfVersion(source_or_dest, family) if addrs: net_def_name = addrs[0].parent_token # We have addresses for this family and have not already seen it. 
if (net_def_name, family) not in netgroups: netgroups.add((net_def_name, family)) ret_str.append('object-group network ipv%d %s' % ( family, net_def_name)) for addr in addrs: ret_str.append(' %s/%s' % (addr.network_address, addr.prefixlen)) ret_str.append('exit\n') # Create port object-groups for port in term.source_port + term.destination_port: if not port: continue port_key = '%s-%s' % (port[0], port[1]) if port_key not in ports: ports[port_key] = True ret_str.append('object-group port %s' % port_key) if port[0] != port[1]: ret_str.append(' range %d %d' % (port[0], port[1])) else: ret_str.append(' eq %d' % port[0]) ret_str.append('exit\n') return '\n'.join(ret_str) class PortMap: """Map port numbers to service names.""" # Define port mappings common to all protocols _PORTS_TCP = { 179: 'bgp', 19: 'chargen', 514: 'cmd', 13: 'daytime', 9: 'discard', 53: 'domain', 7: 'echo', 512: 'exec', 79: 'finger', 21: 'ftp', 20: 'ftp-data', 70: 'gopher', 443: 'https', 113: 'ident', 194: 'irc', 543: 'klogin', 544: 'kshell', 389: 'ldap', 636: 'ldaps', 513: 'login', 515: 'lpd', 2049: 'nfs', 119: 'nntp', 496: 'pim-auto-rp', 109: 'pop2', 110: 'pop3', 1723: 'pptp', 25: 'smtp', 22: 'ssh', 111: 'sunrpc', 49: 'tacacs', 517: 'talk', 23: 'telnet', 540: 'uucp', 43: 'whois', 80: 'www', } _PORTS_UDP = { 512: 'biff', 68: 'bootpc', 67: 'bootps', 9: 'discard', 195: 'dnsix', 53: 'domain', 7: 'echo', 500: 'isakmp', 434: 'mobile-ip', 42: 'nameserver', 138: 'netbios-dgm', 137: 'netbios-ns', 2049: 'nfs', 123: 'ntp', 496: 'pim-auto-rp', 520: 'rip', 161: 'snmp', 162: 'snmptrap', 111: 'sunrpc', 514: 'syslog', 49: 'tacacs', 517: 'talk', 69: 'tftp', 37: 'time', 513: 'who', 177: 'xdmcp', } _TYPES_ICMP = { 6: 'alternate-address', 31: 'conversion-error', 8: 'echo', 0: 'echo-reply', 16: 'information-reply', 15: 'information-request', 18: 'mask-reply', 17: 'mask-request', 32: 'mobile-redirect', 12: 'parameter-problem', 5: 'redirect', 9: 'router-advertisement', 10: 'router-solicitation', 4: 'source-quench', 11: 'time-exceeded', 14: 'timestamp-reply', 13: 'timestamp-request', 30: 'traceroute', 3: 'unreachable', } # Combine cisco-specific port mappings with common ones _CISCO_PORTS_TCP = { 5190: 'aol', 1494: 'citrix-ica', 2748: 'ctiqbe', 1720: 'h323', 101: 'hostname', 143: 'imap4', 750: 'kerberos', 1352: 'lotusnotes', 139: 'netbios-ssn', 5631: 'pcanywhere-data', 1521: 'sqlnet', } _CISCO_PORTS_TCP.update(_PORTS_TCP) _CISCO_PORTS_UDP = { 750: 'kerberos', 5632: 'pcanywhere-status', 1645: 'radius', 1646: 'radius-acct', 5510: 'secureid-udp', } _CISCO_PORTS_UDP.update(_PORTS_UDP) # Combine arista-specific port mappings with common ones _ARISTA_PORTS_TCP = { 143: 'imap', 88: 'kerberos', } _ARISTA_PORTS_TCP.update(_PORTS_TCP) _ARISTA_PORTS_UDP = { 88: 'kerberos', 1812: 'radius', 1813: 'radius-acct', } _ARISTA_PORTS_UDP.update(_PORTS_UDP) # Full port map data structure _PORT_MAP = { 'cisco': { 'tcp': _CISCO_PORTS_TCP, 'udp': _CISCO_PORTS_UDP, 'icmp': _TYPES_ICMP }, 'arista': { 'tcp': _ARISTA_PORTS_TCP, 'udp': _ARISTA_PORTS_UDP, 'icmp': _TYPES_ICMP } } @staticmethod def GetProtocol(port_num, proto, platform='cisco'): """Converts a port number to a name or returns the number. Args: port_num: integer representing the port number. proto: string representing proto (tcp, udp, etc). platform: string representing platform (cisco, arista) Returns: A name of the protocol or the port number that was provided. 
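
    Illustrative example (hypothetical port numbers):
      PortMap.GetProtocol(53, 'udp') -> 'domain'
      PortMap.GetProtocol(5353, 'udp') -> 5353 (unmapped ports are returned
      unchanged)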
""" try: port_map = PortMap._PORT_MAP[platform][proto] return port_map[port_num] except KeyError: return port_num class Term(aclgenerator.Term): """A single ACL Term.""" ALLOWED_PROTO_STRINGS = ['eigrp', 'gre', 'icmp', 'igmp', 'igrp', 'ip', 'ipinip', 'nos', 'pim', 'tcp', 'udp', 'sctp', 'ahp'] IPV4_ADDRESS = Union[nacaddr.IPv4, ipaddress.IPv4Network] IPV6_ADDRESS = Union[nacaddr.IPv6, ipaddress.IPv6Network] def __init__(self, term, af=4, proto_int=True, enable_dsmo=False, term_remark=True, platform='cisco', verbose=True): super().__init__(term) self.term = term self.proto_int = proto_int self.options = [] self.enable_dsmo = enable_dsmo self.term_remark = term_remark self.platform = platform self.verbose = verbose # Our caller should have already verified the address family. assert af in (4, 6) self.af = af if af == 4: self.text_af = 'inet' else: self.text_af = 'inet6' self.ALLOWED_PROTO_STRINGS.extend([self.PROTO_MAP.get(x) for x in self.ALWAYS_PROTO_NUM]) def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self.platform not in self.term.platform: return '' if self.term.platform_exclude: if self.platform in self.term.platform_exclude: return '' ret_str = ['\n'] # Don't render icmpv6 protocol terms under inet, or icmp under inet6 if ((self.af == 6 and 'icmp' in self.term.protocol) or (self.af == 4 and 'icmpv6' in self.term.protocol)): logging.debug(self.NO_AF_LOG_PROTO.substitute( term=self.term.name, proto=', '.join(self.term.protocol), af=self.text_af)) return '' if self.verbose: if self.term_remark: ret_str.append(' remark ' + self.term.name) if self.term.owner: self.term.comment.append('Owner: %s' % self.term.owner) for comment in self.term.comment: for line in comment.split('\n'): ret_str.append(' remark ' + str(line)[:100].rstrip()) # Term verbatim output - this will skip over normal term creation # code by returning early. Warnings provided in policy.py. if self.term.verbatim: for next_verbatim in self.term.verbatim: if next_verbatim[0] == self.platform: ret_str.append(str(next_verbatim[1])) return '\n'.join(ret_str) # protocol if not self.term.protocol: if self.af == 6: protocol = ['ipv6'] elif self.platform == 'ciscoxr': protocol = ['ipv4'] else: protocol = ['ip'] elif self.term.protocol == ['hopopt']: protocol = ['hbh'] elif self.proto_int: protocol = [proto if proto in self.ALLOWED_PROTO_STRINGS else self.PROTO_MAP.get(proto) for proto in self.term.protocol] else: protocol = self.term.protocol # Arista can not process acls with esp/ah, these must appear as integers. 
if self.platform == 'arista': if 'esp' in protocol: protocol = [x if x != 'esp' else '50' for x in protocol] if 'ah' in protocol: protocol = [x if x != 'ah' else '51' for x in protocol] # source address if self.term.source_address: source_address = self.term.GetAddressOfVersion('source_address', self.af) source_address_exclude = self.term.GetAddressOfVersion( 'source_address_exclude', self.af) if source_address_exclude: source_address = nacaddr.ExcludeAddrs( source_address, source_address_exclude) if not source_address: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='source', af=self.text_af)) return '' if self.enable_dsmo: source_address = summarizer.Summarize(source_address) else: # source address not set source_address = ['any'] # destination address if self.term.destination_address: destination_address = self.term.GetAddressOfVersion( 'destination_address', self.af) destination_address_exclude = self.term.GetAddressOfVersion( 'destination_address_exclude', self.af) if destination_address_exclude: destination_address = nacaddr.ExcludeAddrs( destination_address, destination_address_exclude) if not destination_address: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='destination', af=self.text_af)) return '' if self.enable_dsmo: destination_address = summarizer.Summarize(destination_address) else: # destination address not set destination_address = ['any'] # options opts = [str(x) for x in self.term.option] if ((self.PROTO_MAP['tcp'] in protocol or 'tcp' in protocol) and ('tcp-established' in opts or 'established' in opts)): if 'established' not in self.options: self.options.append('established') # Using both 'fragments' and 'is-fragment', ref Github Issue #187 if ('ip' in protocol) and (('fragments' in opts) or ('is-fragment' in opts)): if 'fragments' not in self.options: self.options.append('fragments') # ACL-based Forwarding if (self.platform == 'ciscoxr' ) and not self.term.action and self.term.next_ip and ( 'nexthop1' not in opts): if len(self.term.next_ip) > 1: raise CiscoNextIpError('The following term has more than one next IP ' 'value: %s' % self.term.name) if (not isinstance(self.term.next_ip[0], nacaddr.IPv4) and not isinstance(self.term.next_ip[0], nacaddr.IPv6)): raise CiscoNextIpError('Next IP value must be an IP address. 
' 'Invalid term: %s' % self.term.name) if self.term.next_ip[0].num_addresses > 1: raise CiscoNextIpError('The following term has a subnet instead of a ' 'host: %s' % self.term.name) nexthop = self.term.next_ip[0].network_address nexthop_protocol = 'ipv4' if nexthop.version == 4 else 'ipv6' self.options.append('nexthop1 %s %s' % (nexthop_protocol, nexthop)) action = _ACTION_TABLE.get('accept') if self.term.action: action = _ACTION_TABLE.get(str(self.term.action[0])) # ports source_port = [()] destination_port = [()] if self.term.source_port: source_port = self._FixConsecutivePorts(self.term.source_port) if self.term.destination_port: destination_port = self._FixConsecutivePorts(self.term.destination_port) # logging if self.term.logging: self.options.append('log') # dscp; unlike srx, cisco only supports single, non-except values if self.term.dscp_match: if len(self.term.dscp_match) > 1: raise ExtendedACLTermError( 'Extended ACLs cannot specify more than one dscp match value') else: self.options.append('dscp %s' % ' '.join(self.term.dscp_match)) # icmp-types icmp_types = [''] if self.term.icmp_type: icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, self.term.protocol, self.af) icmp_codes = [''] if self.term.icmp_code: icmp_codes = self.term.icmp_code fixed_src_addresses = [self._GetIpString(x) for x in source_address] fixed_dst_addresses = [self._GetIpString(x) for x in destination_address] fixed_opts = {} for p in protocol: fixed_opts[p] = self._FixOptions(p, self.options) for saddr in fixed_src_addresses: for daddr in fixed_dst_addresses: for sport in source_port: for dport in destination_port: for proto in protocol: opts = fixed_opts[proto] # cisconx uses icmp for both ipv4 and ipv6 if self.platform == 'cisconx': if self.af == 6: proto = 'icmp' if proto == 'icmpv6' else proto for icmp_type in icmp_types: for icmp_code in icmp_codes: ret_str.extend( self._TermletToStr(action, proto, saddr, self._FormatPort(sport, proto), daddr, self._FormatPort(dport, proto), icmp_type, icmp_code, opts)) return '\n'.join(ret_str) def _GetIpString(self, addr): """Formats the address object for printing in the ACL. Args: addr: str or ipaddr, address Returns: An address string suitable for the ACL. """ if isinstance(addr, nacaddr.IPv4) or isinstance(addr, ipaddress.IPv4Network): addr = cast(self.IPV4_ADDRESS, addr) if addr.num_addresses > 1: if self.platform == 'arista': return addr.with_prefixlen return '%s %s' % (addr.network_address, addr.hostmask) return 'host %s' % (addr.network_address) if isinstance(addr, nacaddr.IPv6) or isinstance(addr, ipaddress.IPv6Network): addr = cast(self.IPV6_ADDRESS, addr) if addr.num_addresses > 1: return addr.with_prefixlen return 'host %s' % (addr.network_address) # DSMO enabled if isinstance(addr, summarizer.DSMNet): return '%s %s' % summarizer.ToDottedQuad(addr, negate=True) return addr def _FormatPort(self, port, proto): """Returns a formatted port string for the range. Args: port: str list or none, the port range. proto: str representing proto (tcp, udp, etc). Returns: A string suitable for the ACL. """ if not port: return '' port0 = port[0] port1 = port[1] if self.platform == 'arista': port0 = PortMap.GetProtocol(port0, proto, self.platform) port1 = PortMap.GetProtocol(port1, proto, self.platform) if port[0] != port[1]: return 'range %s %s' % (port0, port1) return 'eq %s' % (port0) def _FixOptions(self, proto, option): """Returns a set of options suitable for the given protocol. Fix done: - Filter out 'established' for UDP. 
- Filter out 'fragments' for TCP/UDP Args: proto: str or int, protocol option: list or none, optional, eg. 'logging' tokens. Returns: A list of options suitable for that protocol. """ # Prevent UDP from appending 'established' to ACL line sane_options = list(option) if ((proto == self.PROTO_MAP['udp'] or proto == 'udp') and 'established' in sane_options): sane_options.remove('established') return sane_options def _TermletToStr(self, action, proto, saddr, sport, daddr, dport, icmp_type, icmp_code, option): """Take the various compenents and turn them into a cisco acl line. Args: action: str, action proto: str or int, protocol saddr: str, source address sport: str, the source port daddr: str, the destination address dport: str, the destination port icmp_type: icmp-type numeric specification (if any) icmp_code: icmp-code numeric specification (if any) option: list or none, optional, eg. 'logging' tokens. Returns: string of the cisco acl line, suitable for printing. Raises: UnsupportedCiscoAccessListError: When unknown icmp-types specified """ # str(icmp_type) is needed to ensure 0 maps to '0' instead of FALSE icmp_type = str(icmp_type) icmp_code = str(icmp_code) all_elements = [action, str(proto), saddr, sport, daddr, dport, icmp_type, icmp_code, ' '.join(option)] non_empty_elements = [x for x in all_elements if x] return [' ' + ' '.join(non_empty_elements)] def _FixConsecutivePorts(self, port_list): """Takes a list of tuples and expands the tuple if the range is two. http://www.cisco.com/warp/public/cc/pd/si/casi/ca6000/tech/65acl_wp.pdf Args: port_list: A list of tuples representing ports. Returns: list of tuples """ temporary_port_list = [] for low_port, high_port in port_list: if low_port == high_port - 1: temporary_port_list.append((low_port, low_port)) temporary_port_list.append((high_port, high_port)) else: temporary_port_list.append((low_port, high_port)) return temporary_port_list class ObjectGroupTerm(Term): """An individual term of an object-group'd acl. Object Group acls are very similar to extended acls in their syntax except they use a meta language with address/service definitions. eg: permit tcp first-term-source-address 179-179 ANY where first-term-source-address, ANY and 179-179 are defined elsewhere in the acl. """ # Protocols should be emitted as integers rather than strings. _PROTO_INT = True def __init__(self, term, filter_name, platform='cisco', verbose=True): super().__init__(term) self.term = term self.filter_name = filter_name self.platform = platform self.verbose = verbose def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self.platform not in self.term.platform: return '' if self.term.platform_exclude: if self.platform in self.term.platform_exclude: return '' source_address_set = set() destination_address_set = set() ret_str = ['\n'] if self.verbose: ret_str.append(' remark %s' % self.term.name) comments = aclgenerator.WrapWords(self.term.comment, _COMMENT_MAX_WIDTH) if comments and comments[0]: for comment in comments: ret_str.append(' remark %s' % str(comment)) # Term verbatim output - this will skip over normal term creation # code by returning early. Warnings provided in policy.py. 
if self.term.verbatim: for next_verbatim in self.term.verbatim: if next_verbatim[0] == self.platform: ret_str.append(str(next_verbatim[1])) return '\n'.join(ret_str) # protocol if not self.term.protocol: protocol = ['ip'] else: protocol = [proto if proto in self.ALLOWED_PROTO_STRINGS else self.PROTO_MAP.get(proto) for proto in self.term.protocol] # addresses source_address = self.term.source_address if not self.term.source_address: source_address = [nacaddr.IPv4('0.0.0.0/0', token='any')] source_address_set.add(source_address[0].parent_token) destination_address = self.term.destination_address if not self.term.destination_address: destination_address = [nacaddr.IPv4('0.0.0.0/0', token='any')] destination_address_set.add(destination_address[0].parent_token) # ports source_port = [()] destination_port = [()] if self.term.source_port: source_port = self.term.source_port if self.term.destination_port: destination_port = self.term.destination_port for saddr in source_address_set: for daddr in destination_address_set: for sport in source_port: for dport in destination_port: for proto in protocol: ret_str.append( self._TermletToStr(_ACTION_TABLE.get(str( self.term.action[0])), proto, saddr, sport, daddr, dport)) return '\n'.join(ret_str) def _TermletToStr(self, action, proto, saddr, sport, daddr, dport): """Output a portion of a cisco term/filter only, based on the 5-tuple.""" # Empty addr/port destinations should emit 'any' if saddr and saddr != 'any': saddr = 'net-group %s' % saddr if daddr and daddr != 'any': daddr = 'net-group %s' % daddr # fix ports if sport: sport = ' port-group %d-%d' % (sport[0], sport[1]) else: sport = '' if dport: dport = ' port-group %d-%d' % (dport[0], dport[1]) else: dport = '' return (' %s %s %s%s %s%s' % ( action, proto, saddr, sport, daddr, dport)).rstrip() class Cisco(aclgenerator.ACLGenerator): """A cisco policy object.""" _PLATFORM = 'cisco' _DEFAULT_PROTOCOL = 'ip' SUFFIX = '.acl' # Protocols should be emitted as numbers. _PROTO_INT = True _TERM_REMARK = True def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'address', 'restrict_address_family', 'dscp_match', 'icmp_code', 'logging', 'owner'} supported_sub_tokens.update({'option': {'established', 'tcp-established', 'is-fragment', 'fragments'}, # Warning, some of these are mapped # differently. See _ACTION_TABLE 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}}) return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.cisco_policies = [] current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) # a mixed filter outputs both ipv4 and ipv6 acls in the same output file good_filters = ['extended', 'standard', 'object-group', 'inet6', 'mixed', 'enable_dsmo'] for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue obj_target = ObjectGroup() filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) self.verbose = True if 'noverbose' in filter_options: filter_options.remove('noverbose') self.verbose = False # extended is the most common filter type. 
filter_type = 'extended' if len(filter_options) > 1: filter_type = filter_options[1] # check if filter type is renderable if filter_type not in good_filters: raise UnsupportedCiscoAccessListError( 'access list type %s not supported by %s (good types: %s)' % ( filter_type, self._PLATFORM, str(good_filters))) filter_list = [filter_type] if filter_type == 'mixed': # Loop through filter and generate output for inet and inet6 in sequence filter_list = ['extended', 'inet6'] for next_filter in filter_list: # Numeric access lists can be extended or standard, but have specific # known ranges. if next_filter == 'extended' and filter_name.isdigit(): if int(filter_name) in list(range(1, 100)) + list(range(1300, 2000)): raise UnsupportedCiscoAccessListError( 'Access lists between 1-99 and 1300-1999 are reserved for ' 'standard ACLs') if next_filter == 'standard' and filter_name.isdigit(): if (int(filter_name) not in list(range(1, 100)) + list(range(1300, 2000))): raise UnsupportedCiscoAccessListError( 'Standard access lists must be numeric in the range of 1-99' ' or 1300-1999.') term_dup_check = set() new_terms = [] for term in terms: if term.name in term_dup_check: raise CiscoDuplicateTermError('You have a duplicate term: %s' % term.name) term_dup_check.add(term.name) term.name = self.FixTermLength(term.name) af = 'inet' if next_filter == 'inet6': af = 'inet6' term = self.FixHighPorts(term, af=af) if not term: continue # Ignore if the term is for a different AF if term.restrict_address_family and term.restrict_address_family != af: continue if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue # render terms based on filter type if next_filter == 'standard': # keep track of sequence numbers across terms new_terms.append(TermStandard(term, filter_name, self._PLATFORM, self.verbose)) elif next_filter == 'extended': enable_dsmo = (len(filter_options) > 2 and filter_options[2] == 'enable_dsmo') new_terms.append( Term(term, proto_int=self._PROTO_INT, enable_dsmo=enable_dsmo, term_remark=self._TERM_REMARK, platform=self._PLATFORM, verbose=self.verbose)) elif next_filter == 'object-group': obj_target.AddTerm(term) new_terms.append(self._GetObjectGroupTerm(term, filter_name, verbose=self.verbose)) elif next_filter == 'inet6': new_terms.append( Term( term, 6, proto_int=self._PROTO_INT, platform=self._PLATFORM, verbose=self.verbose)) # cisco requires different name for the v4 and v6 acls if filter_type == 'mixed' and next_filter == 'inet6': filter_name = 'ipv6-%s' % filter_name self.cisco_policies.append((header, filter_name, [next_filter], new_terms, obj_target)) def _GetObjectGroupTerm(self, term, filter_name, verbose=True): """Returns an ObjectGroupTerm object.""" return ObjectGroupTerm(term, filter_name, verbose=verbose) def _AppendTargetByFilterType(self, filter_name, filter_type): """Takes in the filter name and type and appends headers. Args: filter_name: Name of the current filter filter_type: Type of current filter Returns: list of strings Raises: UnsupportedCiscoAccessListError: When unknown filter type is used. 
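
    Example (illustrative):
      _AppendTargetByFilterType('MY-FILTER', 'extended') returns
      ['no ip access-list extended MY-FILTER',
       'ip access-list extended MY-FILTER'].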
""" target = [] if filter_type == 'standard': if filter_name.isdigit(): target.append('no access-list %s' % filter_name) else: target.append('no ip access-list standard %s' % filter_name) target.append('ip access-list standard %s' % filter_name) elif filter_type == 'extended': target.append('no ip access-list extended %s' % filter_name) target.append('ip access-list extended %s' % filter_name) elif filter_type == 'object-group': target.append('no ip access-list extended %s' % filter_name) target.append('ip access-list extended %s' % filter_name) elif filter_type == 'inet6': target.append('no ipv6 access-list %s' % filter_name) target.append('ipv6 access-list %s' % filter_name) else: raise UnsupportedCiscoAccessListError( 'access list type %s not supported by %s' % ( filter_type, self._PLATFORM)) return target def _RepositoryTagsHelper(self, target=None, filter_type='', filter_name=''): if target is None: target = [] if filter_type == 'standard' and filter_name.isdigit(): target.extend(aclgenerator.AddRepositoryTags( 'access-list %s remark ' % filter_name, date=False, revision=False)) else: target.extend(aclgenerator.AddRepositoryTags( ' remark ', date=False, revision=False)) return target def __str__(self): target_header = [] target = [] # add the p4 tags target.extend(aclgenerator.AddRepositoryTags('! ')) for (header, filter_name, filter_list, terms, obj_target ) in self.cisco_policies: for filter_type in filter_list: target.extend(self._AppendTargetByFilterType(filter_name, filter_type)) if filter_type == 'object-group': obj_target.AddName(filter_name) # Add the Perforce Id/Date tags, these must come after # remove/re-create of the filter, otherwise config mode doesn't # know where to place these remarks in the configuration. if self.verbose: target = self._RepositoryTagsHelper(target, filter_type, filter_name) # add a header comment if one exists for comment in aclgenerator.WrapWords(header.comment, _COMMENT_MAX_WIDTH): for line in comment.split('\n'): if (self._PLATFORM == 'cisco' and filter_type == 'standard' and filter_name.isdigit()): target.append('access-list %s remark %s' % (filter_name, line)) else: target.append(' remark %s' % line) # now add the terms for term in terms: term_str = str(term) if term_str: target.append(term_str) if obj_target.valid: target = [str(obj_target)] + target # ensure that the header is always first target = target_header + target target += ['', 'exit', ''] return '\n'.join(target) capirca-2.0.9/capirca/lib/ciscoasa.py000066400000000000000000000303361437377527500174640ustar00rootroot00000000000000# Copyright 2011 Capirca Project Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Cisco ASA renderer.""" import datetime import ipaddress import logging import re from typing import cast from capirca.lib import aclgenerator from capirca.lib import cisco from capirca.lib import nacaddr _ACTION_TABLE = { 'accept': 'permit', 'deny': 'deny', 'reject': 'deny', 'next': '! 
next', 'reject-with-tcp-rst': 'deny', # tcp rst not supported } # generic error class class Error(Exception): """Generic error class.""" pass class UnsupportedCiscoAccessListError(Error): """Raised when we're give a non named access list.""" pass class StandardAclTermError(Error): """Raised when there is a problem in a standard access list.""" pass class NoCiscoPolicyError(Error): """Raised when a policy is errantly passed to this module for rendering.""" pass class Term(cisco.Term): """A single ACL Term.""" def __init__(self, term, filter_name, af=4): self.term = term self.filter_name = filter_name self.options = [] assert af in (4, 6) self.af = af def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if 'ciscoasa' not in self.term.platform: return '' if self.term.platform_exclude: if 'ciscoasa' in self.term.platform_exclude: return '' ret_str = ['\n'] # Don't render icmpv6 protocol terms under inet, or icmp under inet6 if ((self.af == 6 and 'icmp' in self.term.protocol) or (self.af == 4 and 'icmpv6' in self.term.protocol)): ret_str.append('remark Term %s' % self.term.name) ret_str.append('remark not rendered due to protocol/AF mismatch.') return '\n'.join(ret_str) ret_str.append('access-list %s remark %s' % (self.filter_name, self.term.name)) if self.term.owner: self.term.comment.append('Owner: %s' % self.term.owner) for comment in self.term.comment: for line in comment.split('\n'): ret_str.append('access-list %s remark %s' % (self.filter_name, str(line)[:100])) # Term verbatim output - this will skip over normal term creation # code by returning early. Warnings provided in policy.py. if self.term.verbatim: for line in self.term.verbatim: if line[0] == 'ciscoasa': ret_str.append(str(line[1])) return '\n'.join(ret_str) # protocol if not self.term.protocol: protocol = ['ip'] else: # fix the protocol protocol = self.term.protocol # source address if self.term.source_address: source_address = self.term.GetAddressOfVersion('source_address', self.af) source_address_exclude = self.term.GetAddressOfVersion( 'source_address_exclude', self.af) if source_address_exclude: source_address = nacaddr.ExcludeAddrs( source_address, source_address_exclude) else: # source address not set source_address = ['any'] # destination address if self.term.destination_address: destination_address = self.term.GetAddressOfVersion( 'destination_address', self.af) destination_address_exclude = self.term.GetAddressOfVersion( 'destination_address_exclude', self.af) if destination_address_exclude: destination_address = nacaddr.ExcludeAddrs( destination_address, destination_address_exclude) else: # destination address not set destination_address = ['any'] # options extra_options = [] for opt in [str(x) for x in self.term.option]: if opt.find('tcp-established') == 0 and 6 in protocol: extra_options.append('established') elif opt.find('established') == 0 and 6 in protocol: # only needed for TCP, for other protocols policy.py handles high-ports extra_options.append('established') self.options.extend(extra_options) # ports source_port = [()] destination_port = [()] if self.term.source_port: source_port = self.term.source_port if self.term.destination_port: destination_port = self.term.destination_port # logging if self.term.logging: self.options.append('log') if 'disable' in [x.value for x in self.term.logging]: self.options.append('disable') # icmp-types icmp_types = [''] if self.term.icmp_type: icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, 
self.term.protocol, self.af) for saddr in source_address: for daddr in destination_address: for sport in source_port: for dport in destination_port: for proto in protocol: for icmp_type in icmp_types: # only output address family appropriate IP addresses do_output = False if self.af == 4: if (((isinstance(saddr, nacaddr.IPv4)) or (saddr == 'any')) and ((isinstance(daddr, nacaddr.IPv4)) or (daddr == 'any'))): do_output = True if self.af == 6: if (((isinstance(saddr, nacaddr.IPv6)) or (saddr == 'any')) and ((isinstance(daddr, nacaddr.IPv6)) or (daddr == 'any'))): do_output = True if do_output: ret_str.extend(self._TermletToStr( self.filter_name, _ACTION_TABLE.get(str(self.term.action[0])), proto, saddr, sport, daddr, dport, icmp_type, self.options)) return '\n'.join(ret_str) def _TermletToStr(self, filter_name, action, proto, saddr, sport, daddr, dport, icmp_type, option): """Take the various compenents and turn them into a cisco acl line. Args: filter_name: name of the filter action: str, action proto: str, protocl saddr: str or ipaddress, source address sport: str list or none, the source port daddr: str or ipaddress, the destination address dport: str list or none, the destination port icmp_type: icmp-type numeric specification (if any) option: list or none, optional, eg. 'logging' tokens. Returns: string of the cisco acl line, suitable for printing. """ # inet4 if isinstance(saddr, nacaddr.IPv4) or isinstance(saddr, ipaddress.IPv4Network): saddr = cast(self.IPV4_ADDRESS, saddr) if saddr.num_addresses > 1: saddr = '%s %s' % (saddr.network_address, saddr.netmask) else: saddr = 'host %s' % (saddr.network_address) if isinstance(daddr, nacaddr.IPv4) or isinstance(daddr, ipaddress.IPv4Network): daddr = cast(self.IPV4_ADDRESS, daddr) if daddr.num_addresses > 1: daddr = '%s %s' % (daddr.network_address, daddr.netmask) else: daddr = 'host %s' % (daddr.network_address) # inet6 if isinstance(saddr, nacaddr.IPv6) or isinstance(saddr, ipaddress.IPv6Network): saddr = cast(self.IPV6_ADDRESS, saddr) if saddr.num_addresses > 1: saddr = '%s/%s' % (saddr.network_address, saddr.prefixlen) else: saddr = 'host %s' % (saddr.network_address) if isinstance(daddr, nacaddr.IPv6) or isinstance(daddr, ipaddress.IPv6Network): daddr = cast(self.IPV6_ADDRESS, daddr) if daddr.num_addresses > 1: daddr = '%s/%s' % (daddr.network_address, daddr.prefixlen) else: daddr = 'host %s' % (daddr.network_address) # fix ports if not sport: sport = '' elif sport[0] != sport[1]: sport = ' range %s %s' % (cisco.PortMap.GetProtocol(sport[0], proto), cisco.PortMap.GetProtocol(sport[1], proto)) else: sport = ' eq %s' % (cisco.PortMap.GetProtocol(sport[0], proto)) if not dport: dport = '' elif dport[0] != dport[1]: dport = ' range %s %s' % (cisco.PortMap.GetProtocol(dport[0], proto), cisco.PortMap.GetProtocol(dport[1], proto)) else: dport = ' eq %s' % (cisco.PortMap.GetProtocol(dport[0], proto)) if not option: option = [''] # Prevent UDP from appending 'established' to ACL line sane_options = list(option) if proto == 'udp' and 'established' in sane_options: sane_options.remove('established') ret_lines = [] # str(icmp_type) is needed to ensure 0 maps to '0' instead of FALSE icmp_type = str(cisco.PortMap.GetProtocol(icmp_type, 'icmp')) ret_lines.append('access-list %s extended %s %s %s %s %s %s %s %s' % (filter_name, action, proto, saddr, sport, daddr, dport, icmp_type, ' '.join(sane_options) )) # remove any trailing spaces and replace multiple spaces with singles stripped_ret_lines = [re.sub(r'\s+', ' ', x).rstrip() for x in ret_lines] 
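    # The single format string above always carries slots for ports, icmp-type
    # and options; when those are empty the join leaves runs of spaces, which
    # the substitution above collapses before the line is returned.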
return stripped_ret_lines class CiscoASA(aclgenerator.ACLGenerator): """A cisco ASA policy object.""" _PLATFORM = 'ciscoasa' _DEFAULT_PROTOCOL = 'ip' SUFFIX = '.asa' def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'logging', 'owner'} supported_sub_tokens.update({'option': {'established', 'tcp-established'}, # Warning, some of these are mapped # differently. See _ACTION_TABLE 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}}) return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.ciscoasa_policies = [] current_date = datetime.date.today() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in self.policy.filters: filter_name = header.FilterName('ciscoasa') new_terms = [] # now add the terms for term in terms: if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue new_terms.append(str(Term(term, filter_name))) self.ciscoasa_policies.append((header, filter_name, new_terms)) def __str__(self): target = [] for (header, filter_name, terms) in self.ciscoasa_policies: target.append('clear configure access-list %s' % filter_name) # add the p4 tags target.extend(aclgenerator.AddRepositoryTags('access-list %s remark ' % filter_name)) # add a header comment if one exists for comment in header.comment: for line in comment.split('\n'): target.append('access-list %s remark %s' % (filter_name, line)) # now add the terms for term in terms: target.append(str(term)) # end for header, filter_name, filter_type... return '\n'.join(target) capirca-2.0.9/capirca/lib/cisconx.py000066400000000000000000000046071437377527500173470ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """CiscoNX generator.""" from capirca.lib import aclgenerator from capirca.lib import cisco class Error(Exception): """Base error class.""" class UnsupportedNXosAccessListError(Error): """When a filter type is not supported in an NXOS policy target.""" class CiscoNX(cisco.Cisco): """An CiscoNX policy object. CiscoNX devices differ slightly from Cisco, omitting the extended argument to ACLs for example. """ _PLATFORM = 'cisconx' SUFFIX = '.nxacl' # Protocols should be emitted as they were in the policy (names). _PROTO_INT = False def _RepositoryTagsHelper(self, target=None, filter_type='', filter_name=''): if target is None: target = [] target.extend(aclgenerator.AddRepositoryTags( ' remark ', rid=False, wrap=True)) return target # CiscoNX omits the "extended" access-list argument. 
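  # e.g. NX-OS expects 'ip access-list FILTER_NAME' where classic IOS uses
  # 'ip access-list extended FILTER_NAME'; see the overridden method below.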
def _AppendTargetByFilterType(self, filter_name, filter_type): """Takes in the filter name and type and appends headers. Args: filter_name: Name of the current filter filter_type: Type of current filter Returns: list of strings Raises: UnsupportedNXosAccessListError: When unknown filter type is used. """ target = [] if filter_type == 'extended': target.append('no ip access-list %s' % filter_name) target.append('ip access-list %s' % filter_name) elif filter_type == 'object-group': target.append('no ip access-list %s' % filter_name) target.append('ip access-list %s' % filter_name) elif filter_type == 'inet6': target.append('no ipv6 access-list %s' % filter_name) target.append('ipv6 access-list %s' % filter_name) else: raise UnsupportedNXosAccessListError( 'access list type %s not supported by %s' % (filter_type, self._PLATFORM)) return target capirca-2.0.9/capirca/lib/ciscoxr.py000066400000000000000000000040401437377527500173420ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Cisco IOS-XR filter renderer.""" from capirca.lib import cisco class CiscoXR(cisco.Cisco): """A cisco policy object.""" _PLATFORM = 'ciscoxr' _DEFAULT_PROTOCOL = 'ip' SUFFIX = '.xacl' _PROTO_INT = False def _AppendTargetByFilterType(self, filter_name, filter_type): """Takes in the filter name and type and appends headers. Args: filter_name: Name of the current filter filter_type: Type of current filter Returns: list of strings """ target = [] if filter_type == 'inet6': target.append('no ipv6 access-list %s' % filter_name) target.append('ipv6 access-list %s' % filter_name) else: target.append('no ipv4 access-list %s' % filter_name) target.append('ipv4 access-list %s' % filter_name) return target def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'next_ip'} return supported_tokens, supported_sub_tokens def _GetObjectGroupTerm(self, term, filter_name, verbose=True): """Returns an ObjectGroupTerm object.""" return CiscoXRObjectGroupTerm(term, filter_name, platform=self._PLATFORM, verbose=verbose) class CiscoXRObjectGroupTerm(cisco.ObjectGroupTerm): ALLOWED_PROTO_STRINGS = cisco.Term.ALLOWED_PROTO_STRINGS + ['pcp', 'esp'] capirca-2.0.9/capirca/lib/cloudarmor.py000066400000000000000000000211571437377527500200470ustar00rootroot00000000000000"""Google Cloud Armor Firewall Generator. 
Refer to the links below for more information https://cloud.google.com/armor/ https://cloud.google.com/armor/docs/ """ import copy import json import logging from capirca.lib import aclgenerator import six # Generic error class class Error(Exception): """Generic error class.""" class ExceededMaxTermsError(Error): """Raised when number of terms in a policy exceed _MAX_RULES_PER_POLICY.""" class UnsupportedFilterTypeError(Error): """Raised when unsupported filter type (i.e address family) is specified.""" class Term(aclgenerator.Term): """Generates the Term for CloudArmor.""" # Max srcIpRanges within a single term _MAX_IP_RANGES_PER_TERM = 5 ACTION_MAP = {'accept': 'allow', 'deny': 'deny(404)'} _MAX_TERM_COMMENT_LENGTH = 64 def __init__(self, term, address_family='inet', verbose=True): super().__init__(term) self.term = term self.address_family = address_family self.verbose = verbose def __str__(self): return '' def ConvertToDict(self, priority_index): """Converts term to dictionary representation of CloudArmor's JSON format. Takes all of the attributes associated with a term (match, action, etc) and converts them into a dictionary which most closely represents the CloudArmor API's JSON rule format. Additionally, splits a single term into multiple terms if the number of srcIpRanges exceed _MAX_IP_RANGES_PER_TERM. Args: priority_index: An integer priority value assigned to the term. In case the term is split into i sub-terms, the ith sub-term has priority = priority_index + i Returns: A list of dicts where each dict is a term Raises: UnsupportedFilterTypeError: Raised when an unsupported filter type is specified """ term_dict = {} rules = [] if self.term.comment and self.verbose: raw_comment = ' '.join(self.term.comment) if len(raw_comment) > self._MAX_TERM_COMMENT_LENGTH: term_dict['description'] = raw_comment[:self._MAX_TERM_COMMENT_LENGTH] logging.warning('Term comment exceeds maximum length = %d; Truncating ' 'comment..', self._MAX_TERM_COMMENT_LENGTH) else: term_dict['description'] = raw_comment term_dict['action'] = self.ACTION_MAP[self.term.action[0]] term_dict['preview'] = False if self.address_family == 'inet': saddrs = self.term.GetAddressOfVersion('source_address', 4) elif self.address_family == 'inet6': saddrs = self.term.GetAddressOfVersion('source_address', 6) elif self.address_family == 'mixed': saddrs = (self.term.GetAddressOfVersion('source_address', 4) + self.term.GetAddressOfVersion('source_address', 6)) else: raise UnsupportedFilterTypeError("'%s' is not a valid filter type" % self.address_family) term_dict['match'] = { 'versionedExpr': 'SRC_IPS_V1', 'config': { 'srcIpRanges': saddrs, } } # If scrIpRanges within a single term exceed _MAX_IP_RANGES_PER_TERM, # split into multiple terms source_addr_chunks = [ saddrs[x:x+self._MAX_IP_RANGES_PER_TERM] for x in range( 0, len(saddrs), self._MAX_IP_RANGES_PER_TERM)] if not source_addr_chunks: rule = copy.deepcopy(term_dict) rule['priority'] = priority_index rule['match']['config']['srcIpRanges'] = ['*'] rules.append(rule) else: split_rule_count = len(source_addr_chunks) for i, chunk in enumerate(source_addr_chunks): rule = copy.deepcopy(term_dict) if split_rule_count > 1: term_position_suffix = ' [%d/%d]' % (i+1, split_rule_count) desc_limit = self._MAX_TERM_COMMENT_LENGTH - len(term_position_suffix) rule['description'] = (rule.get('description', '')[:desc_limit] + term_position_suffix) rule['priority'] = priority_index + i rule['match'] = { 'versionedExpr': 'SRC_IPS_V1', 'config': { 'srcIpRanges': [str(saddr) for saddr in 
chunk], } } rules.append(rule) # TODO(robankeny@): Review this log entry to make it cleaner/more useful. # Right now, it prints the entire term which might be huge if len(source_addr_chunks) > 1: logging.debug('Current term [%s] was split into %d sub-terms since ' '_MAX_IP_RANGES_PER_TERM was exceeded', str(term_dict), len(source_addr_chunks)) return rules class CloudArmor(aclgenerator.ACLGenerator): """A CloudArmor policy object.""" _PLATFORM = 'cloudarmor' SUFFIX = '.gca' _SUPPORTED_AF = set(('inet', 'inet6', 'mixed')) # Maximum number of rules that a CloudArmor policy can contain _MAX_RULES_PER_POLICY = 200 # Warn user when rule count exceeds this number _RULECOUNT_WARN_THRESHOLD = 190 # Maps indiviudal filter options to their index positions in the POL header _FILTER_OPTIONS_MAP = {'filter_type': 0} def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, _ = super()._BuildTokens() supported_tokens -= {'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'stateless_reply', 'option', 'protocol', 'platform', 'platform_exclude', 'source_address_exclude', 'source_port', 'verbatim'} supported_sub_tokens = {'action': {'accept', 'deny'}} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): """Translates a Capirca policy into a CloudArmor-specific data structure. Takes in a POL file, parses each term and populates the cloudarmor_policies list. Each term in this list is a dictionary formatted according to CloudArmor's rule API specification. Args: pol: A Policy() object representing a given POL file. exp_info: An int that specifies number of weeks till policy expiry. Returns: N.A. Raises: ExceededMaxTermsError: Raised when the number of terms in a policy exceed _MAX_RULES_PER_POLICY. UnsupportedFilterTypeError: Raised when an unsupported filter type is specified """ self.cloudarmor_policies = [] for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) verbose = True if 'noverbose' in filter_options: filter_options.remove('noverbose') verbose = False if filter_options is None or not filter_options: filter_type = 'inet' logging.debug('No filter_type specified. 
Defaulting to inet (IPv4)') else: filter_type = filter_options[self._FILTER_OPTIONS_MAP['filter_type']] if filter_type not in self._SUPPORTED_AF: raise UnsupportedFilterTypeError("'%s' is not a valid filter type" % filter_type) counter = 1 for term in terms: json_rule_list = Term(term, address_family=filter_type, verbose=verbose).ConvertToDict( priority_index=counter) # count number of rules generated after split (if any) split_rule_count = len(json_rule_list) self.cloudarmor_policies.extend(json_rule_list) counter = counter + split_rule_count total_rule_count = len(self.cloudarmor_policies) if total_rule_count > self._RULECOUNT_WARN_THRESHOLD: if total_rule_count > self._MAX_RULES_PER_POLICY: raise ExceededMaxTermsError('Exceeded maximum number of rules in ' ' a single policy | MAX = %d' % self._MAX_RULES_PER_POLICY) else: logging.warning('Current rule count (%d) is almost at maximum ' 'limit of %d', total_rule_count, self._MAX_RULES_PER_POLICY) def __str__(self): """Return the JSON blob for CloudArmor.""" out = '%s\n\n' % ( json.dumps(self.cloudarmor_policies, indent=2, separators=(six.ensure_str(','), six.ensure_str(': ')), sort_keys=True)) return out capirca-2.0.9/capirca/lib/demo.py000066400000000000000000000172471437377527500166310ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Demo generator for capirca.""" import datetime import logging from capirca.lib import aclgenerator class Term(aclgenerator.Term): """Used to create an individual term. The __str__ method must be implemented. Args: term policy.Term object This is created to be a demo. """ _ACTIONS = {'accept': 'allow', 'deny': 'discard', 'reject': 'say go away to', 'next': 'pass it onto the next term', 'reject-with-tcp-rst': 'reset' } def __init__(self, term, term_type): self.term = term self.term_type = term_type def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. 
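    # term.platform / term.platform_exclude list target generators for the
    # term; it is rendered here only when 'demo' is included (and not
    # explicitly excluded).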
if self.term.platform: if 'demo' not in self.term.platform: return '' if self.term.platform_exclude: if 'demo' in self.term.platform_exclude: return '' ret_str = [] # NAME ret_str.append(' ' * 4 + 'Term: '+self.term.name+'{') # COMMENTS if self.term.comment: ret_str.append(' ') ret_str.append(' ' * 8 + '#COMMENTS') for comment in self.term.comment: for line in comment.split('\n'): ret_str.append(' ' * 8 + '#'+line) # SOURCE ADDRESS source_address = self.term.GetAddressOfVersion( 'source_address', self.AF_MAP.get(self.term_type)) source_address_exclude = self.term.GetAddressOfVersion( 'source_address_exclude', self.AF_MAP.get(self.term_type)) if source_address: ret_str.append(' ') ret_str.append(' ' * 8 + 'Source IP\'s') for saddr in source_address: ret_str.append(' ' * 8 + str(saddr)) # SOURCE ADDRESS EXCLUDE if source_address_exclude: ret_str.append(' ') ret_str.append(' ' * 8 + 'Excluded Source IP\'s') for ex in source_address: ret_str.append(' ' * 8 + str(ex)) # SOURCE PORT if self.term.source_port: ret_str.append(' ') ret_str.append(' ' * 8 + 'Source ports') ret_str.append(' ' * 8 + self._Group(self.term.source_port)) # DESTINATION destination_address = self.term.GetAddressOfVersion( 'destination_address', self.AF_MAP.get(self.term_type)) destination_address_exclude = self.term.GetAddressOfVersion( 'destination_address_exclude', self.AF_MAP.get(self.term_type)) if destination_address: ret_str.append(' ') ret_str.append(' ' * 8 + 'Destination IP\'s') for daddr in destination_address: ret_str.append(' ' * 8 + str(daddr)) # DESINATION ADDRESS EXCLUDE if destination_address_exclude: ret_str.append(' ') ret_str.append(' ' * 8 + 'Excluded Destination IP\'s') for ex in destination_address_exclude: ret_str.append(' ' * 8 + str(ex)) # DESTINATION PORT if self.term.destination_port: ret_str.append(' ') ret_str.append(' ' * 8 + 'Destination Ports') ret_str.append(' ' * 8 + self._Group(self.term.destination_port)) # PROTOCOL if self.term.protocol: ret_str.append(' ') ret_str.append(' ' * 8 + 'Protocol') ret_str.append(' ' * 8 + self._Group(self.term.protocol)) # OPTION if self.term.option: ret_str.append(' ') ret_str.append(' ' * 8 + 'Options') for option in self.term.option: ret_str.append(' ' * 8 + option) # ACTION for action in self.term.action: ret_str.append(' ') ret_str.append(' ' * 8 + 'Action: ' + self._ACTIONS.get(str(action))+' all traffic') return '\n '.join(ret_str) def _Group(self, group): def _FormattedGroup(el): if isinstance(el, str): return el.lower() elif isinstance(el, int): return str(el) elif el[0] == el[1]: return '%d' % el[0] else: return '%d-%d' % (el[0], el[1]) if len(group) > 1: rval = '' for item in group: rval = rval + str(item[0])+' ' else: rval = _FormattedGroup(group[0]) return rval class Demo(aclgenerator.ACLGenerator): """Demo rendering class. This class takes a policy object and renders output into a syntax which is not useable by routers. This class should only be used for testing and understanding how to create a generator of your own. Args: pol: policy.Policy object Steps to implement this library 1) Import library in aclgen.py 2) Create a 3 letter entry in the table in the render_filters function for the demo library and set it to False 3) In the for header in policy.headers: use the previous entry to add an if statement to create a deep copy of the policy object 4) Create an if statement that will be used if that specific policy object is present will pass the policy file onto the demo Class. 
5) The returned object can be then printed to a file using the do_output_filter function 6) Create a policy file with a target set to use demo """ _PLATFORM = 'demo' _SUFFIX = '.demo' _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',]) def _TranslatePolicy(self, pol, exp_info): current_date = datetime.date.today() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) self.demo_policies = [] for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions('demo') filter_name = filter_options[0] if len(filter_options) > 1: interface_specific = filter_options[1] else: interface_specific = 'none' filter_type = 'inet' term_names = set() new_terms = [] for term in terms: if term.name in term_names: raise DemoFilterError('Duplicate term name') term_names.add(term.name) if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue new_terms.append(Term(term, filter_type)) self.demo_policies.append((header, filter_name, filter_type, interface_specific, new_terms)) def __str__(self): target = [] for (header, filter_name, filter_type, interface_specific, terms) in self.demo_policies: target.append('Header {') target.append(' ' * 4 + 'Name: %s {' % filter_name) target.append(' ' * 8 + 'Type: %s ' % filter_type) for comment in header.comment: for line in comment.split('\n'): target.append(' ' * 8 + 'Comment: %s'%line) target.append(' ' * 8 + 'Family type: %s'%interface_specific) target.append(' ' * 4 +'}') for term in terms: target.append(str(term)) target.append(' ' * 4 +'}') target.append(' ') target.append('}') return '\n'.join(target) class Error(Exception): pass class DemoFilterError(Error): pass capirca-2.0.9/capirca/lib/gce.py000066400000000000000000000560771437377527500164470ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Google Compute Engine firewall generator. 
More information about GCE networking and firewalls: https://cloud.google.com/compute/docs/networking https://cloud.google.com/compute/docs/reference/latest/firewalls """ import copy import datetime import ipaddress import json import logging import re from typing import Dict, Any from capirca.lib import gcp from capirca.lib import nacaddr import six class Error(Exception): """Generic error class.""" class GceFirewallError(Error): """Raised with problems in formatting for GCE firewall.""" class ExceededAttributeCountError(Error): """Raised when the total attribute count of a policy is above the maximum.""" def IsDefaultDeny(term): """Returns true if a term is a default deny without IPs, ports, etc.""" skip_attrs = ['flattened', 'flattened_addr', 'flattened_saddr', 'flattened_daddr', 'action', 'comment', 'name', 'logging'] if 'deny' not in term.action: return False # This lc will look through all methods and attributes of the object. # It returns only the attributes that need to be looked at to determine if # this is a default deny. for i in [a for a in dir(term) if not a.startswith('__') and a.islower() and not callable(getattr(term, a))]: if i in skip_attrs: continue v = getattr(term, i) if isinstance(v, str) and v: return False if isinstance(v, list) and v: return False return True def GetNextPriority(priority): """Get the priority for the next rule.""" return priority class Term(gcp.Term): """Creates the term for the GCE firewall.""" ACTION_MAP = {'accept': 'allowed', 'deny': 'denied'} # Restrict the number of addresses per term to 256. # Similar restrictions apply to source and target tags, and ports. # Details: https://cloud.google.com/vpc/docs/quota#per_network_2 _TERM_ADDRESS_LIMIT = 256 _TERM_SOURCE_TAGS_LIMIT = 30 _TERM_TARGET_TAGS_LIMIT = 70 _TERM_PORTS_LIMIT = 256 _TERM_SERVICE_ACCOUNTS_LIMIT = 10 # Firewall rule name has to match specific RE: # The first character must be a lowercase letter, and all following characters # must be a dash, lowercase letter, or digit, except the last character, which # cannot be a dash. # Details: https://cloud.google.com/compute/docs/reference/latest/firewalls _TERM_NAME_RE = re.compile(r'^[a-z]([-a-z0-9]*[a-z0-9])?$') # Protocols allowed by name from: # https://cloud.google.com/vpc/docs/firewalls#protocols_and_ports _ALLOW_PROTO_NAME = frozenset( ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp', 'all' # Needed for default deny, do not use in policy file. ]) # Any protocol not in _ALLOW_PROTO_NAME must be passed by number. ALWAYS_PROTO_NUM = set(gcp.Term.PROTO_MAP.keys()) - _ALLOW_PROTO_NAME def __init__(self, term, inet_version='inet', policy_inet_version='inet'): super().__init__(term) self.term = term self.inet_version = inet_version # This is to handle mixed, where the policy_inet_version is mixed, # but the term inet version is either inet/inet6. # This is only useful for term name and priority. self.policy_inet_version = policy_inet_version self._validateDirection() if self.term.source_address_exclude and not self.term.source_address: raise GceFirewallError( 'GCE firewall does not support address exclusions without a source ' 'address list.') # The reason for the error below isn't because of a GCE restriction, but # because we don't want to use a bad default of GCE that allows talking # to anything when there's no source address, source tag, or source service # account. 
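    # In practice the policy term needs a source-address:: or source-tag::
    # entry for INGRESS (keyword spellings shown here are illustrative of
    # capirca .pol syntax).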
if (not self.term.source_address and not self.term.source_tag) and self.term.direction == 'INGRESS': raise GceFirewallError( 'GCE firewall needs either to specify source address or source tags.') if self.term.source_port: raise GceFirewallError( 'GCE firewall does not support source port restrictions.') if (self.term.source_address_exclude and self.term.source_address or self.term.destination_address_exclude and self.term.destination_address): self.term.FlattenAll() if not self.term.source_address and self.term.direction == 'INGRESS': raise GceFirewallError( 'GCE firewall rule no longer contains any source addresses after ' 'the prefixes in source_address_exclude were removed.') # Similarly to the comment above, the reason for this error is also # because we do not want to use the bad default of GCE that allows for # talking to anything when there is no IP address provided for this field. if not self.term.destination_address and self.term.direction == 'EGRESS': raise GceFirewallError( 'GCE firewall rule no longer contains any destination addresses ' 'after the prefixes in destination_address_exclude were removed.') def __str__(self): """Convert term to a string.""" json.dumps(self.ConvertToDict(), indent=2, separators=(six.ensure_str(','), six.ensure_str(': '))) def _validateDirection(self): if self.term.direction == 'INGRESS': if not self.term.source_address and not self.term.source_tag: raise GceFirewallError( 'Ingress rule missing required field oneof "sourceRanges" or ' '"sourceTags".') if self.term.destination_address: raise GceFirewallError('Ingress rules cannot include ' '"destinationRanges.') elif self.term.direction == 'EGRESS': if self.term.source_address: raise GceFirewallError( 'Egress rules cannot include "sourceRanges".') if not self.term.destination_address: raise GceFirewallError( 'Egress rule missing required field "destinationRanges".') if self.term.destination_tag: raise GceFirewallError( 'GCE Egress rule cannot have destination tag.') def ConvertToDict(self): """Convert term to a dictionary. This is used to get a dictionary describing this term which can be output easily as a JSON blob. Returns: A dictionary that contains all fields necessary to create or update a GCE firewall. Raises: GceFirewallError: The term name is too long. """ if self.term.owner: self.term.comment.append('Owner: %s' % self.term.owner) term_dict = { 'description': ' '.join(self.term.comment), 'name': self.term.name, 'direction': self.term.direction } if self.term.network: term_dict['network'] = self.term.network term_dict['name'] = '%s-%s' % ( self.term.network.split('/')[-1], term_dict['name']) # Identify if this is inet6 processing for a term under a mixed policy. mixed_policy_inet6_term = False if self.policy_inet_version == 'mixed' and self.inet_version == 'inet6': mixed_policy_inet6_term = True # Update term name to have the IPv6 suffix for the inet6 rule. if mixed_policy_inet6_term: term_dict['name'] = gcp.GetIpv6TermName(term_dict['name']) # Checking counts of tags, and ports to see if they exceeded limits. 
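    # The limits come from the class constants above: 30 source tags,
    # 70 target tags, 10 service accounts and 256 ports per rule.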
if len(self.term.source_tag) > self._TERM_SOURCE_TAGS_LIMIT: raise GceFirewallError( 'GCE firewall rule exceeded number of source tags per rule: %s' % self.term.name) if len(self.term.destination_tag) > self._TERM_TARGET_TAGS_LIMIT: raise GceFirewallError( 'GCE firewall rule exceeded number of target tags per rule: %s' % self.term.name) if len( self.term.source_service_accounts ) > self._TERM_SERVICE_ACCOUNTS_LIMIT or len( self.term.target_service_accounts) > self._TERM_SERVICE_ACCOUNTS_LIMIT: raise GceFirewallError( 'GCE firewall rule exceeded number of service accounts per rule: %s' % self.term.name) if self.term.source_tag: if self.term.direction == 'INGRESS': term_dict['sourceTags'] = self.term.source_tag elif self.term.direction == 'EGRESS': term_dict['targetTags'] = self.term.source_tag if self.term.destination_tag and self.term.direction == 'INGRESS': term_dict['targetTags'] = self.term.destination_tag if self.term.source_service_accounts: if 'targetTags' in term_dict or 'sourceTags' in term_dict: raise GceFirewallError( 'sourceServiceAccounts cannot be used at the same time as targetTags or sourceTags: %s' % self.term.source_service_accounts) term_dict['sourceServiceAccounts'] = self.term.source_service_accounts if self.term.target_service_accounts: if 'targetTags' in term_dict or 'sourceTags' in term_dict: raise GceFirewallError( 'targetServiceAccounts cannot be used at the same time as targetTags or sourceTags: %s' % self.term.target_service_accounts) term_dict['targetServiceAccounts'] = self.term.target_service_accounts if self.term.priority: term_dict['priority'] = self.term.priority # Update term priority for the inet6 rule. if mixed_policy_inet6_term: term_dict['priority'] = GetNextPriority(term_dict['priority']) rules = [] # If 'mixed' ends up in indvidual term inet_version, something has gone # horribly wrong. The only valid values are inet/inet6. term_af = self.AF_MAP.get(self.inet_version) if self.inet_version == 'mixed': raise GceFirewallError( 'GCE firewall rule has incorrect inet_version for rule: %s' % self.term.name) # Exit early for inet6 processing of mixed rules that have only tags, # and no IP addresses, since this is handled in the inet processing. if mixed_policy_inet6_term: if not self.term.source_address and not self.term.destination_address: if 'targetTags' in term_dict or 'sourceTags' in term_dict: return [] saddrs = sorted(self.term.GetAddressOfVersion('source_address', term_af), key=ipaddress.get_mixed_type_key) saddrs = gcp.FilterIPv4InIPv6FormatAddrs(saddrs) daddrs = sorted( self.term.GetAddressOfVersion('destination_address', term_af), key=ipaddress.get_mixed_type_key) daddrs = gcp.FilterIPv4InIPv6FormatAddrs(daddrs) # If the address got filtered out and is empty due to address family, we # don't render the term. At this point of term processing, the direction # has already been validated, so we can just log and return empty rule. 
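    # e.g. during the inet6 pass of a 'mixed' policy, a term whose source
    # addresses are all IPv4 ends up with an empty saddrs list and is simply
    # skipped for that address family.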
if self.term.source_address and not saddrs: logging.warning( 'WARNING: Term %s is not being rendered for %s, ' 'because there are no addresses of that family.', self.term.name, self.inet_version) return [] if self.term.destination_address and not daddrs: logging.warning( 'WARNING: Term %s is not being rendered for %s, ' 'because there are no addresses of that family.', self.term.name, self.inet_version) return [] filtered_protocols = [] if not self.term.protocol: # Any protocol is represented as "all" filtered_protocols = ['all'] logging.info( 'INFO: Term %s has no protocol specified,' 'which is interpreted as "all" protocols.', self.term.name) proto_dict = copy.deepcopy(term_dict) if self.term.logging: proto_dict['logConfig'] = {'enable': True} for proto in self.term.protocol: # ICMP filtering by inet_version # Since each term has inet_version, 'mixed' is correctly processed here. # Convert protocol to number for uniformity of comparison. # PROTO_MAP always returns protocol number. if proto in self._ALLOW_PROTO_NAME: proto_num = self.PROTO_MAP[proto] else: proto_num = proto if proto_num == self.PROTO_MAP['icmp'] and self.inet_version == 'inet6': logging.warning( 'WARNING: Term %s is being rendered for inet6, ICMP ' 'protocol will not be rendered.', self.term.name) continue if proto_num == self.PROTO_MAP['icmpv6'] and self.inet_version == 'inet': logging.warning( 'WARNING: Term %s is being rendered for inet, ICMPv6 ' 'protocol will not be rendered.', self.term.name) continue if proto_num == self.PROTO_MAP['igmp'] and self.inet_version == 'inet6': logging.warning( 'WARNING: Term %s is being rendered for inet6, IGMP ' 'protocol will not be rendered.', self.term.name) continue filtered_protocols.append(proto) # If there is no protocol left after ICMP/IGMP filtering, drop this term. if not filtered_protocols: return [] for proto in filtered_protocols: # If the protocol name is not supported, protocol number is used. # This is done by default in policy.py. if proto not in self._ALLOW_PROTO_NAME: logging.info( 'INFO: Term %s is being rendered using protocol number', self.term.name) dest = { 'IPProtocol': proto } if self.term.destination_port: ports = [] for start, end in self.term.destination_port: if start == end: ports.append(str(start)) else: ports.append('%d-%d' % (start, end)) if len(ports) > self._TERM_PORTS_LIMIT: raise GceFirewallError( 'GCE firewall rule exceeded number of ports per rule: %s' % self.term.name) dest['ports'] = ports action = self.ACTION_MAP[self.term.action[0]] dict_val = [] if action in proto_dict: dict_val = proto_dict[action] if not isinstance(dict_val, list): dict_val = [dict_val] dict_val.append(dest) proto_dict[action] = dict_val # There's a limit of 256 addresses each term can contain. # If we're above that limit, we're breaking it down in more terms. 
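    # e.g. 600 source ranges become chunks of 256, 256 and 88, emitted as
    # three rules named '<term>-1', '<term>-2' and '<term>-3'; the numeric
    # suffix is only added when more than one chunk is needed.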
if saddrs: source_addr_chunks = [ saddrs[x:x+self._TERM_ADDRESS_LIMIT] for x in range( 0, len(saddrs), self._TERM_ADDRESS_LIMIT)] for i, chunk in enumerate(source_addr_chunks): rule = copy.deepcopy(proto_dict) if len(source_addr_chunks) > 1: rule['name'] = '%s-%d' % (rule['name'], i+1) rule['sourceRanges'] = [str(saddr) for saddr in chunk] rules.append(rule) elif daddrs: dest_addr_chunks = [ daddrs[x:x+self._TERM_ADDRESS_LIMIT] for x in range( 0, len(daddrs), self._TERM_ADDRESS_LIMIT)] for i, chunk in enumerate(dest_addr_chunks): rule = copy.deepcopy(proto_dict) if len(dest_addr_chunks) > 1: rule['name'] = '%s-%d' % (rule['name'], i+1) rule['destinationRanges'] = [str(daddr) for daddr in chunk] rules.append(rule) else: rules.append(proto_dict) # Sanity checking term name lengths. long_rules = [rule['name'] for rule in rules if len(rule['name']) > 63] if long_rules: raise GceFirewallError( 'GCE firewall name ended up being too long: %s' % long_rules) return rules class GCE(gcp.GCP): """A GCE firewall policy object.""" _PLATFORM = 'gce' SUFFIX = '.gce' _SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed')) _ANY_IP = { 'inet': nacaddr.IP('0.0.0.0/0'), 'inet6': nacaddr.IP('::/0'), } # Supported is 63 but we need to account for dynamic updates when the term # is rendered (which can add proto and a counter). _TERM_MAX_LENGTH = 53 _GOOD_DIRECTION = ['INGRESS', 'EGRESS'] _OPTIONAL_SUPPORTED_KEYWORDS = frozenset([ 'expiration', 'destination_tag', 'source_tag', 'source_service_accounts', 'target_service_accounts' ]) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, _ = super()._BuildTokens() # add extra things supported_tokens |= { 'destination_tag', 'expiration', 'owner', 'priority', 'source_tag', 'source_service_accounts', 'target_service_accounts' } # remove unsupported things supported_tokens -= {'icmp_type', 'verbatim'} # easier to make a new structure supported_sub_tokens = {'action': {'accept', 'deny'}} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.gce_policies = [] max_attribute_count = 0 total_attribute_count = 0 total_rule_count = 0 current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) network = '' direction = 'INGRESS' if filter_options: for i in self._GOOD_DIRECTION: if i in filter_options: direction = i filter_options.remove(i) # Get the address family if set. address_family = 'inet' for i in self._SUPPORTED_AF: if i in filter_options: address_family = i filter_options.remove(i) for opt in filter_options: try: max_attribute_count = int(opt) logging.info( 'Checking policy for max attribute count %d', max_attribute_count) filter_options.remove(opt) break except ValueError: continue if filter_options: network = filter_options[0] else: logging.warning('GCE filter does not specify a network.') term_names = set() if IsDefaultDeny(terms[-1]): terms[-1].protocol = ['all'] terms[-1].priority = 65534 if direction == 'EGRESS': if address_family != 'mixed': # Default deny also gets processed as part of terms processing. # The name and priority get updated there. 
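            # A single-family policy gets one catch-all prefix (0.0.0.0/0 or
            # ::/0); a 'mixed' policy gets both, handled in the else branch.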
terms[-1].destination_address = [self._ANY_IP[address_family]] else: terms[-1].destination_address = [self._ANY_IP['inet'], self._ANY_IP['inet6']] else: if address_family != 'mixed': terms[-1].source_address = [self._ANY_IP[address_family]] else: terms[-1].source_address = [ self._ANY_IP['inet'], self._ANY_IP['inet6'] ] for term in terms: if term.stateless_reply: logging.warning('WARNING: Term %s in policy %s is a stateless reply ' 'term and will not be rendered.', term.name, filter_name) continue term.network = network if not term.comment: term.comment = header.comment if direction == 'EGRESS': term.name += '-e' term.name = self.FixTermLength(term.name) if term.name in term_names: raise GceFirewallError('Duplicate term name %s' % term.name) term_names.add(term.name) term.direction = direction if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if term.option: raise GceFirewallError( 'GCE firewall does not support term options.') # Only generate the term if it's for the appropriate platform if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue # Handle mixed for each indvidual term as inet and inet6. # inet/inet6 are treated the same. term_address_families = [] if address_family == 'mixed': term_address_families = ['inet', 'inet6'] else: term_address_families = [address_family] for term_af in term_address_families: for rules in Term(term, term_af, address_family).ConvertToDict(): logging.debug('Attribute count of rule %s is: %d', term.name, GetAttributeCount(rules)) total_attribute_count += GetAttributeCount(rules) total_rule_count += 1 if max_attribute_count and total_attribute_count > max_attribute_count: # Stop processing rules as soon as the attribute count is over the # limit. raise ExceededAttributeCountError( 'Attribute count (%d) for %s exceeded the maximum (%d)' % (total_attribute_count, filter_name, max_attribute_count)) self.gce_policies.append(rules) logging.info('Total rule count of policy %s is: %d', filter_name, total_rule_count) logging.info('Total attribute count of policy %s is: %d', filter_name, total_attribute_count) def __str__(self): out = '%s\n\n' % (json.dumps(self.gce_policies, indent=2, separators=(six.ensure_str(','), six.ensure_str(': ')), sort_keys=True)) return out def GetAttributeCount(dict_term: Dict[str, Any]) -> int: """Calculate the attribute count of a term in its dictionary form. The attribute count of a rule is the sum of the number of ports, protocols, IP ranges, tags and target service account. Note: The goal of this function is not to determine if a term is valid, but to calculate its attribute count regardless of correctness. Args: dict_term: A dict object. Returns: int: The attribute count of the term. 
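
  Example (illustrative):
    A rule with two sourceRanges and a single allowed entry of
    {'IPProtocol': 'tcp', 'ports': ['22', '443']} counts
    2 (addresses) + 2 (ports) + 1 (ipProtocol) = 5 attributes.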
""" addresses = (len(dict_term.get('destinationRanges', [])) or len(dict_term.get('sourceRanges', []))) proto_ports = 0 for allowed in dict_term.get('allowed', []): proto_ports += len(allowed.get('ports', [])) + 1 # 1 for ipProtocol for denied in dict_term.get('denied', []): proto_ports += len(denied.get('ports', [])) + 1 # 1 for ipProtocol tags = 0 for _ in dict_term.get('sourceTags', []): tags += 1 for _ in dict_term.get('targetTags', []): tags += 1 service_accounts = 0 for _ in dict_term.get('sourceServiceAccounts', []): service_accounts += 1 for _ in dict_term.get('targetServiceAccounts', []): service_accounts += 1 return addresses + proto_ports + tags + service_accounts capirca-2.0.9/capirca/lib/gce_vpc_tf.py000066400000000000000000000607751437377527500200100ustar00rootroot00000000000000# Copyright 2022 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Terraform GCE config generator. More information about Terraform and its config syntax: https://developer.hashicorp.com/terraform/language/syntax/json """ import copy import datetime import ipaddress import json import logging import re from typing import Dict, Any from capirca.lib import gcp from capirca.lib import nacaddr import six class Error(Exception): """Generic error class.""" class TerraformFirewallError(Error): """Raised with problems in formatting for GCE firewall.""" class ExceededAttributeCountError(Error): """Raised when the total attribute count of a policy is above the maximum.""" def IsDefaultDeny(term): """Returns true if a term is a default deny without IPs, ports, etc.""" skip_attrs = [ 'flattened', 'flattened_addr', 'flattened_saddr', 'flattened_daddr', 'action', 'comment', 'name', 'logging' ] if 'deny' not in term.action: return False # This lc will look through all methods and attributes of the object. # It returns only the attributes that need to be looked at to determine if # this is a default deny. for i in [ a for a in dir(term) if not a.startswith('__') and a.islower() and not callable(getattr(term, a)) ]: if i in skip_attrs: continue v = getattr(term, i) if isinstance(v, str) and v: return False if isinstance(v, list) and v: return False return True def GetNextPriority(priority): """Get the priority for the next rule.""" return priority class Term(gcp.Term): """Creates the term for the Terraform GCE config.""" ACTION_MAP = {'accept': 'allow', 'deny': 'deny'} # Restrict the number of addresses per term to 256. # Similar restrictions apply to source and target tags, and ports. # Details: https://cloud.google.com/vpc/docs/quota#per_network_2 _TERM_ADDRESS_LIMIT = 256 _TERM_SOURCE_TAGS_LIMIT = 30 _TERM_TARGET_TAGS_LIMIT = 70 _TERM_SERVICE_ACCOUNTS_LIMIT = 10 _TERM_PORTS_LIMIT = 256 # Firewall rule name has to match specific RE: # The first character must be a lowercase letter, and all following characters # must be a dash, lowercase letter, or digit, except the last character, which # cannot be a dash. 
# Details: https://cloud.google.com/compute/docs/reference/latest/firewalls _TERM_NAME_RE = re.compile(r'^[a-z]([-a-z0-9]*[a-z0-9])?$') # Protocols allowed by name from: # https://cloud.google.com/vpc/docs/firewalls#protocols_and_ports _ALLOW_PROTO_NAME = frozenset([ 'tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp', 'all' # Needed for default deny, do not use in policy file. ]) # Any protocol not in _ALLOW_PROTO_NAME must be passed by number. ALWAYS_PROTO_NUM = set(gcp.Term.PROTO_MAP.keys()) - _ALLOW_PROTO_NAME #DENY_ALL_PRIORITY = 65534 def __init__(self, term, inet_version='inet', policy_inet_version='inet'): super().__init__(term) self.term = term self.inet_version = inet_version # This is to handle mixed, where the policy_inet_version is mixed, # but the term inet version is either inet/inet6. # This is only useful for term name and priority. self.policy_inet_version = policy_inet_version self._validateDirection() if self.term.source_address_exclude and not self.term.source_address: raise TerraformFirewallError( 'GCE firewall does not support address exclusions without a source ' 'address list.') # The reason for the error below isn't because of a GCE restriction, but # because we don't want to use a bad default of GCE that allows talking # to anything when there's no source address, source tag, or source service # account. if (not self.term.source_address and not self.term.source_tag) and self.term.direction == 'INGRESS': raise TerraformFirewallError( 'GCE firewall needs either to specify source address or source tags.') if self.term.source_port: raise TerraformFirewallError( 'GCE firewall does not support source port restrictions.') if (self.term.source_address_exclude and self.term.source_address or self.term.destination_address_exclude and self.term.destination_address): self.term.FlattenAll() if not self.term.source_address and self.term.direction == 'INGRESS': raise TerraformFirewallError( 'GCE firewall rule no longer contains any source addresses after ' 'the prefixes in source_address_exclude were removed.') # Similarly to the comment above, the reason for this error is also # because we do not want to use the bad default of GCE that allows for # talking to anything when there is no IP address provided for this field. if not self.term.destination_address and self.term.direction == 'EGRESS': raise TerraformFirewallError( 'GCE firewall rule no longer contains any destination addresses ' 'after the prefixes in destination_address_exclude were removed.') def __str__(self): """Convert term to a string.""" json.dumps( self.ConvertToDict(priority_index=1), indent=2, separators=(six.ensure_str(','), six.ensure_str(': '))) def _validateDirection(self): if self.term.direction == 'INGRESS': if not self.term.source_address and not self.term.source_tag: raise TerraformFirewallError( 'Ingress rule missing required field oneof "source_ranges" or ' '"source_tags".') if self.term.destination_address: raise TerraformFirewallError('Ingress rules cannot include ' '"destination_ranges.') elif self.term.direction == 'EGRESS': if self.term.source_address: raise TerraformFirewallError( 'Egress rules cannot include "source_ranges".') if not self.term.destination_address: raise TerraformFirewallError( 'Egress rule missing required field "destination_ranges".') if self.term.destination_tag: raise TerraformFirewallError( 'GCE Egress rule cannot have destination tag.') def ConvertToDict(self, priority_index): """Convert term to a dictionary. 
This is used to get a dictionary describing this term which can be output easily as a JSON blob. Args: priority_index: An integer priority value assigned to the term. Returns: A dictionary that contains all fields necessary to create or update a GCE firewall. Raises: TerraformFirewallError: The term name is too long. """ if self.term.owner: self.term.comment.append('Owner: %s' % self.term.owner) term_dict = { 'description': ' '.join(self.term.comment), 'name': self.term.name, 'direction': self.term.direction, 'priority': priority_index } if self.term.network: term_dict['network'] = self.term.network term_dict['name'] = '%s-%s' % (self.term.network.split('/')[-1], term_dict['name']) # Identify if this is inet6 processing for a term under a mixed policy. mixed_policy_inet6_term = False if self.policy_inet_version == 'mixed' and self.inet_version == 'inet6': mixed_policy_inet6_term = True # Update term name to have the IPv6 suffix for the inet6 rule. if mixed_policy_inet6_term: term_dict['name'] = gcp.GetIpv6TermName(term_dict['name']) # Checking counts of tags, and ports to see if they exceeded limits. if len(self.term.source_tag) > self._TERM_SOURCE_TAGS_LIMIT: raise TerraformFirewallError( 'GCE firewall rule exceeded number of source tags per rule: %s' % self.term.name) if len(self.term.destination_tag) > self._TERM_TARGET_TAGS_LIMIT: raise TerraformFirewallError( 'GCE firewall rule exceeded number of target tags per rule: %s' % self.term.name) if len( self.term.source_service_accounts ) > self._TERM_SERVICE_ACCOUNTS_LIMIT or len( self.term.target_service_accounts) > self._TERM_SERVICE_ACCOUNTS_LIMIT: raise TerraformFirewallError( 'GCE firewall rule exceeded number of service accounts per rule: %s' % self.term.name) if self.term.source_tag: if self.term.direction == 'INGRESS': term_dict['source_tags'] = self.term.source_tag elif self.term.direction == 'EGRESS': term_dict['target_tags'] = self.term.source_tag if self.term.destination_tag and self.term.direction == 'INGRESS': term_dict['target_tags'] = self.term.destination_tag if self.term.source_service_accounts: if 'target_tags' in term_dict or 'source_tags' in term_dict: raise TerraformFirewallError( 'source_service_accounts cannot be used at the same time as target_tags or source_tags: %s' % self.term.source_service_accounts) term_dict['source_service_accounts'] = self.term.source_service_accounts if self.term.target_service_accounts: if 'target_tags' in term_dict or 'source_tags' in term_dict: raise TerraformFirewallError( 'target_service_accounts cannot be used at the same time as target_tags or source_tags: %s' % self.term.target_service_accounts) term_dict['target_service_accounts'] = self.term.target_service_accounts # If priority is explicitly set, it'll be used. Otherwise the sequence will # be incremented. if self.term.priority: term_dict['priority'] = self.term.priority rules = [] # If 'mixed' ends up in indvidual term inet_version, something has gone # horribly wrong. The only valid values are inet/inet6. term_af = self.AF_MAP.get(self.inet_version) if self.inet_version == 'mixed': raise TerraformFirewallError( 'GCE firewall rule has incorrect inet_version for rule: %s' % self.term.name) # Exit early for inet6 processing of mixed rules that have only tags, # and no IP addresses, since this is handled in the inet processing. 
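# A minimal, self-contained sketch of the skip condition described above,
# assuming a simplified view of a term (three booleans instead of the full
# policy.Term object). It is illustrative only and not used by this class:
# the inet pass already emits the tag-only rule, so the inet6 twin would be
# an exact duplicate and is dropped.
def _sketch_skip_v6_twin(has_source_addrs, has_dest_addrs, has_tags):
  return not has_source_addrs and not has_dest_addrs and has_tags
# _sketch_skip_v6_twin(False, False, True)  -> True  (tag-only term, skip)
# _sketch_skip_v6_twin(True, False, True)   -> False (addresses present, keep)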
if mixed_policy_inet6_term: if not self.term.source_address and not self.term.destination_address: if 'target_tags' in term_dict or 'source_tags' in term_dict: return [] saddrs = sorted( self.term.GetAddressOfVersion('source_address', term_af), key=ipaddress.get_mixed_type_key) saddrs = gcp.FilterIPv4InIPv6FormatAddrs(saddrs) daddrs = sorted( self.term.GetAddressOfVersion('destination_address', term_af), key=ipaddress.get_mixed_type_key) daddrs = gcp.FilterIPv4InIPv6FormatAddrs(daddrs) # If the address got filtered out and is empty due to address family, we # don't render the term. At this point of term processing, the direction # has already been validated, so we can just log and return empty rule. if self.term.source_address and not saddrs: logging.warning( 'WARNING: Term %s is not being rendered for %s, ' 'because there are no addresses of that family.', self.term.name, self.inet_version) return [] if self.term.destination_address and not daddrs: logging.warning( 'WARNING: Term %s is not being rendered for %s, ' 'because there are no addresses of that family.', self.term.name, self.inet_version) return [] filtered_protocols = [] if not self.term.protocol: # Any protocol is represented as "all" filtered_protocols = ['all'] logging.info( 'INFO: Term %s has no protocol specified,' 'which is interpreted as "all" protocols.', self.term.name) proto_dict = copy.deepcopy(term_dict) if self.term.logging: proto_dict['log_config'] = {'metadata': 'INCLUDE_ALL_METADATA'} for proto in self.term.protocol: # ICMP filtering by inet_version # Since each term has inet_version, 'mixed' is correctly processed here. # Convert protocol to number for uniformity of comparison. # PROTO_MAP always returns protocol number. if proto in self._ALLOW_PROTO_NAME: proto_num = self.PROTO_MAP[proto] else: proto_num = proto if proto_num == self.PROTO_MAP['icmp'] and self.inet_version == 'inet6': logging.warning( 'WARNING: Term %s is being rendered for inet6, ICMP ' 'protocol will not be rendered.', self.term.name) continue if proto_num == self.PROTO_MAP['icmpv6'] and self.inet_version == 'inet': logging.warning( 'WARNING: Term %s is being rendered for inet, ICMPv6 ' 'protocol will not be rendered.', self.term.name) continue if proto_num == self.PROTO_MAP['igmp'] and self.inet_version == 'inet6': logging.warning( 'WARNING: Term %s is being rendered for inet6, IGMP ' 'protocol will not be rendered.', self.term.name) continue filtered_protocols.append(proto) # If there is no protocol left after ICMP/IGMP filtering, drop this term. if not filtered_protocols: return [] for proto in filtered_protocols: # If the protocol name is not supported, protocol number is used. # This is done by default in policy.py. if proto not in self._ALLOW_PROTO_NAME: logging.info('INFO: Term %s is being rendered using protocol number', self.term.name) dest = {'protocol': proto} if self.term.destination_port: ports = [] for start, end in self.term.destination_port: if start == end: ports.append(str(start)) else: ports.append('%d-%d' % (start, end)) if len(ports) > self._TERM_PORTS_LIMIT: raise TerraformFirewallError( 'GCE firewall rule exceeded number of ports per rule: %s' % self.term.name) dest['ports'] = ports action = self.ACTION_MAP[self.term.action[0]] dict_val = [] if action in proto_dict: dict_val = proto_dict[action] if not isinstance(dict_val, list): dict_val = [dict_val] dict_val.append(dest) proto_dict[action] = dict_val # There's a limit of 256 addresses each term can contain. 
# If we're above that limit, we're breaking it down in more terms. if saddrs: source_addr_chunks = [ saddrs[x:x + self._TERM_ADDRESS_LIMIT] for x in range(0, len(saddrs), self._TERM_ADDRESS_LIMIT) ] for i, chunk in enumerate(source_addr_chunks): rule = copy.deepcopy(proto_dict) if len(source_addr_chunks) > 1: rule['name'] = '%s-%d' % (rule['name'], i + 1) rule['source_ranges'] = [str(saddr) for saddr in chunk] # if rule[ # 'priority'] != Term.DENY_ALL_PRIORITY: # If not the deny-all rule. rule['priority'] = priority_index priority_index += 1 rules.append(rule) elif daddrs: dest_addr_chunks = [ daddrs[x:x + self._TERM_ADDRESS_LIMIT] for x in range(0, len(daddrs), self._TERM_ADDRESS_LIMIT) ] for i, chunk in enumerate(dest_addr_chunks): rule = copy.deepcopy(proto_dict) if len(dest_addr_chunks) > 1: rule['name'] = '%s-%d' % (rule['name'], i + 1) rule['destination_ranges'] = [str(daddr) for daddr in chunk] # if rule[ # 'priority'] != Term.DENY_ALL_PRIORITY: # If not the deny-all rule. rule['priority'] = priority_index priority_index += 1 rules.append(rule) else: rules.append(proto_dict) # Sanity checking term name lengths. long_rules = [rule['name'] for rule in rules if len(rule['name']) > 63] if long_rules: raise TerraformFirewallError( 'GCE firewall name ended up being too long: %s' % long_rules) # Attach the name of the rule to the inner array. new_rule = {} final_rules = [] for rule in rules: rule_name = rule['name'] new_rule[rule_name] = rule final_rules.append(new_rule) return final_rules class TerraformGCE(gcp.GCP): """A Terraform GCE config.""" _PLATFORM = 'gce_vpc_tf' SUFFIX = '.tf.json' # NOTE: While this code will generate inet6/mixed rules, terraform doesn't # currently support this (but hopefully will soon, which is why it is coded). _SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed')) _ANY_IP = { 'inet': nacaddr.IP('0.0.0.0/0'), 'inet6': nacaddr.IP('::/0'), } # Supported is 63 but we need to account for dynamic updates when the term # is rendered (which can add proto and a counter). _TERM_MAX_LENGTH = 53 _GOOD_DIRECTION = ['INGRESS', 'EGRESS'] _OPTIONAL_SUPPORTED_KEYWORDS = frozenset([ 'expiration', 'destination_tag', 'source_tag', 'source_service_accounts', 'target_service_accounts' ]) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, _ = super()._BuildTokens() # add extra things supported_tokens |= { 'destination_tag', 'expiration', 'owner', 'priority', 'source_tag', 'source_service_accounts', 'target_service_accounts' } # remove unsupported things supported_tokens -= {'icmp_type', 'verbatim'} # easier to make a new structure supported_sub_tokens = {'action': {'accept', 'deny'}} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): # Add "google_compute_firewall" label to the outer json dict. self.terraform_resources = {'google_compute_firewall': []} max_attribute_count = 0 total_attribute_count = 0 total_rule_count = 0 priority_counter = 1 current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) network = '' direction = 'INGRESS' if filter_options: for i in self._GOOD_DIRECTION: if i in filter_options: direction = i filter_options.remove(i) # Get the address family if set. 
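# A self-contained sketch of how the remaining header options are consumed,
# assuming a simple list of strings: direction first, then the address
# family, then an optional integer attribute budget, with the leftover token
# naming the network. The helper and its example values are illustrative
# only; the real code below operates on filter_options in place and raises
# TerraformFirewallError when no network token remains.
def _sketch_parse_filter_options(options):
  opts = list(options)
  direction = 'INGRESS'
  for candidate in ('INGRESS', 'EGRESS'):
    if candidate in opts:
      direction = candidate
      opts.remove(candidate)
  family = 'inet'
  for candidate in ('inet', 'inet6', 'mixed'):
    if candidate in opts:
      family = candidate
      opts.remove(candidate)
  max_attributes = 0
  for opt in list(opts):
    try:
      max_attributes = int(opt)
      opts.remove(opt)
      break
    except ValueError:
      continue
  network = opts[0] if opts else ''
  return direction, family, max_attributes, network
# _sketch_parse_filter_options(['EGRESS', 'inet', '100', 'my-vpc'])
#   -> ('EGRESS', 'inet', 100, 'my-vpc')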
address_family = 'inet' for i in self._SUPPORTED_AF: if i in ['inet6', 'mixed']: logging.error('Terraform does not currently support ipv6!') if i in filter_options: address_family = i filter_options.remove(i) for opt in filter_options: try: max_attribute_count = int(opt) logging.info('Checking policy for max attribute count %d', max_attribute_count) filter_options.remove(opt) break except ValueError: continue if filter_options: network = filter_options[0] else: raise TerraformFirewallError('GCE filter does not specify a network.') term_names = set() if IsDefaultDeny(terms[-1]): terms[-1].protocol = ['all'] #terms[-1].priority = Term.DENY_ALL_PRIORITY if direction == 'EGRESS': if address_family != 'mixed': # Default deny also gets processed as part of terms processing. # The name and priority get updated there. terms[-1].destination_address = [self._ANY_IP[address_family]] else: terms[-1].destination_address = [ self._ANY_IP['inet'], self._ANY_IP['inet6'] ] else: if address_family != 'mixed': terms[-1].source_address = [self._ANY_IP[address_family]] else: terms[-1].source_address = [ self._ANY_IP['inet'], self._ANY_IP['inet6'] ] for term in terms: if term.stateless_reply: logging.warning( 'WARNING: Term %s in policy %s is a stateless reply ' 'term and will not be rendered.', term.name, filter_name) continue term.network = network if not term.comment: term.comment = header.comment if direction == 'EGRESS': term.name += '-e' term.name = self.FixTermLength(term.name) if term.name in term_names: raise TerraformFirewallError('Duplicate term name %s' % term.name) term_names.add(term.name) term.direction = direction if term.expiration: if term.expiration <= exp_info_date: logging.info( 'INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if term.option: raise TerraformFirewallError( 'GCE firewall does not support term options.') # Only generate the term if it's for the appropriate platform if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue # Handle mixed for each indvidual term as inet and inet6. # inet/inet6 are treated the same. term_address_families = [] if address_family == 'mixed': term_address_families = ['inet', 'inet6'] else: term_address_families = [address_family] for term_af in term_address_families: for rules in Term( term, term_af, address_family).ConvertToDict(priority_index=priority_counter): for rule_key, rule_dict in rules.items(): logging.debug('Attribute count of rule %s is: %d', rule_key, GetAttributeCount(rule_dict)) total_attribute_count += GetAttributeCount(rule_dict) total_rule_count += 1 if max_attribute_count and total_attribute_count > max_attribute_count: # Stop processing rules as soon as the attribute count is over the # limit. 
raise ExceededAttributeCountError( 'Attribute count (%d) for %s exceeded the maximum (%d)' % (total_attribute_count, filter_name, max_attribute_count)) self.terraform_resources['google_compute_firewall'].append(rules) priority_counter += len(rules) logging.info('Total rule count of policy %s is: %d', filter_name, total_rule_count) logging.info('Total attribute count of policy %s is: %d', filter_name, total_attribute_count) self.resource_wrapper = {'resource': self.terraform_resources} def __str__(self): out = '%s\n\n' % ( json.dumps( self.resource_wrapper, indent=2, separators=(six.ensure_str(','), six.ensure_str(': ')), sort_keys=True)) return out def GetAttributeCount(dict_term: Dict[str, Any]) -> int: """Calculate the attribute count of a term in its dictionary form. The attribute count of a rule is the sum of the number of ports, protocols, IP ranges, tags and target service account. Note: The goal of this function is not to determine if a term is valid, but to calculate its attribute count regardless of correctness. Args: dict_term: A dict object. Returns: int: The attribute count of the term. """ addresses = ( len(dict_term.get('destination_ranges', [])) or len(dict_term.get('source_ranges', []))) proto_ports = 0 for allowed in dict_term.get('allow', []): proto_ports += len(allowed.get('ports', [])) + 1 # 1 for ipProtocol for denied in dict_term.get('deny', []): proto_ports += len(denied.get('ports', [])) + 1 # 1 for ipProtocol tags = 0 for _ in dict_term.get('source_tags', []): tags += 1 for _ in dict_term.get('target_tags', []): tags += 1 service_accounts = 0 for _ in dict_term.get('source_service_accounts', []): service_accounts += 1 for _ in dict_term.get('target_service_accounts', []): service_accounts += 1 return addresses + proto_ports + tags + service_accounts capirca-2.0.9/capirca/lib/gcp.py000066400000000000000000000116111437377527500164430ustar00rootroot00000000000000"""Generic Google Cloud Platform multi-product generator. Base class for GCP firewalling products. """ import ipaddress import json import re from capirca.lib import aclgenerator import six class Error(Exception): """Generic error class.""" class TermError(Error): """Raised when a term is not valid.""" class HeaderError(Error): """Raised when a header is not valid.""" class UnsupportedFilterTypeError(Error): """Raised when an unsupported filter type is specified.""" class Term(aclgenerator.Term): """A Term object.""" # Protocols allowed by name from: # https://cloud.google.com/vpc/docs/firewalls#protocols_and_ports # 'all' is needed for the dedault deny, it should not be used in a pol file. _ALLOW_PROTO_NAME = frozenset( ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp', 'all']) def _GetPorts(self): """Return a port or port range in string format.""" ports = [] for start, end in self.term.destination_port: if start == end: ports.append(str(start)) else: ports.append('%d-%d' % (start, end)) return ports def _GetLoggingSetting(self): """Return true if a term indicates that logging is desired.""" # Supported values in GCP are '', 'true', and 'True'. 
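# A tiny standalone illustration of that check, assuming the term's logging
# values have already been collected into a list of strings. Anything other
# than 'true'/'True' (for example an empty string) leaves GCP logging off.
# The helper is illustrative only.
def _sketch_wants_gcp_logging(logging_values):
  settings = [str(value) for value in logging_values]
  return any(value in settings for value in ('true', 'True'))
# _sketch_wants_gcp_logging(['true'])  -> True
# _sketch_wants_gcp_logging([''])      -> False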
settings = [str(x) for x in self.term.logging] if any(value in settings for value in ['true', 'True']): return True return False class GCP(aclgenerator.ACLGenerator): """A GCP object.""" policies = [] _GOOD_DIRECTION = ['INGRESS', 'EGRESS'] def __str__(self): """Return the JSON blob for a GCP object.""" out = '%s\n\n' % ( json.dumps( self.policies, indent=2, separators=(six.ensure_str(','), six.ensure_str(': ')), sort_keys=True)) return out def IsDefaultDeny(term): """Return true if a term is a default deny without IPs, ports, etc.""" skip_attrs = [ 'flattened', 'flattened_addr', 'flattened_saddr', 'flattened_daddr', 'action', 'comment', 'name', 'logging' ] if 'deny' not in term.action: return False # This lc will look through all methods and attributes of the object. # It returns only the attributes that need to be looked at to determine if # this is a default deny. for i in [ a for a in dir(term) if not a.startswith('__') and a.islower() and not callable(getattr(term, a)) ]: if i in skip_attrs: continue v = getattr(term, i) if isinstance(v, str) and v: return False if isinstance(v, list) and v: return False return True def IsProjectIDValid(project): """Return true if a project ID is valid. https://cloud.google.com/resource-manager/reference/rest/v1/projects "It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited." Args: project: A string. Returns: bool: True if a project ID matches the pattern and length requirements. """ if len(project) < 6 or len(project) > 30: return False return bool(re.match('^[a-z][a-z0-9\\-]*[a-z0-9]$', project)) def IsVPCNameValid(vpc): """Return true if a VPC name is valid. https://cloud.google.com/compute/docs/reference/rest/v1/networks "The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit." Args: vpc: A string. Returns: bool: True if a VPC name matches the pattern and length requirements. """ if len(vpc) < 1 or len(vpc) > 63: return False return bool(re.match('^[a-z]$|^[a-z][a-z0-9-]*[a-z0-9]$', vpc)) def TruncateString(raw_string, max_length): """Returns truncated raw_string based on max length. Args: raw_string: String to be truncated. max_length: max length of string. Returns: string: The truncated string. """ if len(raw_string) > max_length: return raw_string[:max_length] return raw_string def GetIpv6TermName(term_name): """Returns the equivalent term name for IPv6 terms. Args: term_name: A string. Returns: string: The IPv6 requivalent term name. """ return '%s-%s' % (term_name, 'v6') def FilterIPv4InIPv6FormatAddrs(addrs): """Returns addresses of the appropriate Address Family. Args: addrs: list of IP addresses. Returns: list of filtered IPs with no IPv4 in IPv6 format addresses. """ filtered = [] for addr in addrs: ipaddr = ipaddress.ip_interface(addr).ip if isinstance(ipaddr, ipaddress.IPv6Address): ipv6 = ipaddress.IPv6Address(ipaddr) # Check if it's an IPv4-mapped or 6to4 address. if ipv6.ipv4_mapped is not None or ipv6.sixtofour is not None: continue # Check if it's an IPv4-compatible address. if ipv6.packed.hex( )[:24] == '000000000000000000000000' and not ipv6.is_unspecified: continue filtered += [addr] return filtered capirca-2.0.9/capirca/lib/gcp_hf.py000066400000000000000000000504031437377527500171220ustar00rootroot00000000000000"""Google Cloud Hierarchical Firewall Generator. 
Hierarchical Firewalls (HF) are represented in a SecurityPolicy GCP resouce. """ import copy import re from typing import Dict, Any from absl import logging from capirca.lib import gcp from capirca.lib import nacaddr class ExceededCostError(gcp.Error): """Raised when the total cost of a policy is above the maximum.""" class DifferentPolicyNameError(gcp.Error): """Raised when headers in the same policy have a different policy name.""" class ApiVersionSyntaxMap: """Defines the syntax changes between different API versions. http://cloud/compute/docs/reference/rest/v1/firewallPolicies/addRule http://cloud/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule """ SYNTAX_MAP = { 'beta': { 'display_name': 'displayName', 'dest_ip_range': 'destIpRanges', 'src_ip_range': 'srcIpRanges', 'layer_4_config': 'layer4Configs' }, 'ga': { 'display_name': 'shortName', 'dest_ip_range': 'destIpRanges', 'src_ip_range': 'srcIpRanges', 'layer_4_config': 'layer4Configs' } } class Term(gcp.Term): """Used to create an individual term.""" ACTION_MAP = {'accept': 'allow', 'next': 'goto_next'} _MAX_TERM_COMMENT_LENGTH = 64 _TARGET_RESOURCE_FORMAT = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}' _TERM_ADDRESS_LIMIT = 256 _TERM_TARGET_RESOURCES_LIMIT = 256 _TERM_DESTINATION_PORTS_LIMIT = 256 def __init__(self, term, address_family='inet', policy_inet_version='inet', api_version='beta'): super().__init__(term) self.address_family = address_family self.term = term self.skip = False self._ValidateTerm() self.api_version = api_version # This is to handle mixed, where the policy_inet_version is mixed, # but the term inet version is either inet/inet6. # This is only useful for term name and priority. self.policy_inet_version = policy_inet_version def _ValidateTerm(self): if self.term.destination_tag or self.term.source_tag: raise gcp.TermError('Hierarchical Firewall does not support tags') if len(self.term.target_resources) > self._TERM_TARGET_RESOURCES_LIMIT: raise gcp.TermError( 'Term: %s target_resources field contains %s resources. It should not contain more than "%s".' % (self.term.name, str(len( self.term.target_resources)), self._TERM_TARGET_RESOURCES_LIMIT)) for proj, vpc in self.term.target_resources: if not gcp.IsProjectIDValid(proj): raise gcp.TermError( 'Project ID "%s" must be 6 to 30 lowercase letters, digits, or hyphens.' ' It must start with a letter. Trailing hyphens are prohibited.' % proj) if not gcp.IsVPCNameValid(vpc): raise gcp.TermError('VPC name "%s" must start with a lowercase letter ' 'followed by up to 62 lowercase letters, numbers, ' 'or hyphens, and cannot end with a hyphen.' % vpc) if self.term.source_port: raise gcp.TermError('Hierarchical firewall does not support source port ' 'restrictions.') if self.term.option: raise gcp.TermError('Hierarchical firewall does not support the ' 'TCP_ESTABLISHED option.') if len(self.term.destination_port) > self._TERM_DESTINATION_PORTS_LIMIT: raise gcp.TermError( 'Term: %s destination_port field contains %s ports. It should not contain more than "%s".' % (self.term.name, str(len( self.term.destination_port)), self._TERM_DESTINATION_PORTS_LIMIT)) # Since policy_inet_version is used to handle 'mixed'. # We should error out if the individual term's inet version (address_family) # is anything other than inet/inet6, since this should never happen # naturally. Something has gone horribly wrong if you encounter this error. 
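# A short sketch of why that case is unreachable in normal operation,
# assuming the expansion performed later in _TranslatePolicy: a 'mixed'
# policy is split into one inet pass and one inet6 pass before each Term is
# constructed, so the per-term address_family is always concrete. The helper
# is illustrative only.
def _sketch_term_address_families(policy_inet_version):
  if policy_inet_version == 'mixed':
    return ['inet', 'inet6']
  return [policy_inet_version]
# _sketch_term_address_families('mixed') -> ['inet', 'inet6']
# _sketch_term_address_families('inet6') -> ['inet6']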
if self.address_family == 'mixed': raise gcp.TermError( 'Hierarchical firewall rule has incorrect inet_version for rule: %s' % self.term.name) def ConvertToDict(self, priority_index): """Converts term to dict representation of SecurityPolicy.Rule JSON format. Takes all of the attributes associated with a term (match, action, etc) and converts them into a dictionary which most closely represents the SecurityPolicy.Rule JSON format. Args: priority_index: An integer priority value assigned to the term. Returns: A dict term. """ if self.skip: return {} rules = [] # Identify if this is inet6 processing for a term under a mixed policy. mixed_policy_inet6_term = False if self.policy_inet_version == 'mixed' and self.address_family == 'inet6': mixed_policy_inet6_term = True term_dict = { 'action': self.ACTION_MAP.get(self.term.action[0], self.term.action[0]), 'direction': self.term.direction, 'priority': priority_index } # Get the correct syntax for API versions. src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[ self.api_version]['src_ip_range'] dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[ self.api_version]['dest_ip_range'] layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[ self.api_version]['layer_4_config'] target_resources = [] for proj, vpc in self.term.target_resources: target_resources.append(self._TARGET_RESOURCE_FORMAT.format(proj, vpc)) if target_resources: # Only set when non-empty. term_dict['targetResources'] = target_resources term_dict['enableLogging'] = self._GetLoggingSetting() # This combo provides ability to identify the rule. term_name = self.term.name if mixed_policy_inet6_term: term_name = gcp.GetIpv6TermName(term_name) raw_description = term_name + ': ' + ' '.join(self.term.comment) term_dict['description'] = gcp.TruncateString(raw_description, self._MAX_TERM_COMMENT_LENGTH) filtered_protocols = [] for proto in self.term.protocol: # ICMP filtering by inet_version # Since each term has inet_version, 'mixed' is correctly processed here. if proto == 'icmp' and self.address_family == 'inet6': logging.warning( 'WARNING: Term %s is being rendered for inet6, ICMP ' 'protocol will not be rendered.', self.term.name) continue if proto == 'icmpv6' and self.address_family == 'inet': logging.warning( 'WARNING: Term %s is being rendered for inet, ICMPv6 ' 'protocol will not be rendered.', self.term.name) continue if proto == 'igmp' and self.address_family == 'inet6': logging.warning( 'WARNING: Term %s is being rendered for inet6, IGMP ' 'protocol will not be rendered.', self.term.name) continue filtered_protocols.append(proto) # If there is no protocol left after ICMP/IGMP filtering, drop this term. # But only do this for terms that originally had protocols. # Otherwise you end up dropping the default-deny. if self.term.protocol and not filtered_protocols: return {} protocols_and_ports = [] if not self.term.protocol: # Empty protocol list means any protocol, but any protocol in HF is # represented as "all" protocols_and_ports = [{'ipProtocol': 'all'}] else: for proto in filtered_protocols: # If the protocol name is not supported, use the protocol number. if proto not in self._ALLOW_PROTO_NAME: proto = str(self.PROTO_MAP[proto]) logging.info('INFO: Term %s is being rendered using protocol number', self.term.name) proto_ports = {'ipProtocol': proto} if self.term.destination_port: ports = self._GetPorts() if ports: # Only set when non-empty. 
proto_ports['ports'] = ports protocols_and_ports.append(proto_ports) if self.api_version == 'ga': term_dict['match'] = {layer_4_config: protocols_and_ports} else: term_dict['match'] = {'config': {layer_4_config: protocols_and_ports}} # match needs a field called versionedExpr with value FIREWALL # See documentation: # https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule term_dict['match']['versionedExpr'] = 'FIREWALL' ip_version = self.AF_MAP[self.address_family] if ip_version == 4: any_ip = [nacaddr.IP('0.0.0.0/0')] else: any_ip = [nacaddr.IPv6('::/0')] if self.term.direction == 'EGRESS': daddrs = self.term.GetAddressOfVersion('destination_address', ip_version) daddrs = gcp.FilterIPv4InIPv6FormatAddrs(daddrs) # If the address got filtered out and is empty due to address family, we # don't render the term. At this point of term processing, the direction # has already been validated, so we can just log and return empty rule. if self.term.destination_address and not daddrs: logging.warning( 'WARNING: Term %s is not being rendered for %s, ' 'because there are no addresses of that family.', self.term.name, self.address_family) return [] # This should only happen if there were no addresses set originally. if not daddrs: daddrs = any_ip destination_address_chunks = [ daddrs[x:x + self._TERM_ADDRESS_LIMIT] for x in range(0, len(daddrs), self._TERM_ADDRESS_LIMIT) ] for daddr_chunk in destination_address_chunks: rule = copy.deepcopy(term_dict) if self.api_version == 'ga': rule['match'][dest_ip_range] = [ daddr.with_prefixlen for daddr in daddr_chunk ] else: rule['match']['config'][dest_ip_range] = [ daddr.with_prefixlen for daddr in daddr_chunk ] rule['priority'] = priority_index rules.append(rule) priority_index += 1 else: saddrs = gcp.FilterIPv4InIPv6FormatAddrs( self.term.GetAddressOfVersion('source_address', ip_version)) # If the address got filtered out and is empty due to address family, we # don't render the term. At this point of term processing, the direction # has already been validated, so we can just log and return empty rule. if self.term.source_address and not saddrs: logging.warning( 'WARNING: Term %s is not being rendered for %s, ' 'because there are no addresses of that family.', self.term.name, self.address_family) return [] # This should only happen if there were no addresses set originally. if not saddrs: saddrs = any_ip source_address_chunks = [ saddrs[x:x + self._TERM_ADDRESS_LIMIT] for x in range(0, len(saddrs), self._TERM_ADDRESS_LIMIT) ] for saddr_chunk in source_address_chunks: rule = copy.deepcopy(term_dict) if self.api_version == 'ga': rule['match'][src_ip_range] = [ saddr.with_prefixlen for saddr in saddr_chunk ] else: rule['match']['config'][src_ip_range] = [ saddr.with_prefixlen for saddr in saddr_chunk ] rule['priority'] = priority_index rules.append(rule) priority_index += 1 return rules def __str__(self): return '' class HierarchicalFirewall(gcp.GCP): """A GCP Hierarchical Firewall policy.""" SUFFIX = '.gcphf' _ANY_IP = { 'inet': nacaddr.IP('0.0.0.0/0'), 'inet6': nacaddr.IP('::/0'), } _PLATFORM = 'gcp_hf' _SUPPORTED_AF = frozenset(['inet', 'inet6', 'mixed']) # Beta is the default API version. GA supports IPv6 (inet6/mixed). _SUPPORTED_API_VERSION = frozenset(['beta', 'ga']) _DEFAULT_MAXIMUM_COST = 100 def _BuildTokens(self): """Build supported tokens for platform. Returns: Tuple containing both supported tokens and sub tokens. 
""" supported_tokens, _ = super()._BuildTokens() supported_tokens |= { 'destination_tag', 'expiration', 'source_tag', 'translated', 'target_resources', 'logging' } supported_tokens -= { 'destination_address_exclude', 'expiration', 'icmp_type', 'source_address_exclude', 'verbatim' } supported_sub_tokens = {'action': {'accept', 'deny', 'next'}} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): """Translates a Capirca policy into a HF-specific data structure. Takes in a POL file, parses each term and populates the policy dict. Each term in this list is a dictionary formatted according to HF's rule API specification. Additionally, checks for its quota. Args: pol: A Policy() object representing a given POL file. exp_info: An int that specifies number of weeks until policy expiry. Raises: ExceededCostError: Raised when the cost of a policy exceeds the default maximum cost. HeaderError: Raised when the header cannot be parsed or a header option is invalid. DifferentPolicyNameError: Raised when a header policy name differs from other in the same policy. """ self.policies = [] policy = { 'rules': [], 'type': 'FIREWALL' } is_policy_modified = False counter = 1 total_cost = 0 policies_max_cost = self._DEFAULT_MAXIMUM_COST previous_max_cost = -1 for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) is_policy_modified = True # Get term direction if set. direction = 'INGRESS' for i in self._GOOD_DIRECTION: if i in filter_options: direction = i filter_options.remove(i) # Get the address family if set. address_family = 'inet' for i in self._SUPPORTED_AF: if i in filter_options: address_family = i filter_options.remove(i) # Get the compute API version if set. api_version = 'beta' for i in self._SUPPORTED_API_VERSION: if i in filter_options: api_version = i filter_options.remove(i) break # Find the default maximum cost of a policy, an integer, if specified. max_cost = self._DEFAULT_MAXIMUM_COST for opt in filter_options: try: max_cost = int(opt) filter_options.remove(opt) break except ValueError: continue if max_cost > 65536: raise gcp.HeaderError( 'Default maximum cost cannot be higher than 65536') if previous_max_cost != -1 and previous_max_cost != max_cost: raise gcp.HeaderError( 'Maximum costs of each policy specified must be equal. ' 'Unequal costs found: %d and %d' % (previous_max_cost, max_cost)) policies_max_cost = max_cost previous_max_cost = max_cost display_name = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['display_name'] # Get policy name and validate it to meet displayName requirements. policy_name = header.FilterName(self._PLATFORM) if not policy_name: raise gcp.HeaderError( 'Policy name was not specified in header') filter_options.remove(policy_name) if len(policy_name) > 63: raise gcp.HeaderError( 'Policy name "%s" is too long; the maximum number of characters ' 'allowed is 63' % (policy_name)) if not bool(re.match('^[a-z]([-a-z0-9]*[a-z0-9])?$', policy_name)): raise gcp.HeaderError( 'Invalid string for displayName, "%s"; the first character must be ' 'a lowercase letter, and all following characters must be a dash, ' 'lowercase letter, or digit, except the last character, which ' 'cannot be a dash.' 
% (policy_name)) if display_name in policy and policy[display_name] != policy_name: raise DifferentPolicyNameError( 'Policy names that are from the same policy are expected to be ' 'equal, but %s is different to %s' % (policy[display_name], policy_name)) policy[display_name] = policy_name # If there are remaining options, they are unknown/unsupported options. if filter_options: raise gcp.HeaderError( 'Unsupported or unknown filter options %s in policy %s ' % (str(filter_options), policy_name)) # Handle mixed for each indvidual term as inet and inet6. # inet/inet6 are treated the same. term_address_families = [] if address_family == 'mixed': term_address_families = ['inet', 'inet6'] else: term_address_families = [address_family] for term in terms: if term.stateless_reply: continue if gcp.IsDefaultDeny(term): if direction == 'EGRESS': if address_family != 'mixed': # Default deny also gets processed as part of terms processing. # The name and priority get updated there. term.destination_address = [self._ANY_IP[address_family]] else: term.destination_address = [ self._ANY_IP['inet'], self._ANY_IP['inet6'] ] else: if address_family != 'mixed': term.source_address = [self._ANY_IP[address_family]] else: term.source_address = [ self._ANY_IP['inet'], self._ANY_IP['inet6'] ] term.name = self.FixTermLength(term.name) term.direction = direction # Only generate the term if it's for the appropriate platform if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue for term_af in term_address_families: rules = Term( term, address_family=term_af, policy_inet_version=address_family, api_version=api_version).ConvertToDict(priority_index=counter) if not rules: continue for dict_term in rules: total_cost += GetRuleTupleCount(dict_term, api_version) policy['rules'].append(dict_term) counter += len(rules) # We want to check the total policy cost, not just per policy. if total_cost > policies_max_cost: raise ExceededCostError( 'Policy cost (%d) for %s reached the ' 'maximum (%d)' % (total_cost, policy[display_name], policies_max_cost)) self.policies.append(policy) # Do not render an empty rules if no policies have been evaluated. if not is_policy_modified: self.policies = [] if total_cost > 0: logging.info('Policy %s quota cost: %d', policy[display_name], total_cost) def GetRuleTupleCount(dict_term: Dict[str, Any], api_version): """Calculate the tuple count of a rule in its dictionary form. Quota is charged based on how complex the rules are rather than simply limiting the number of rules. The cost of a rule is the number of distinct protocol:port combinations plus the number of IP addresses plus the number of targets. Note: The goal of this function is not to determine if a rule is valid, but to calculate its tuple count regardless of correctness. Args: dict_term: A dict object. api_version: A string indicating the api version. Returns: int: The tuple count of the rule. 
""" layer4_count = 0 layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['layer_4_config'] dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['dest_ip_range'] src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['src_ip_range'] targets_count = len(dict_term.get('targetResources', [])) if api_version == 'ga': config = dict_term.get('match', {}) else: config = dict_term.get('match', {}).get('config', {}) addresses_count = len( config.get(dest_ip_range, []) + config.get(src_ip_range, [])) for l4config in config.get(layer_4_config, []): for _ in l4config.get('ports', []): layer4_count += 1 if l4config.get('ipProtocol'): layer4_count += +1 return addresses_count + layer4_count + targets_count capirca-2.0.9/capirca/lib/ipset.py000066400000000000000000000202151437377527500170160ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Ipset iptables generator. This is a subclass of Iptables generator. ipset is a system inside the Linux kernel, which can very efficiently store and match IPv4 and IPv6 addresses. This can be used to dramatically increase performace of iptables firewall. """ import string from capirca.lib import iptables from capirca.lib import nacaddr class Error(Exception): """Base error class.""" class Term(iptables.Term): """Single Ipset term representation.""" _PLATFORM = 'ipset' _SET_MAX_LENGTH = 31 _POSTJUMP_FORMAT = None _PREJUMP_FORMAT = None _TERM_FORMAT = None _COMMENT_FORMAT = string.Template( '-A $filter -m comment --comment "$comment"') _FILTER_TOP_FORMAT = string.Template('-A $filter') def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # This stores tuples of set name and set contents, keyed by direction. # For example: # { 'src': ('set_name', [ipaddr object, ipaddr object]), # 'dst': ('set_name', [ipaddr object, ipaddr object]) } self.addr_sets = {} def _CalculateAddresses(self, src_addr_list, src_addr_exclude_list, dst_addr_list, dst_addr_exclude_list): """Calculates source and destination address list for a term. Since ipset is very efficient at matching large number of addresses, we never return any exclude addresses. Instead least positive match is calculated for both source and destination addresses. For source and destination address list, three cases are possible. First case is when there are no addresses. In that case we return _all_ips. Second case is when there is strictly one address. In that case, we optimize by not generating a set, and it's then the only element of returned set. Third case is when there are more than one address in a set. In that case we generate a set and also return _all_ips. Note the difference to the first case where no set is actually generated. Args: src_addr_list: source address list of the term. src_addr_exclude_list: source address exclude list of the term. dst_addr_list: destination address list of the term. dst_addr_exclude_list: destination address exclude list of the term. 
Returns: tuple containing source address list, source address exclude list, destination address list, destination address exclude list in that order. """ target_af = self.AF_MAP[self.af] src_addr_list = self._CalculateAddrList(src_addr_list, src_addr_exclude_list, target_af, 'src') dst_addr_list = self._CalculateAddrList(dst_addr_list, dst_addr_exclude_list, target_af, 'dst') return (src_addr_list, [], dst_addr_list, []) def _CalculateAddrList(self, addr_list, addr_exclude_list, target_af, direction): """Calculates and stores address list for target AF and direction. Args: addr_list: address list. addr_exclude_list: address exclude list of the term. target_af: target address family. direction: direction in which address list will be used. Returns: calculated address list. """ if not addr_list: addr_list = [self._all_ips] addr_list = [addr for addr in addr_list if addr.version == target_af] if addr_exclude_list: addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if addr_exclude.version == target_af] addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list) if len(addr_list) > 1: set_name = self._GenerateSetName(self.term.name, direction) self.addr_sets[direction] = (set_name, addr_list) addr_list = [self._all_ips] return addr_list def _GenerateAddressStatement(self, src_addr, dst_addr): """Returns the address section of an individual iptables rule. See _CalculateAddresses documentation. Three cases are possible here, and they map directly to cases in _CalculateAddresses. First, there can be no addresses for a direction (value is _all_ips then) In that case we return empty string. Second there can be stricly one address. In that case we return single address match (-s or -d). Third case, is when the value is _all_ips but also the set for particular direction is present. That's when we return a set match. Args: src_addr: ipaddr address or network object with source address of the rule. dst_addr: ipaddr address or network object with destination address of the rule. Returns: tuple containing source and destination address statement, in that order. """ src_addr_stmt = '' dst_addr_stmt = '' if src_addr and dst_addr: if src_addr == self._all_ips: if 'src' in self.addr_sets: src_addr_stmt = ('-m set --match-set %s src' % self.addr_sets['src'][0]) else: src_addr_stmt = '-s %s/%d' % (src_addr.network_address, src_addr.prefixlen) if dst_addr == self._all_ips: if 'dst' in self.addr_sets: dst_addr_stmt = ('-m set --match-set %s dst' % self.addr_sets['dst'][0]) else: dst_addr_stmt = '-d %s/%d' % (dst_addr.network_address, dst_addr.prefixlen) return (src_addr_stmt, dst_addr_stmt) def _GenerateSetName(self, term_name, suffix): if self.af == 'inet6': suffix += '-v6' if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH: set_name_max_lenth = self._SET_MAX_LENGTH - len(suffix) - 1 term_name = term_name[:set_name_max_lenth] return '%s-%s' % (term_name, suffix) class Ipset(iptables.Iptables): """Ipset generator.""" _PLATFORM = 'ipset' _SET_TYPE = 'hash:net' SUFFIX = '.ips' _TERM = Term _MARKER_BEGIN = '# begin:ipset-rules' _MARKER_END = '# end:ipset-rules' _GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose', 'exists'] # TODO(vklimovs): some not trivial processing is happening inside this # __str__, replace with explicit method def __str__(self): # Actual rendering happens in __str__, so it has to be called # before we do set specific part. 
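# A sketch of the overall output layout this produces, using a hypothetical
# term named 'good-sources' with two source networks (documentation
# prefixes). Set definitions land between the begin/end markers, followed by
# the iptables rules rendered by the parent generator; with two addresses the
# hashsize is the next power of two, i.e. 4.
_SKETCH_OUTPUT_LAYOUT = '\n'.join([
    '# begin:ipset-rules',
    'create good-sources-src hash:net family inet hashsize 4 maxelem 4',
    'add good-sources-src 192.0.2.0/24',
    'add good-sources-src 198.51.100.0/24',
    '# end:ipset-rules',
    '<iptables rules rendered by the parent Iptables generator follow here>',
])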
iptables_output = super().__str__() output = [] output.append(self._MARKER_BEGIN) for (_, _, _, _, terms) in self.iptables_policies: for term in terms: output.extend(self._GenerateSetConfig(term)) output.append(self._MARKER_END) output.append(iptables_output) return '\n'.join(output) def _GenerateSetConfig(self, term): """Generates set configuration for supplied term. Args: term: input term. Returns: string that is configuration of supplied term. """ output = [] c_str = 'create' a_str = 'add' if 'exists' in self.filter_options: c_str = c_str + ' -exist' a_str = a_str + ' -exist' for direction in sorted(term.addr_sets, reverse=True): set_name, addr_list = term.addr_sets[direction] set_hashsize = 1 << len(addr_list).bit_length() set_maxelem = set_hashsize output.append('%s %s %s family %s hashsize %i maxelem %i' % (c_str, set_name, self._SET_TYPE, term.af, set_hashsize, set_maxelem)) for address in addr_list: output.append('%s %s %s' % (a_str, set_name, address)) return output capirca-2.0.9/capirca/lib/iptables.py000066400000000000000000001003571437377527500175030ustar00rootroot00000000000000# Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Iptables generator.""" import datetime import re from string import Template # pylint: disable=g-importing-member from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr class Term(aclgenerator.Term): """Generate Iptables policy terms.""" # Validate that term does not contain any fields we do not # support. This prevents us from thinking that our output is # correct in cases where we've omitted fields from term. _PLATFORM = 'iptables' _POSTJUMP_FORMAT = None _PREJUMP_FORMAT = Template('-A $filter -j $term') _TERM_FORMAT = Template('-N $term') _COMMENT_FORMAT = Template('-A $term -m comment --comment "$comment"') _FILTER_TOP_FORMAT = Template('-A $term') _LOG_FORMAT = Template('-j LOG --log-prefix $term') _PROTO_TABLE = { 'icmpv6': '-p ipv6-icmp', 'icmp': '-p icmp', 'tcp': '-p tcp', 'udp': '-p udp', 'all': '-p all', 'esp': '-p esp', 'ah': '-p ah', 'gre': '-p gre', } _TCP_FLAGS_TABLE = { 'syn': 'SYN', 'ack': 'ACK', 'fin': 'FIN', 'rst': 'RST', 'urg': 'URG', 'psh': 'PSH', 'all': 'ALL', 'none': 'NONE', } _KNOWN_OPTIONS_MATCHERS = { # '! -f' also matches non-fragmented packets. 'first-fragment': '-m u32 --u32 4&0x3FFF=0x2000', 'initial': '--syn', 'tcp-initial': '--syn', 'sample': '', } def __init__(self, term, filter_name, trackstate, filter_action, af='inet', verbose=True): """Setup a new term. Args: term: A policy.Term object to represent in iptables. filter_name: The name of the filter chan to attach the term to. trackstate: Specifies if conntrack should be used for new connections. filter_action: The default action of the filter. af: Which address family ('inet' or 'inet6') to apply the term to. verbose: boolean if comments should be printed Raises: UnsupportedFilterError: Filter is not supported. 
""" super().__init__(term) self._action_table = { 'accept': '-j ACCEPT', 'deny': '-j DROP', 'reject': '-j REJECT --reject-with icmp-host-prohibited', 'reject-with-tcp-rst': '-j REJECT --reject-with tcp-reset', 'next': '-j RETURN' } self.trackstate = trackstate self.term = term # term object self.filter = filter_name # actual name of filter self.default_action = filter_action self.options = [] self.af = af self.verbose = verbose if af == 'inet6': self._all_ips = nacaddr.IPv6('::/0') self._action_table['reject'] = ('-j REJECT --reject-with ' 'icmp6-adm-prohibited') else: self._all_ips = nacaddr.IPv4('0.0.0.0/0') self._action_table['reject'] = ('-j REJECT --reject-with ' 'icmp-host-prohibited') self.term_name = '%s_%s' % (self.filter[:1], self.term.name) def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self._PLATFORM not in self.term.platform: return '' if self.term.platform_exclude: if self._PLATFORM in self.term.platform_exclude: return '' ret_str = [] # Don't render icmpv6 protocol terms under inet, or icmp under inet6 if ((self.af == 'inet6' and 'icmp' in self.term.protocol) or (self.af == 'inet' and 'icmpv6' in self.term.protocol)): logging.debug(self.NO_AF_LOG_PROTO.substitute( term=self.term.name, proto=', '.join(self.term.protocol), af=self.af)) return '' # Term verbatim output - this will skip over most normal term # creation code by returning early. Warnings provided in policy.py if self.term.verbatim: for next_verbatim in self.term.verbatim: if next_verbatim[0] == self._PLATFORM: ret_str.append(str(next_verbatim[1])) return '\n'.join(ret_str) # Create a new term self._SetDefaultAction() if self._TERM_FORMAT: ret_str.append(self._TERM_FORMAT.substitute(term=self.term_name)) if self._PREJUMP_FORMAT: ret_str.append(self._PREJUMP_FORMAT.substitute(filter=self.filter, term=self.term_name)) if self.verbose: if self.term.owner: self.term.comment.append('Owner: %s' % self.term.owner) # reformat long comments, if needed # # iptables allows individual comments up to 256 chars. # But our generator will limit a single comment line to < 120, using: # max = 119 - 27 (static chars in comment command) - [length of term name] comment_max_width = 92 - len(self.term_name) if comment_max_width < 40: comment_max_width = 40 comments = aclgenerator.WrapWords(self.term.comment, comment_max_width) # append comments to output if comments and comments[0]: for line in comments: if not line: continue # iptables-restore does not like 0-length comments. # term comments # Strip out quotes as iptables cant have nested quotes ret_str.append(self._COMMENT_FORMAT.substitute( filter=self.filter, term=self.term_name, comment=str(line).replace('\"', ''))) # Unsupported configuration; in the case of 'accept' or 'next', we # skip the rule. In other cases, we blow up (raise an exception) # to ensure that this is not considered valid configuration. 
if self.term.source_prefix or self.term.destination_prefix: if str(self.term.action[0]) not in set(['accept', 'next']): raise UnsupportedFilterError('%s %s %s %s %s %s %s %s' % ( '\nTerm', self.term.name, 'has action', str(self.term.action[0]), 'with source_prefix or destination_prefix,', ' which is unsupported in', self._PLATFORM, 'iptables output.')) return ('# skipped %s due to source or destination prefix rule' % self.term.name) # protocol if self.term.protocol: protocol = self.term.protocol else: protocol = ['all'] if 'hopopt' in protocol and self.af == 'inet': logging.warning('Term %s is using hopopt in IPv4 context.', self.term_name) return '' (term_saddr, exclude_saddr, term_daddr, exclude_daddr) = self._CalculateAddresses( self.term.source_address, self.term.source_address_exclude, self.term.destination_address, self.term.destination_address_exclude) if not term_saddr: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='source', af=self.af)) return '' if not term_daddr: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='destination', af=self.af)) return '' # ports source_port = [] destination_port = [] if self.term.source_port: source_port = self.term.source_port if self.term.destination_port: destination_port = self.term.destination_port # icmp-types icmp_types = [''] if self.term.icmp_type: icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, protocol, self.af) source_interface = '' if self.term.source_interface: source_interface = self.term.source_interface destination_interface = '' if self.term.destination_interface: destination_interface = self.term.destination_interface log_hits = False if self.term.logging: # Iptables sends logs to hosts configured syslog log_hits = True # options tcp_flags = [] tcp_track_options = [] for next_opt in [str(x) for x in self.term.option]: # # Sanity checking and high-ports are added as appropriate in # pre-processing that is done in __str__ within class Iptables. # Option established will add destination port high-ports if protocol # contains only tcp, udp or both. This is done earlier in class Iptables. # if ((next_opt.find('established') == 0 or next_opt.find('tcp-established') == 0) and 'ESTABLISHED' not in [x.strip() for x in self.options]): if next_opt.find('tcp-established') == 0 and protocol != ['tcp']: raise TcpEstablishedError('%s %s %s' % ( '\noption tcp-established can only be applied for proto tcp.', '\nError in term:', self.term.name)) if self.trackstate: # Use nf_conntrack to track state -- works with any proto self.options.append('-m state --state ESTABLISHED,RELATED') elif protocol == ['tcp']: # Simple established-only rule for TCP: Must have ACK field # (SYN/ACK or subsequent ACK), or RST and no other flags. 
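# A sketch of how the two (check, set) pairs defined below are rendered into
# iptables arguments once the flag lists are de-duplicated and sorted in
# _FormatPart. The mapping is illustrative only.
_SKETCH_STATELESS_ESTABLISHED = {
    # Any packet carrying ACK (a SYN/ACK or any later ACK).
    (('ACK',), ('ACK',)): '--tcp-flags ACK ACK',
    # Or a bare RST: check all of SYN/FIN/ACK/RST, require only RST set.
    (('SYN', 'FIN', 'ACK', 'RST'), ('RST',)):
        '--tcp-flags ACK,FIN,RST,SYN RST',
}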
tcp_track_options = [(['ACK'], ['ACK']), (['SYN', 'FIN', 'ACK', 'RST'], ['RST'])] # Iterate through flags table, and create list of tcp-flags to append for next_flag in self._TCP_FLAGS_TABLE: if next_opt.find(next_flag) == 0: tcp_flags.append(self._TCP_FLAGS_TABLE.get(next_flag)) if next_opt in self._KNOWN_OPTIONS_MATCHERS: self.options.append(self._KNOWN_OPTIONS_MATCHERS[next_opt]) if self.term.packet_length: # Policy format is "#-#", but iptables format is "#:#" self.options.append('-m length --length %s' % self.term.packet_length.replace('-', ':')) if self.term.fragment_offset: self.options.append('-m u32 --u32 4&0x1FFF=%s' % self.term.fragment_offset.replace('-', ':')) icmp_code = [''] if self.term.icmp_code: icmp_code = self.term.icmp_code for saddr in exclude_saddr: ret_str.extend(self._FormatPart( '', saddr, '', '', '', '', '', '', '', '', '', '', '', self._action_table.get('next'))) for daddr in exclude_daddr: ret_str.extend(self._FormatPart( '', '', '', daddr, '', '', '', '', '', '', '', '', '', self._action_table.get('next'))) for saddr in term_saddr: for daddr in term_daddr: for icmp in icmp_types: for code in icmp_code: for proto in protocol: for tcp_matcher in tcp_track_options or (([], []),): ret_str.extend(self._FormatPart( str(proto), saddr, source_port, daddr, destination_port, self.options, tcp_flags, icmp, code, tcp_matcher, source_interface, destination_interface, log_hits, self._action_table.get(str(self.term.action[0])) )) if self._POSTJUMP_FORMAT: ret_str.append(self._POSTJUMP_FORMAT.substitute(filter=self.filter, term=self.term_name)) return '\n'.join(str(v) for v in ret_str if v) def _CalculateAddresses(self, term_saddr, exclude_saddr, term_daddr, exclude_daddr): """Calculate source and destination address list for a term. Args: term_saddr: source address list of the term exclude_saddr: source address exclude list of the term term_daddr: destination address list of the term exclude_daddr: destination address exclude list of the term Returns: tuple containing source address list, source exclude address list, destination address list, destination exclude address list in that order """ # source address term_saddr_excluded = [] if not term_saddr: term_saddr = [self._all_ips] if exclude_saddr: term_saddr_excluded.extend(nacaddr.ExcludeAddrs(term_saddr, exclude_saddr)) # destination address term_daddr_excluded = [] if not term_daddr: term_daddr = [self._all_ips] if exclude_daddr: term_daddr_excluded.extend(nacaddr.ExcludeAddrs(term_daddr, exclude_daddr)) # Just to be safe, always have a result of at least 1 to avoid * by zero # returning incorrect results (10src*10dst=100, but 10src*0dst=0, not 10) bailout_count = len(exclude_saddr) + len(exclude_daddr) + ( (len(self.term.source_address) or 1) * (len(self.term.destination_address) or 1)) exclude_count = ((len(term_saddr_excluded) or 1) * (len(term_daddr_excluded) or 1)) # Use bailout jumps for excluded addresses if it results in fewer output # lines than nacaddr.ExcludeAddrs() method. if exclude_count < bailout_count: exclude_saddr = [] exclude_daddr = [] if term_saddr_excluded: term_saddr = term_saddr_excluded if term_daddr_excluded: term_daddr = term_daddr_excluded # With many sources and destinations, iptables needs to generate the # cartesian product of sources and destinations. If there are no # exclude rules, this can instead be written as exclude [0/0 - # srcs], exclude [0/0 - dsts]. 
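# A small worked example of that trade-off, with hypothetical counts: the
# cartesian product grows multiplicatively, while "jump out early for
# everything that is not a source" grows additively. 30 sources and 20
# destinations yield 600 rules, but roughly 40 'not a source' RETURN jumps
# plus 20 destination rules yield about 60, so the inversion wins.
def _sketch_inversion_wins(num_sources, num_dests, num_inverted_sources):
  cartesian = num_sources * num_dests
  inverted = num_inverted_sources + num_dests
  return cartesian > 100 and inverted < cartesian
# _sketch_inversion_wins(30, 20, 40) -> True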
v4_src_count = len([x for x in term_saddr if x.version == 4]) v4_dst_count = len([x for x in term_daddr if x.version == 4]) v6_src_count = len([x for x in term_saddr if x.version == 6]) v6_dst_count = len([x for x in term_daddr if x.version == 6]) num_pairs = v4_src_count * v4_dst_count + v6_src_count * v6_dst_count if num_pairs > 100: new_exclude_source = nacaddr.ExcludeAddrs([self._all_ips], term_saddr) new_exclude_dest = nacaddr.ExcludeAddrs([self._all_ips], term_daddr) # Invert the shortest list that does not already have exclude addresses if len(new_exclude_source) < len(new_exclude_dest) and not exclude_saddr: if len(new_exclude_source) + len(term_daddr) < num_pairs: exclude_saddr = new_exclude_source term_saddr = [self._all_ips] elif not exclude_daddr: if len(new_exclude_dest) + len(term_saddr) < num_pairs: exclude_daddr = new_exclude_dest term_daddr = [self._all_ips] term_saddr = [x for x in term_saddr if x.version == self.AF_MAP[self.af]] exclude_saddr = [x for x in exclude_saddr if x.version == self.AF_MAP[self.af]] term_daddr = [x for x in term_daddr if x.version == self.AF_MAP[self.af]] exclude_daddr = [x for x in exclude_daddr if x.version == self.AF_MAP[self.af]] return (term_saddr, exclude_saddr, term_daddr, exclude_daddr) def _FormatPart(self, protocol, saddr, sport, daddr, dport, options, tcp_flags, icmp_type, code, track_flags, sint, dint, log_hits, action): """Compose one iteration of the term parts into a string. Args: protocol: The network protocol saddr: Source IP address sport: Source port numbers daddr: Destination IP address dport: Destination port numbers options: Optional arguments to append to our rule tcp_flags: Which tcp_flag arguments, if any, should be appended icmp_type: What icmp protocol to allow, if any code: ICMP code allowed, if any track_flags: A tuple of ([check-flags], [set-flags]) arguments to tcp-flag sint: Optional source interface dint: Optional destination interface log_hits: Boolean, to log matches or not action: What should happen if this rule matches Returns: rval: A single iptables argument line """ src, dst = self._GenerateAddressStatement(saddr, daddr) filter_top = self._FILTER_TOP_FORMAT.substitute(filter=self.filter, term=self.term_name) source_int = '' if sint: source_int = '-i %s' % sint destination_int = '' if dint: destination_int = '-o %s' % dint log_jump = '' if log_hits: log_jump = self._LOG_FORMAT.substitute(term=self.term.name) if self.term.log_limit: log_jump = '-m --limit {}/{} {}'.format( self.term.log_limit[0], self.term.log_limit[1], log_jump) if not options: options = [] proto = self._PROTO_TABLE.get(str(protocol)) # Don't drop protocol if we don't recognize it if protocol and not proto: proto = '-p %s' % str(protocol) # TODO(vklimovs): generalize to all v6 special cases # Use u32 module as named modules are not available # everywhere. if protocol == 'hopopt': proto = '' # Select 4 bytes at offset 0x3, mask out all but # last byte. That produces byte at position 7, # Next Header. 
Compare to 0x0, Hop By Hop options.append('-m u32 --u32 "0x3&0xff=0x0"') if protocol == 'fragment': proto = '' # Ditto, but compare to 0x2c, 44, Fragment options.append('-m u32 --u32 "0x3&0xff=0x2c"') # set conntrack state to NEW, unless policy requested "nostate" # for icmpv6 use trackstate only for type 128 and 139, as this is the # only supported types in nf_conntrack_proto_icmpv6 if self.trackstate and ( protocol != 'icmpv6' or icmp_type in [128, 139] ): already_stateful = False # we will add new stateful arguments only if none already exist, such # as from "option:: established" for option in options: if 'state' in option: already_stateful = True if not already_stateful: if 'ACCEPT' in action: # We have to permit established/related since a policy may not # have an existing blank permit for established/related, which # may be more efficient, but slightly less secure. options.append('-m state --state NEW,ESTABLISHED,RELATED') if tcp_flags or (track_flags and track_flags[0]): check_fields = ','.join(sorted(set(tcp_flags + track_flags[0]))) set_fields = ','.join(sorted(set(tcp_flags + track_flags[1]))) flags = '--tcp-flags %s %s' % (check_fields, set_fields) else: flags = '' icmp_type = str(icmp_type) if not icmp_type: icmp = '' elif str(protocol) == 'icmpv6': icmp = '-m icmp6 --icmpv6-type %s' % icmp_type else: icmp = '--icmp-type %s' % icmp_type if code: icmp += r'/%d' % code # format tcp and udp ports sports = dports = [''] if sport: sports = self._GeneratePortStatement(sport, source=True) if dport: dports = self._GeneratePortStatement(dport, dest=True) ret_lines = [] for sport in sports: for dport in dports: rval = [filter_top] if re.search('multiport', sport) and not re.search('multiport', dport): # Due to bug in iptables, use of multiport module before a single # port specification will result in multiport trying to consume it. # this is a little hack to ensure single ports are listed before # any multiport specification. dport, sport = sport, dport if str(protocol) == 'icmpv6': # Due to a bug in ip6tables, iptables-save returns icmpv6 matches in # order (address spec) (icmpv6 spec). Fake this using options # datastructure. options.extend((proto, icmp)) proto = '' icmp = '' if (str(self.af) == 'inet6' and str(protocol) == 'all' and 'REJECT' in str(action)): # Due to a bug in ip6tables, when -p all and -j REJECT, proto # is being eaten proto = '' for value in (proto, flags, sport, dport, icmp, src, dst, ' '.join(options), source_int, destination_int): if value: rval.append(str(value)) if log_jump: # -j LOG ret_lines.append(' '.join(rval+[log_jump])) # -j ACTION ret_lines.append(' '.join(rval+[action])) return ret_lines def _GenerateAddressStatement(self, saddr, daddr): """Return the address section of an individual iptables rule. Args: saddr: source address of the rule daddr: destination address of the rule Returns: tuple containing source and destination address statement, in that order """ src = '' dst = '' if not saddr or saddr == self._all_ips: src = '' else: src = '-s %s/%d' % (saddr.network_address, saddr.prefixlen) if not daddr or daddr == self._all_ips: dst = '' else: dst = '-d %s/%d' % (daddr.network_address, daddr.prefixlen) return (src, dst) def _GeneratePortStatement(self, ports, source=False, dest=False): """Return the 'port' section of an individual iptables rule. Args: ports: list of ports or port ranges (pairs) source: (bool) generate a source port rule dest: (bool) generate a dest port rule Returns: list holding the 'port' sections of an iptables rule. 
Raises: BadPortsError: if too many ports are passed in, or if both 'source' and 'dest' are true. NotImplementedError: if both 'source' and 'dest' are true. """ if not ports: return '' direction = '' # default: no direction / '--port'. As yet, unused. if source and dest: raise BadPortsError('_GeneratePortStatement called ambiguously.') elif source: direction = 's' # source port / '--sport' elif dest: direction = 'd' # dest port / '--dport' else: raise NotImplementedError('--port support not yet implemented.') # Normalize ports and get accurate port count. # iptables multiport module limits to 15, but we use 14 to ensure a range # doesn't tip us over the limit max_ports = 14 norm_ports = [] portstrings = [] count = 0 for port in ports: if port[0] == port[1]: norm_ports.append(str(port[0])) count += 1 else: norm_ports.append('%d:%d' % (port[0], port[1])) count += 2 if count >= max_ports: count = 0 portstrings.append('-m multiport --%sports %s' % (direction, ','.join(norm_ports))) norm_ports = [] if norm_ports: if len(norm_ports) == 1: portstrings.append('--%sport %s' % (direction, norm_ports[0])) else: portstrings.append('-m multiport --%sports %s' % (direction, ','.join(norm_ports))) return portstrings def _SetDefaultAction(self): """If term does not specify action, use filter default action.""" if not self.term.action: self.term.action[0].value = self.default_action class Iptables(aclgenerator.ACLGenerator): """Generates filters and terms from provided policy object.""" _PLATFORM = 'iptables' _DEFAULT_PROTOCOL = 'all' SUFFIX = '' _RENDER_PREFIX = None _RENDER_SUFFIX = None _DEFAULTACTION_FORMAT = '-P %s %s' _DEFAULTACTION_FORMAT_CUSTOM_CHAIN = '-N %s' _DEFAULT_ACTION = 'DROP' _TERM = Term _TERM_MAX_LENGTH = 24 _GOOD_FILTERS = ['INPUT', 'OUTPUT', 'FORWARD'] _GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose'] def __init__(self, pol, exp_info): self.iptables_policies = [] super().__init__(pol, exp_info) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'counter', 'destination_interface', 'destination_prefix', 'fragment_offset', 'icmp_code', 'logging', 'log_limit', 'owner', 'packet_length', 'routing_instance', 'source_interface', 'source_prefix'} supported_sub_tokens.update( {'option': {'established', 'first-fragment', 'initial', 'sample', 'tcp-established', 'tcp-initial', 'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none'}, }) return supported_tokens, supported_sub_tokens def _WarnIfCustomTarget(self, target): """Emit a warning if a policy's default target is not a built-in chain.""" if target not in self._GOOD_FILTERS: logging.warning('Filter is generating a non-standard chain that will not ' 'apply to traffic unless linked from INPUT, OUTPUT or ' 'FORWARD filters. 
New chain name is: %s', target) def _TranslatePolicy(self, pol, exp_info): """Translate a policy from objects into strings.""" current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) default_action = None good_default_actions = ['ACCEPT', 'DROP'] good_afs = ['inet', 'inet6'] all_protocols_stateful = True self.verbose = True for header, terms in pol.filters: filter_type = None if self._PLATFORM not in header.platforms: continue self.filter_options = header.FilterOptions(self._PLATFORM)[1:] filter_name = header.FilterName(self._PLATFORM) self._WarnIfCustomTarget(filter_name) # ensure all options after the filter name are expected for opt in self.filter_options: if opt not in good_default_actions + good_afs + self._GOOD_OPTIONS: raise UnsupportedTargetOptionError('%s %s %s %s' % ( '\nUnsupported option found in', self._PLATFORM, 'target definition:', opt)) # disable stateful? if 'nostate' in self.filter_options: all_protocols_stateful = False if 'noverbose' in self.filter_options: self.verbose = False # Check for matching af for address_family in good_afs: if address_family in self.filter_options: # should not specify more than one AF in options if filter_type is not None: raise UnsupportedFilterError('%s %s %s %s' % ( '\nMay only specify one of', good_afs, 'in filter options:', self.filter_options)) filter_type = address_family if filter_type is None: filter_type = 'inet' if self._PLATFORM == 'iptables' and filter_name == 'FORWARD': default_action = 'DROP' # does this policy override the default filter actions? for next_target in header.target: if next_target.platform == self._PLATFORM: if len(next_target.options) > 1: for arg in next_target.options: if arg in good_default_actions: default_action = arg if default_action and default_action not in good_default_actions: raise UnsupportedDefaultActionError('%s %s %s %s %s' % ( '\nOnly', ', '.join(good_default_actions), 'default filter action allowed;', default_action, 'used.')) # add the terms new_terms = [] term_names = set() for term in terms: term.name = self.FixTermLength(term.name, 'abbreviateterms' in self.filter_options, 'truncateterms' in self.filter_options) if term.name in term_names: raise aclgenerator.DuplicateTermError( 'You have a duplicate term: %s' % term.name) term_names.add(term.name) if not term.logging and term.log_limit: raise LimitButNoLogError( 'Term %s: Cannoy use log-limit without logging' % term.name) term = self.FixHighPorts(term, af=filter_type, all_protocols_stateful=all_protocols_stateful) if not term: continue if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue new_terms.append(self._TERM(term, filter_name, all_protocols_stateful, default_action, filter_type, self.verbose)) self.iptables_policies.append((header, filter_name, filter_type, default_action, new_terms)) def SetTarget(self, target, action=None): """Sets policy's target and default action. 
Args: target: (string) target name action: (string) default action, only valid if target is a built-in chain """ # there is only one item in iptables_policies pol = list(self.iptables_policies[0]) pol[1] = target self._WarnIfCustomTarget(target) if action: pol[3] = action self.iptables_policies[0] = tuple(pol) def __str__(self): target = [] pretty_platform = '%s%s' % (self._PLATFORM[0].upper(), self._PLATFORM[1:]) if self._RENDER_PREFIX: target.append(self._RENDER_PREFIX) for (header, filter_name, filter_type, default_action, terms ) in self.iptables_policies: # Add comments for this filter target.append('# %s %s Policy' % (pretty_platform, header.FilterName(self._PLATFORM))) # reformat long text comments, if needed comments = aclgenerator.WrapWords(header.comment, 70) if comments and comments[0]: for line in comments: target.append('# %s' % line) target.append('#') # add the p4 tags target.extend(aclgenerator.AddRepositoryTags('# ')) target.append('# ' + filter_type) if filter_name in self._GOOD_FILTERS: if default_action: target.append(self._DEFAULTACTION_FORMAT % (filter_name, default_action)) elif self._PLATFORM == 'speedway': # always specify the default filter states for speedway, # if default action policy not specified for iptables, do nothing. target.append( self._DEFAULTACTION_FORMAT % (filter_name, self._DEFAULT_ACTION)) else: # Custom chains have no concept of default policy. target.append(self._DEFAULTACTION_FORMAT_CUSTOM_CHAIN % filter_name) # add the terms for term in terms: term_str = str(term) if term_str: target.append(term_str) if self._RENDER_SUFFIX: target.append(self._RENDER_SUFFIX) target.append('') return '\n'.join(target) class Error(Exception): """Base error class.""" class BadPortsError(Error): """Too many ports for a single iptables statement.""" class UnsupportedFilterError(Error): """Raised when we see an inappropriate filter.""" class NoIptablesPolicyError(Error): """Raised when a policy is received that doesn't support iptables.""" class TcpEstablishedError(Error): """Raised when a term has tcp-established option but not proto tcp only.""" class EstablishedError(Error): """Raised when a term has established option with inappropriate protocol.""" class UnsupportedDefaultActionError(Error): """Raised when a filter has an impermissible default action specified.""" class UnsupportedTargetOptionError(Error): """Raised when a filter has an impermissible default action specified.""" class LimitButNoLogError(Error): """Raised when log-limit is used by logging is not.""" capirca-2.0.9/capirca/lib/juniper.py000066400000000000000000001175071437377527500173610ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
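# Illustrative sketch (not capirca code): one way to drive the Iptables
# generator above directly from Python instead of via the aclgen CLI. The
# file and directory paths below are hypothetical placeholders.
from capirca.lib import iptables
from capirca.lib import naming
from capirca.lib import policy

def render_iptables(pol_path='./policies/sample.pol', def_dir='./def'):
  defs = naming.Naming(def_dir)
  with open(pol_path) as f:
    pol = policy.ParsePolicy(f.read(), defs)
  # exp_info is the "term expires soon" warning window, in weeks.
  return str(iptables.Iptables(pol, exp_info=2))
# print(render_iptables())  # emits "-P INPUT DROP"-style defaults plus -A rules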
# """Juniper JCL generator.""" import datetime from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import summarizer import six # generic error class class Error(Exception): pass class JuniperTermPortProtocolError(Error): pass class TcpEstablishedWithNonTcpError(Error): pass class JuniperDuplicateTermError(Error): pass class UnsupportedFilterError(Error): pass class PrecedenceError(Error): pass class JuniperIndentationError(Error): pass class JuniperNextIpError(Error): pass class JuniperMultipleTerminatingActionError(Error): pass class JuniperFragmentInV6Error(Error): pass class Config: """Config allows a configuration to be assembled easily. Configurations are automatically indented following Juniper's style. A textual representation of the config can be extracted with str(). Attributes: indent: The number of leading spaces on the current line. tabstop: The number of spaces to indent for a new level. lines: the text lines of the configuration. """ def __init__(self, indent=0, tabstop=4): self.indent = indent self._initial_indent = indent self.tabstop = tabstop self.lines = [] def __str__(self): if self.indent != self._initial_indent: raise JuniperIndentationError( 'Expected indent %d but got %d' % (self._initial_indent, self.indent)) return '\n'.join(self.lines) def Append(self, line, verbatim=False): """Append one line to the configuration. Args: line: The string to append to the config. verbatim: append line without adjusting indentation. Default False. Raises: JuniperIndentationError: If the indentation would be further left than the initial indent. e.g. too many close braces. """ if verbatim: self.lines.append(line) return if line.endswith('}'): self.indent -= self.tabstop if self.indent < self._initial_indent: raise JuniperIndentationError('Too many close braces.') spaces = ' ' * self.indent self.lines.append(spaces + line.strip()) if not line.find('/*') >= 0 and line.find('*/') >= 0: self.indent -= 1 if self.indent < self._initial_indent: raise JuniperIndentationError('Too many close comments.') if not line.find('*/') >= 0 and line.find('/*') >= 0: self.indent += 1 if line.endswith(' {'): self.indent += self.tabstop class Term(aclgenerator.Term): """Representation of an individual Juniper term. This is mostly useful for the __str__() method. Attributes: term: The term object from policy. term_type: String indicating type of term, inet, inet6 icmp etc. enable_dsmo: Boolean to enable dsmo. noverbose: Boolean to disable verbosity. filter_direction: Enum indicating the direction of the filter on an interface e.g. INGRESS. interface_type: Enum indicating the type of interface filter will be applied e.g. LOOPBACK. """ _PLATFORM = 'juniper' _DEFAULT_INDENT = 12 ACTIONS = {'accept': 'accept', 'deny': 'discard', 'reject': 'reject', 'next': 'next term', 'reject-with-tcp-rst': 'reject tcp-reset', 'encapsulate': 'encapsulate', 'decapsulate': 'decapsulate', 'port-mirror': 'port-mirror'} # the following lookup table is used to map between the various types of # filters the juniper generator can render. As new differences are # encountered, they should be added to this table. Accessing members # of this table looks like: # self._TERM_TYPE('inet').get('saddr') -> 'source-address' # # it's critical that the members of each filter type be the same, that is # to say that if _TERM_TYPE.get('inet').get('foo') returns something, # _TERM_TYPE.get('inet6').get('foo') must return the inet6 equivalent. 
_TERM_TYPE = {'inet': {'addr': 'address', 'saddr': 'source-address', 'daddr': 'destination-address', 'protocol': 'protocol', 'protocol-except': 'protocol-except', 'tcp-est': 'tcp-established'}, 'inet6': {'addr': 'address', 'saddr': 'source-address', 'daddr': 'destination-address', 'protocol': 'next-header', 'protocol-except': 'next-header-except', 'tcp-est': 'tcp-established'}, 'bridge': {'addr': 'ip-address', 'saddr': 'ip-source-address', 'daddr': 'ip-destination-address', 'protocol': 'ip-protocol', 'protocol-except': 'ip-protocol-except', 'tcp-est': 'tcp-flags "(ack|rst)"'} } def __init__(self, term, term_type, enable_dsmo, noverbose, filter_direction=None, interface_type=None): super().__init__(term) self.term = term self.term_type = term_type self.enable_dsmo = enable_dsmo self.noverbose = noverbose # Filter direction and interface type are needed in juniperevo sub-class for IPv6 filters. self.filter_direction = filter_direction self.interface_type = interface_type if self._PLATFORM != 'msmpc': if term_type not in self._TERM_TYPE: raise ValueError('Unknown Filter Type: %s' % term_type) if 'hopopt' in self.term.protocol: loc = self.term.protocol.index('hopopt') self.term.protocol[loc] = 'hop-by-hop' if 'hopopt' in self.term.protocol_except: loc = self.term.protocol_except.index('hopopt') self.term.protocol_except[loc] = 'hop-by-hop' # some options need to modify the actions self.extra_actions = [] # TODO(pmoody): get rid of all of the default string concatenation here. # eg, indent(8) + 'foo;' -> '%s%s;' % (indent(8), 'foo'). pyglint likes this # more. def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self._PLATFORM not in self.term.platform: return '' if self.term.platform_exclude: if self._PLATFORM in self.term.platform_exclude: return '' config = Config(indent=self._DEFAULT_INDENT) from_str = [] # Don't render icmpv6 protocol terms under inet, or icmp under inet6 if ((self.term_type == 'inet6' and 'icmp' in self.term.protocol) or (self.term_type == 'inet' and ('icmpv6' in self.term.protocol or 'icmp6' in self.term.protocol))): logging.debug(self.NO_AF_LOG_PROTO.substitute( term=self.term.name, proto=', '.join(self.term.protocol), af=self.term_type)) return '' # comment # this deals just fine with multi line comments, but we could probably # output them a little cleaner; do things like make sure the # len(output) < 80, etc. Note, if 'noverbose' is set for the filter, skip # all comment processing. if self.term.owner and not self.noverbose: self.term.comment.append('Owner: %s' % self.term.owner) if self.term.comment and not self.noverbose: config.Append('/*') for comment in self.term.comment: for line in comment.split('\n'): config.Append('** ' + line) config.Append('*/') # Term verbatim output - this will skip over normal term creation # code. Warning generated from policy.py if appropriate. if self.term.verbatim: for next_term in self.term.verbatim: if next_term[0] == self._PLATFORM: config.Append(str(next_term[1]), verbatim=True) return str(config) # Helper for per-address-family keywords. family_keywords = self._TERM_TYPE.get(self.term_type) # option # this is going to be a little ugly b/c there are a few little messed # up options we can deal with. 
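# Illustrative sketch (not capirca code): how the _TERM_TYPE table above is
# consumed -- rendering code asks for an abstract key ('saddr', 'protocol',
# ...) and receives the keyword for the active address family.
# `render_match` is a hypothetical helper.
def render_match(term_type, key, value):
  keyword = Term._TERM_TYPE[term_type][key]
  return '%s %s;' % (keyword, value)
# render_match('inet', 'protocol', 'tcp')       -> 'protocol tcp;'
# render_match('inet6', 'protocol', 'tcp')      -> 'next-header tcp;'
# render_match('bridge', 'saddr', '10.0.0.0/8') -> 'ip-source-address 10.0.0.0/8;'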
if self.term.option: for opt in [str(x) for x in self.term.option]: # there should be a better way to search the array of protocols if opt.startswith('sample'): self.extra_actions.append('sample') # only append tcp-established for option established when # tcp is the only protocol, otherwise other protos break on juniper elif opt.startswith('established'): if self.term.protocol == ['tcp']: if 'tcp-established;' not in from_str: from_str.append(family_keywords['tcp-est'] + ';') # if tcp-established specified, but more than just tcp is included # in the protocols, raise an error elif opt.startswith('tcp-established'): flag = family_keywords['tcp-est'] + ';' if self.term.protocol == ['tcp']: if flag not in from_str: from_str.append(flag) else: raise TcpEstablishedWithNonTcpError( 'tcp-established can only be used with tcp protocol in term %s' % self.term.name) elif opt.startswith('rst'): from_str.append('tcp-flags "rst";') elif opt.startswith('initial') and 'tcp' in self.term.protocol: from_str.append('tcp-initial;') elif opt.startswith('first-fragment'): from_str.append('first-fragment;') # we don't have a special way of dealing with this, so we output it and # hope the user knows what they're doing. else: from_str.append('%s;' % opt) # if the term is inactive we have to set the prefix if self.term.inactive: term_prefix = 'inactive:' else: term_prefix = '' # term name config.Append('%s term %s {' % (term_prefix, self.term.name)) # The "filter" keyword is not compatible with from or then if self.term.filter_term: config.Append('filter %s;' % self.term.filter_term) config.Append('}') # end term accept-foo-to-bar { ... } return str(config) # a default action term doesn't have any from { clause has_match_criteria = (self.term.address or self.term.dscp_except or self.term.dscp_match or self.term.destination_address or self.term.destination_port or self.term.destination_prefix or self.term.destination_prefix_except or self.term.encapsulate or self.term.ether_type or self.term.flexible_match_range or self.term.forwarding_class or self.term.forwarding_class_except or self.term.fragment_offset or self.term.hop_limit or self.term.next_ip or self.term.port or self.term.precedence or self.term.protocol or self.term.protocol_except or self.term.source_address or self.term.source_port or self.term.source_prefix or self.term.source_prefix_except or self.term.traffic_type or self.term.ttl) if has_match_criteria: config.Append('from {') term_af = self.AF_MAP.get(self.term_type) # address address = self.term.GetAddressOfVersion('address', term_af) if self.enable_dsmo: address = summarizer.Summarize(address) if address: config.Append('%s {' % family_keywords['addr']) for addr in address: for comment in self._Comment(addr): config.Append('%s' % comment) if self.enable_dsmo: config.Append('%s/%s;' % summarizer.ToDottedQuad(addr, nondsm=True)) else: config.Append('%s;' % addr) config.Append('}') elif self.term.address: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, af=self.term_type)) return '' # source address src_addr = self.term.GetAddressOfVersion('source_address', term_af) src_addr_ex = self.term.GetAddressOfVersion('source_address_exclude', term_af) if self.enable_dsmo: src_addr = summarizer.Summarize(src_addr) src_addr_ex = summarizer.Summarize(src_addr_ex) else: src_addr, src_addr_ex = self._MinimizePrefixes(src_addr, src_addr_ex) if src_addr: config.Append('%s {' % family_keywords['saddr']) for addr in src_addr: for comment in self._Comment(addr): config.Append('%s' % comment) if 
self.enable_dsmo: config.Append('%s/%s;' % summarizer.ToDottedQuad(addr, nondsm=True)) else: config.Append('%s;' % addr) for addr in src_addr_ex: for comment in self._Comment(addr, exclude=True): config.Append('%s' % comment) if self.enable_dsmo: config.Append('%s/%s except;' % summarizer.ToDottedQuad(addr, nondsm=True)) else: config.Append('%s except;' % addr) config.Append('}') elif self.term.source_address: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='source', af=self.term_type)) return '' # destination address dst_addr = self.term.GetAddressOfVersion('destination_address', term_af) dst_addr_ex = self.term.GetAddressOfVersion('destination_address_exclude', term_af) if self.enable_dsmo: dst_addr = summarizer.Summarize(dst_addr) dst_addr_ex = summarizer.Summarize(dst_addr_ex) else: dst_addr, dst_addr_ex = self._MinimizePrefixes(dst_addr, dst_addr_ex) if dst_addr: config.Append('%s {' % family_keywords['daddr']) for addr in dst_addr: for comment in self._Comment(addr): config.Append('%s' % comment) if self.enable_dsmo: config.Append('%s/%s;' % summarizer.ToDottedQuad(addr, nondsm=True)) else: config.Append('%s;' % addr) for addr in dst_addr_ex: for comment in self._Comment(addr, exclude=True): config.Append('%s' % comment) if self.enable_dsmo: config.Append('%s/%s except;' % summarizer.ToDottedQuad(addr, nondsm=True)) else: config.Append('%s except;' % addr) config.Append('}') elif self.term.destination_address: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='destination', af=self.term_type)) return '' # forwarding-class if self.term.forwarding_class: config.Append('forwarding-class %s' % self._Group( self.term.forwarding_class, lc=False)) # forwarding-class-except if self.term.forwarding_class_except: config.Append('forwarding-class-except %s' % self._Group( self.term.forwarding_class_except, lc=False)) # source prefix list if self.term.source_prefix or self.term.source_prefix_except: config.Append('source-prefix-list {') for pfx in self.term.source_prefix: config.Append(pfx + ';') for epfx in self.term.source_prefix_except: config.Append(epfx + ' except;') config.Append('}') # destination prefix list if self.term.destination_prefix or self.term.destination_prefix_except: config.Append('destination-prefix-list {') for pfx in self.term.destination_prefix: config.Append(pfx + ';') for epfx in self.term.destination_prefix_except: config.Append(epfx + ' except;') config.Append('}') # Only generate ttl if inet, inet6 uses hop-limit instead. 
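# Illustrative sketch (not capirca code): the address blocks emitted above
# always list included prefixes first and excluded prefixes with a trailing
# "except". A standalone restatement of that rendering rule, independent of
# the Config/Term machinery; `render_address_block` is a hypothetical helper.
def render_address_block(keyword, includes, excludes=()):
  lines = ['%s {' % keyword]
  lines += ['    %s;' % addr for addr in includes]
  lines += ['    %s except;' % addr for addr in excludes]
  lines.append('}')
  return '\n'.join(lines)
# render_address_block('source-address', ['10.0.0.0/8'], ['10.1.1.0/24']) ->
# source-address {
#     10.0.0.0/8;
#     10.1.1.0/24 except;
# }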
if self.term.ttl and self.term_type == 'inet': config.Append('ttl %s;' % self.term.ttl) # port if self.term.port: config.Append('port %s' % self._Group(self.term.port)) # source port if self.term.source_port: config.Append('source-port %s' % self._Group(self.term.source_port)) # destination port if self.term.destination_port: config.Append('destination-port %s' % self._Group(self.term.destination_port)) # append any options beloging in the from {} section for next_str in from_str: config.Append(next_str) # packet length if self.term.packet_length: config.Append('packet-length %s;' % self.term.packet_length) # fragment offset if self.term.fragment_offset: config.Append('fragment-offset %s;' % self.term.fragment_offset) # icmp-types icmp_types = [''] if self.term.icmp_type: icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, self.term.protocol, self.term_type) if icmp_types != ['']: config.Append('icmp-type %s' % self._Group(icmp_types)) if self.term.icmp_code: config.Append('icmp-code %s' % self._Group(self.term.icmp_code)) if self.term.ether_type: config.Append('ether-type %s' % self._Group(self.term.ether_type)) # protocol if self.term.protocol: # both are supported on JunOS, but only icmp6 is supported # on SRX loopback stateless filter, so set all instances of icmpv6 to icmp6. if 'icmpv6' in self.term.protocol: loc = self.term.protocol.index('icmpv6') self.term.protocol[loc] = 'icmp6' config.Append(family_keywords['protocol'] + ' ' + self._Group(self.term.protocol)) # protocol if self.term.protocol_except: # same as above if 'icmpv6' in self.term.protocol_except: loc = self.term.protocol_except.index('icmpv6') self.term.protocol_except[loc] = 'icmp6' config.Append(family_keywords['protocol-except'] + ' ' + self._Group(self.term.protocol_except)) if self.term.traffic_type: config.Append('traffic-type %s' % self._Group(self.term.traffic_type)) if self.term.precedence: # precedence may be a single integer, or a space separated list policy_precedences = set() # precedence values may only be 0 through 7 for precedence in self.term.precedence: if int(precedence) in range(0, 8): policy_precedences.add(precedence) else: raise PrecedenceError('Precedence value %s is out of bounds in %s' % (precedence, self.term.name)) config.Append('precedence %s' % self._Group(sorted(policy_precedences))) # DSCP Match if self.term.dscp_match: if self.term_type == 'inet6': config.Append('traffic-class [ %s ];' % ( ' '.join(self.term.dscp_match))) else: config.Append('dscp [ %s ];' % ' '.join(self.term.dscp_match)) # DSCP Except if self.term.dscp_except: if self.term_type == 'inet6': config.Append('traffic-class-except [ %s ];' % ( ' '.join(self.term.dscp_except))) else: config.Append('dscp-except [ %s ];' % ' '.join(self.term.dscp_except)) if self.term.hop_limit: # Only generate a hop-limit if inet6, inet4 has not hop-limit. if self.term_type == 'inet6': config.Append('hop-limit %s;' % (self.term.hop_limit)) # flexible-match if self.term.flexible_match_range: config.Append('flexible-match-range {') for fm_opt in self.term.flexible_match_range: config.Append('%s %s;' % (fm_opt[0], fm_opt[1])) config.Append('}') config.Append('}') # end from { ... } #### # ACTIONS go below here #### # If the action is only one line, include it in the same line as "then " # statement. 
# For example, if the action is only accept, it should be: # "then accept;" rather than: # "then { # accept; # }" # self.CheckTerminatingAction() unique_actions = set(self.extra_actions) if not self.term.routing_instance: unique_actions.update(self.term.action) if self.term.encapsulate: unique_actions.add('encapsulate') if self.term.decapsulate: unique_actions.add('decapsulate') if len(unique_actions) <= 1: for action in [ self.term.logging, self.term.routing_instance, self.term.counter, self.term.policer, self.term.qos, self.term.loss_priority, self.term.dscp_set, self.term.next_ip, self.term.traffic_class_count, self.term.port_mirror ]: if action: try: unique_actions.update(action) except TypeError: unique_actions.add(action) if len(unique_actions) > 1: break if len(unique_actions) == 1: # b/21795531: Juniper device treats a set of IPv4 actions differently # than any other actions. # For example, if the term is in IPv4 and the action is only discard, # it should be: # "then { # discard; # }" rather than: # "then discard;" current_action = self.ACTIONS.get(unique_actions.pop(), 'next_ip') if (self.term_type == 'inet' and current_action in ['discard', 'reject', 'reject tcp-reset'] ) or (self.term_type == 'inet6' and current_action in ['reject', 'reject tcp-reset']): config.Append('then {') config.Append('%s;' % current_action) config.Append('}') elif current_action == 'next_ip': self.NextIpCheck(self.term.next_ip, self.term.name) config.Append('then {') if self.term.next_ip[0].version == 4: config.Append('next-ip %s;' % str(self.term.next_ip[0])) else: config.Append('next-ip6 %s;' % str(self.term.next_ip[0])) config.Append('}') elif current_action == 'encapsulate': config.Append('then {') config.Append('encapsulate %s;' % str(self.term.encapsulate)) config.Append('}') elif current_action == 'decapsulate': config.Append('then {') config.Append('decapsulate %s;' % str(self.term.decapsulate)) config.Append('}') else: config.Append('then %s;' % current_action) elif len(unique_actions) > 1: config.Append('then {') # logging if self.term.logging: for log_target in self.term.logging: if str(log_target) == 'local': config.Append('log;') else: config.Append('syslog;') if self.term.routing_instance: config.Append('routing-instance %s;' % self.term.routing_instance) if self.term.counter: config.Append('count %s;' % self.term.counter) if self.term.traffic_class_count: config.Append('traffic-class-count %s;' % self.term.traffic_class_count) oid_length = 128 if self.term.policer: config.Append('policer %s;' % self.term.policer) if len(self.term.policer) > oid_length: logging.warning('WARNING: %s is longer than %d bytes. Due to ' 'limitation in JUNOS, OIDs longer than %dB can ' 'cause SNMP timeout issues.', self.term.policer, oid_length, oid_length) if self.term.qos: config.Append('forwarding-class %s;' % self.term.qos) if self.term.port_mirror: config.Append('port-mirror;') if self.term.loss_priority: config.Append('loss-priority %s;' % self.term.loss_priority) if self.term.next_ip: self.NextIpCheck(self.term.next_ip, self.term.name) if self.term.next_ip[0].version == 4: config.Append('next-ip %s;' % str(self.term.next_ip[0])) else: config.Append('next-ip6 %s;' % str(self.term.next_ip[0])) if self.term.encapsulate: config.Append('encapsulate %s;' % str(self.term.encapsulate)) if self.term.decapsulate: config.Append('decapsulate %s;' % str(self.term.decapsulate)) for action in self.extra_actions: config.Append(action + ';') # If there is a routing-instance defined, skip reject/accept/etc actions. 
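# Illustrative sketch (not capirca code): a condensed restatement of the
# single-action "then" rendering rules implemented above -- one action
# normally collapses onto a single line, except for the discard/reject cases
# that JunOS expects inside braces. `render_then` is a hypothetical helper.
def render_then(term_type, action):
  braces_required = {
      'inet': {'discard', 'reject', 'reject tcp-reset'},
      'inet6': {'reject', 'reject tcp-reset'},
  }
  if action in braces_required.get(term_type, set()):
    return '\n'.join(['then {', '    %s;' % action, '}'])
  return 'then %s;' % action
# render_then('inet', 'accept')  -> 'then accept;'
# render_then('inet', 'discard') -> 'then {\n    discard;\n}'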
if not self.term.routing_instance: for action in self.term.action: config.Append(self.ACTIONS.get(action) + ';') # DSCP SET if self.term.dscp_set: if self.term_type == 'inet6': config.Append('traffic-class %s;' % self.term.dscp_set) else: config.Append('dscp %s;' % self.term.dscp_set) config.Append('}') # end then{...} config.Append('}') # end term accept-foo-to-bar { ... } return str(config) @staticmethod def NextIpCheck(next_ip, term_name): if len(next_ip) > 1: raise JuniperNextIpError('The following term has more ' 'than one next IP value: %s' % term_name) if next_ip[0].num_addresses > 1: raise JuniperNextIpError('The following term has a subnet ' 'instead of a host: %s' % term_name) def CheckTerminatingAction(self): action = set(self.term.action) if self.term.encapsulate: action.add(self.term.encapsulate) if self.term.decapsulate: action.add(self.term.decapsulate) if self.term.routing_instance: action.add(self.term.routing_instance) if len(action) > 1: raise JuniperMultipleTerminatingActionError( 'The following term has multiple terminating actions: %s' % self.term.name) def _MinimizePrefixes(self, include, exclude): """Calculate a minimal set of prefixes for Juniper match conditions. Args: include: Iterable of nacaddr objects, prefixes to match. exclude: Iterable of nacaddr objects, prefixes to exclude. Returns: A tuple (I,E) where I and E are lists containing the minimized versions of include and exclude, respectively. The order of each input list is preserved. """ # Remove any included prefixes that have EXACT matches in the # excluded list. Excluded prefixes take precedence on the router # regardless of the order in which the include/exclude are applied. exclude_set = set(exclude) include_result = [ip for ip in include if ip not in exclude_set] # Every address match condition on a Juniper firewall filter # contains an implicit "0/0 except" or "0::0/0 except". If an # excluded prefix is not contained within any less-specific prefix # in the included set, we can elide it. In other words, if the # next-less-specific prefix is the implicit "default except", # there is no need to configure the more specific "except". # # TODO(kbrint): this could be made more efficient with a Patricia trie. exclude_result = [] for exclude_prefix in exclude: for include_prefix in include_result: if exclude_prefix.subnet_of(include_prefix): exclude_result.append(exclude_prefix) break return include_result, exclude_result def _Comment(self, addr, exclude=False, line_length=132): """Returns address comment field if it exists. Args: addr: nacaddr.IPv4 object (?) exclude: bool - address excludes have different indentations line_length: integer - this is the length to which a comment will be truncated, no matter what. ie, a 1000 character comment will be truncated to line_length, and then split. if 0, the whole comment is kept. the current default of 132 is somewhat arbitrary. Returns: List of strings. Notes: This method tries to intelligently split long comments up. if we've managed to summarize 4 /32's into a /30, each with a nacaddr text field of something like 'foobar N', normal concatination would make the resulting rendered comment look in mondrian like source-address { ... 1.1.1.0/30; /* foobar1, foobar2, foobar3, foo bar4 */ b/c of the line splitting at 80 chars. this method will split the comments at word breaks and make the previous example look like source-address { .... 1.1.1.0/30; /* foobar1, foobar2, foobar3, ** foobar4 */ much cleaner. 
""" rval = [] if self.noverbose: return rval # indentation, for multi-line comments, ensures that subsquent lines # are correctly alligned with the first line of the comment. indentation = 0 if exclude: # len('1.1.1.1/32 except;') == 21 indentation = 21 + self._DEFAULT_INDENT else: # len('1.1.1.1/32;') == 14 indentation = 14 + self._DEFAULT_INDENT # length_eol is the width of the line; b/c of the addition of the space # and the /* characters, it needs to be a little less than the actual width # to keep from wrapping length_eol = 77 - indentation if isinstance(addr, (nacaddr.IPv4, nacaddr.IPv6, summarizer.DSMNet)): if addr.text: if line_length == 0: # line_length of 0 means that we don't want to truncate the comment. line_length = len(addr.text) # There should never be a /* or */, but be safe and ignore those # comments if addr.text.find('/*') >= 0 or addr.text.find('*/') >= 0: logging.debug('Malformed comment [%s] ignoring', addr.text) else: text = addr.text[:line_length] comment = ' /*' while text: # split the line if len(text) > length_eol: new_length_eol = text[:length_eol].rfind(' ') if new_length_eol <= 0: new_length_eol = length_eol else: new_length_eol = length_eol # what line am I gunna output? line = comment + ' ' + text[:new_length_eol].strip() # truncate what's left text = text[new_length_eol:] # setup the comment and indentation for the next go-round comment = ' ' * indentation + '**' rval.append(line) rval[-1] += ' */' else: # should we be paying attention to any other addr type? logging.debug('Ignoring non IPv4 or IPv6 address: %s', addr) return rval def _Group(self, group, lc=True): """If 1 item return it, else return [ item1 item2 ]. Args: group: a list. could be a list of strings (protocols) or a list of tuples (ports) lc: return a lower cased result for text. Default is True. Returns: rval: a string surrounded by '[' and '];' if len(group) > 1 or with just ';' appended if len(group) == 1 """ def _FormattedGroup(el, lc=True): """Return the actual formatting of an individual element. Args: el: either a string (protocol) or a tuple (ports) lc: return lower cased result for text. Default is True. Returns: string: either the lower()'ed string or the ports, hyphenated if they're a range, or by itself if it's not. """ if isinstance(el, str) or isinstance(el, str): if lc: return el else: return el.lower() elif isinstance(el, int): return str(el) # type is a tuple below here elif el[0] == el[1]: return '%d' % el[0] else: return '%d-%d' % (el[0], el[1]) if len(group) > 1: rval = '[ ' + ' '.join([_FormattedGroup(x) for x in group]) + ' ];' else: rval = _FormattedGroup(group[0]) + ';' return rval class Juniper(aclgenerator.ACLGenerator): """JCL rendering class. This class takes a policy object and renders the output into a syntax which is understood by juniper routers. Attributes: pol: policy.Policy object """ _PLATFORM = 'juniper' _DEFAULT_PROTOCOL = 'ip' _SUPPORTED_AF = frozenset(('inet', 'inet6', 'bridge', 'mixed')) _TERM = Term SUFFIX = '.jcl' def _BuildTokens(self): """Build supported tokens for platform. 
Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'address', 'restrict_address_family', 'counter', 'decapsulate', 'destination_prefix', 'destination_prefix_except', 'dscp_except', 'dscp_match', 'dscp_set', 'encapsulate', 'ether_type', 'filter_term', 'flexible_match_range', 'forwarding_class', 'forwarding_class_except', 'fragment_offset', 'hop_limit', 'icmp_code', 'logging', 'loss_priority', 'next_ip', 'owner', 'packet_length', 'policer', 'port', 'port_mirror', 'precedence', 'protocol_except', 'qos', 'routing_instance', 'source_prefix', 'source_prefix_except', 'traffic_type', 'traffic_class_count', 'ttl'} supported_sub_tokens.update({ 'option': { 'established', 'first-fragment', 'is-fragment', # TODO(sneakywombat): add all options to lex. '.*', # make ArbitraryOptions work, yolo. 'sample', 'tcp-established', 'tcp-initial', 'inactive'} }) return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.juniper_policies = [] current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) # Check for the position independent options and remove them from # the list. interface_specific = 'not-interface-specific' not in filter_options[1:] enable_dsmo = 'enable_dsmo' in filter_options[1:] noverbose = 'noverbose' in filter_options[1:] filter_enhanced_mode = 'filter_enhanced_mode' in filter_options[1:] filter_direction = None if 'ingress' in filter_options[1:]: filter_direction = 'ingress' elif 'egress' in filter_options[1:]: filter_direction = 'egress' interface_type = None if 'physical' in filter_options[1:]: interface_type = 'physical' elif 'loopback' in filter_options[1:]: interface_type = 'loopback' if not interface_specific: filter_options.remove('not-interface-specific') if enable_dsmo: filter_options.remove('enable_dsmo') if filter_enhanced_mode: filter_options.remove('filter_enhanced_mode') # default to inet4 filters filter_type = 'inet' if len(filter_options) > 1: filter_type = filter_options[1] if filter_type == 'mixed': filter_types_to_process = ['inet', 'inet6'] else: filter_types_to_process = [filter_type] for filter_type in filter_types_to_process: filter_name_suffix = '' # If mixed filter_type, will append 4 or 6 to the filter name if len(filter_types_to_process) > 1: if filter_type == 'inet': filter_name_suffix = '4' if filter_type == 'inet6': filter_name_suffix = '6' term_names = set() new_terms = [] for term in terms: # Ignore if the term is for a different AF if term.restrict_address_family and term.restrict_address_family != filter_type: continue # if inactive is set, deactivate the term and remove the option. 
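# Illustrative sketch (not capirca code): the header handling above expands a
# single "mixed" filter into separate inet and inet6 filters, appending "4"
# or "6" to the filter name. A standalone restatement of just that expansion;
# `expand_filter_types` is a hypothetical helper.
def expand_filter_types(filter_name, filter_type='inet'):
  if filter_type != 'mixed':
    return [(filter_name, filter_type)]
  return [(filter_name + '4', 'inet'), (filter_name + '6', 'inet6')]
# expand_filter_types('edge-filter', 'mixed')
#   -> [('edge-filter4', 'inet'), ('edge-filter6', 'inet6')]
# expand_filter_types('edge-filter', 'inet6') -> [('edge-filter', 'inet6')]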
if 'inactive' in term.option: term.inactive = True term.option.remove('inactive') term.name = self.FixTermLength(term.name) if term.name in term_names: raise JuniperDuplicateTermError('You have multiple terms named: %s' % term.name) term_names.add(term.name) term = self.FixHighPorts(term, af=filter_type) if not term: continue if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if 'is-fragment' in term.option and filter_type == 'inet6': raise JuniperFragmentInV6Error('The term %s uses "is-fragment" but ' 'is a v6 policy.' % term.name) new_terms.append(self._TERM(term, filter_type, enable_dsmo, noverbose, filter_direction, interface_type)) self.juniper_policies.append((header, filter_name + filter_name_suffix, filter_type, interface_specific, filter_enhanced_mode, new_terms)) def __str__(self): config = Config() for (header, filter_name, filter_type, interface_specific, filter_enhanced_mode, terms ) in self.juniper_policies: # add the header information config.Append('firewall {') config.Append('family %s {' % filter_type) config.Append('/*') # we want the acl to contain id and date tags, but p4 will expand # the tags here when we submit the generator, so we have to trick # p4 into not knowing these words. like taking c-a-n-d-y from a # baby. for line in aclgenerator.AddRepositoryTags('** '): config.Append(line) config.Append('**') for comment in header.comment: for line in comment.split('\n'): config.Append('** ' + line) config.Append('*/') config.Append('replace: filter %s {' % filter_name) if interface_specific: config.Append('interface-specific;') if filter_enhanced_mode: config.Append('enhanced-mode;') for term in terms: term_str = str(term) if term_str: config.Append(term_str, verbatim=True) config.Append('}') # filter { ... } config.Append('}') # family inet { ... } config.Append('}') # firewall { ... } return str(config) + '\n' capirca-2.0.9/capirca/lib/juniperevo.py000066400000000000000000000110151437377527500200560ustar00rootroot00000000000000# Copyright 2022 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Juniper EVO generator. This is a subclass of Juniper generator. Juniper EVO software (Junos EVO) uses the same syntax as regular Juniper (Junos) ACLs, with minor differences. This subclass effects those differences. 
""" from capirca.lib import aclgenerator from capirca.lib import juniper class Term(juniper.Term): """Single Juniper EVO term representation.""" _PLATFORM = 'juniperevo' _INGRESS = 'ingress' _EGRESS = 'egress' _INET6 = 'inet6' _PROTOCOL = 'protocol' _PROTOCOL_EXCEPT = 'protocol-except' _NEXT_HEADER = 'next-header' _NEXT_HEADER_EXCEPT = 'next-header-except' _PAYLOAD_PROTOCOL = 'payload-protocol' _PAYLOAD_PROTOCOL_EXCEPT = 'payload-protocol-except' def __str__(self): self._Ipv6ProtocolMatch() term_config = super().__str__() # Reset to original syntax. self._TERM_TYPE[self._INET6][self._PROTOCOL] = self._NEXT_HEADER self._TERM_TYPE[self._INET6][ self._PROTOCOL_EXCEPT] = self._NEXT_HEADER_EXCEPT return term_config def _Ipv6ProtocolMatch(self): """Use the correct syntax to match protocols after the IPv6 header. Refer to juniperevo.md in documentation for matching syntax. Returns: None Raises: FilterDirectionError: If a direction is not provided for the filter e.g. ingress or egress """ self.extension_headers = ['hop-by-hop', 'fragment'] # 'hopopt' is renamed to 'hop-by-hop' in juniper base class, add an # additional key with the same protocol number to aid renaming. self.PROTO_MAP['hop-by-hop'] = 0 if self.term_type == self._INET6: if self.filter_direction != self._INGRESS and self.filter_direction != self._EGRESS: raise FilterDirectionError('a direction must be specified for Junos ' 'EVO IPv6 filter; this is required to ' 'render the correct syntax when matching ' 'protocols headers that follow the IPv6 ' 'header') # Default to rendering filter for physical interfaces. if self.interface_type is None: self.interface_type = 'physical' # Ingress filter. if self.filter_direction == self._INGRESS: if self.interface_type == 'physical': if not any(header in self.term.protocol for header in self.extension_headers): self._TERM_TYPE[self._INET6][ self._PROTOCOL] = self._PAYLOAD_PROTOCOL if not any(header in self.term.protocol_except for header in self.extension_headers): self._TERM_TYPE[self._INET6][ self._PROTOCOL_EXCEPT] = self._PAYLOAD_PROTOCOL_EXCEPT if self.interface_type == 'loopback': self._TERM_TYPE[self._INET6][self._PROTOCOL] = self._PAYLOAD_PROTOCOL self._TERM_TYPE[self._INET6][ self._PROTOCOL_EXCEPT] = self._PAYLOAD_PROTOCOL_EXCEPT self.term.protocol = aclgenerator.ProtocolNameToNumber( self.term.protocol, self.extension_headers, self.PROTO_MAP) self.term.protocol_except = aclgenerator.ProtocolNameToNumber( self.term.protocol_except, self.extension_headers, self.PROTO_MAP) # Egress filter. if self.filter_direction == self._EGRESS: self._TERM_TYPE[self._INET6][self._PROTOCOL] = self._PAYLOAD_PROTOCOL self._TERM_TYPE[self._INET6][ self._PROTOCOL_EXCEPT] = self._PAYLOAD_PROTOCOL_EXCEPT self.term.protocol = aclgenerator.ProtocolNameToNumber( self.term.protocol, self.extension_headers, self.PROTO_MAP) self.term.protocol_except = aclgenerator.ProtocolNameToNumber( self.term.protocol_except, self.extension_headers, self.PROTO_MAP) class JuniperEvo(juniper.Juniper): """Juniper EVO generator.""" _PLATFORM = 'juniperevo' SUFFIX = '.evojcl' _TERM = Term class Error(Exception): pass class FilterDirectionError(Error): pass capirca-2.0.9/capirca/lib/junipermsmpc.py000066400000000000000000000650211437377527500204120ustar00rootroot00000000000000# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Juniper MS-MPC generator for capirca.""" import datetime import logging from capirca.lib import aclgenerator from capirca.lib import juniper from capirca.lib import nacaddr import six MAX_IDENTIFIER_LEN = 55 # It is really 63, but leaving room for added chars class Term(juniper.Term): """Representation of an individual Juniper MS-MPC term. The __str__ method must be implemented. Args: term policy.Term object """ _PLATFORM = 'msmpc' _DEFAULT_INDENT = 20 _ACTIONS = {'accept': 'accept', 'deny': 'discard', 'reject': 'reject'} # msmpc supports a limited number of protocol names # https://www.juniper.net/documentation/us/en/software/junos/security-policies/topics/ref/statement/applications-edit-protocol.html _SUPPORTED_PROTOCOL_NAMES = ( 'ah', 'egp', 'esp', 'gre', 'icmp', 'icmpv6', 'igmp', 'ipip', #'node', A pseudo-protocol which may require additional handling 'ospf', 'pim', 'rsvp', 'sctp', 'tcp', 'udp') def __init__(self, term, term_type, noverbose, filter_name): enable_dsmo = False super().__init__(term, term_type, enable_dsmo, noverbose) self.term = term self.term_type = term_type self.noverbose = noverbose self.filter_name = filter_name for prot in self.term.protocol: if prot not in self._SUPPORTED_PROTOCOL_NAMES: loc = self.term.protocol.index(prot) self.term.protocol[loc] = str(self.PROTO_MAP.get(prot, prot)) def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self._PLATFORM not in self.term.platform: return '' if self.term.platform_exclude: if self._PLATFORM in self.term.platform_exclude: return '' if self.enable_dsmo: raise NotImplementedError('enable_dsmo not implemented for msmpc') ret_str = juniper.Config(indent=self._DEFAULT_INDENT) # COMMENTS # this deals just fine with multi line comments, but we could probably # output them a little cleaner; do things like make sure the # len(output) < 80, etc. Note, if 'noverbose' is set for the filter, skip # all comment processing. if not self.noverbose: if self.term.owner: self.term.comment.append('Owner: %s' % self.term.owner) if self.term.comment: ret_str.Append('/*') for comment in self.term.comment: for line in comment.split('\n'): ret_str.Append('** ' + line) ret_str.Append('*/') # Term verbatim output - this will skip over normal term creation # code. Warning generated from policy.py if appropriate. if self.term.verbatim: for next_term in self.term.verbatim: if next_term[0] == self._PLATFORM: ret_str.Append(str(next_term[1]), verbatim=True) return str(ret_str) # Determine whether there are any match conditions for the term. 
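# Illustrative sketch (not capirca code): MS-MPC accepts only the short list
# of protocol names above, so anything else is rewritten to its protocol
# number before rendering. A standalone restatement with a tiny, partial
# protocol map (the real mapping is aclgenerator.Term.PROTO_MAP).
_MSMPC_NAMES = {'ah', 'egp', 'esp', 'gre', 'icmp', 'icmpv6', 'igmp', 'ipip',
                'ospf', 'pim', 'rsvp', 'sctp', 'tcp', 'udp'}
_PARTIAL_PROTO_MAP = {'vrrp': 112, 'l2tp': 115, 'sctp': 132}

def normalize_protocols(protocols):
  return [p if p in _MSMPC_NAMES else str(_PARTIAL_PROTO_MAP.get(p, p))
          for p in protocols]
# normalize_protocols(['tcp', 'vrrp']) -> ['tcp', '112']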
has_match_criteria = ( self.term.address or self.term.dscp_except or self.term.dscp_match or self.term.destination_address or self.term.destination_port or self.term.destination_prefix or self.term.destination_prefix_except or self.term.encapsulate or self.term.ether_type or self.term.flexible_match_range or self.term.forwarding_class or self.term.forwarding_class_except or self.term.fragment_offset or self.term.hop_limit or self.term.next_ip or self.term.port or self.term.precedence or self.term.protocol or self.term.protocol_except or self.term.source_address or self.term.source_port or self.term.source_prefix or self.term.source_prefix_except or self.term.traffic_type or self.term.ttl) suffixes = [] duplicate_term = False has_icmp = 'icmp' in self.term.protocol has_icmpv6 = 'icmpv6' in self.term.protocol has_v4_ip = self.term.GetAddressOfVersion( 'source_address', self.AF_MAP.get('inet')) or self.term.GetAddressOfVersion( 'source_address_exclude', self.AF_MAP.get('inet')) or self.term.GetAddressOfVersion( 'destination_address', self.AF_MAP.get('inet')) or self.term.GetAddressOfVersion( 'destination_address_exclude', self.AF_MAP.get('inet')) has_v6_ip = self.term.GetAddressOfVersion( 'source_address', self.AF_MAP.get('inet6')) or self.term.GetAddressOfVersion( 'source_address_exclude', self.AF_MAP.get('inet6')) or self.term.GetAddressOfVersion( 'destination_address', self.AF_MAP.get('inet6')) or self.term.GetAddressOfVersion( 'destination_address_exclude', self.AF_MAP.get('inet6')) if self.term_type == 'mixed': if not (has_v4_ip or has_v6_ip): suffixes = ['inet'] elif not has_v6_ip: suffixes = ['inet'] elif not has_v4_ip: suffixes = ['inet6'] else: suffixes = ['inet', 'inet6'] duplicate_term = True if not suffixes and self.term_type in ['inet', 'inet6']: suffixes = [self.term_type] for suffix in suffixes: if self.term_type == 'mixed' and (not (has_icmp and has_icmpv6)) and ( has_v4_ip and has_v6_ip): if (has_icmp and suffix != 'inet') or (has_icmpv6 and suffix != 'inet6'): continue source_address = self.term.GetAddressOfVersion('source_address', self.AF_MAP.get(suffix)) source_address_exclude = self.term.GetAddressOfVersion( 'source_address_exclude', self.AF_MAP.get(suffix)) source_address, source_address_exclude = self._MinimizePrefixes( source_address, source_address_exclude) destination_address = self.term.GetAddressOfVersion( 'destination_address', self.AF_MAP.get(suffix)) destination_address_exclude = self.term.GetAddressOfVersion( 'destination_address_exclude', self.AF_MAP.get(suffix)) destination_address, destination_address_exclude = self._MinimizePrefixes( destination_address, destination_address_exclude) if ((not source_address) and self.term.GetAddressOfVersion( 'source_address', self.AF_MAP.get('mixed')) and not source_address_exclude) or ( (not destination_address) and self.term.GetAddressOfVersion( 'destination_address', self.AF_MAP.get('mixed')) and not destination_address_exclude): continue if ((has_icmpv6 and not has_icmp and suffix == 'inet') or (has_icmp and not has_icmpv6 and suffix == 'inet6')) and self.term_type != 'mixed': logging.debug( self.NO_AF_LOG_PROTO.substitute( term=self.term.name, proto=', '.join(self.term.protocol), af=suffix)) return '' # NAME # if the term is inactive we have to set the prefix if self.term.inactive: term_prefix = 'inactive:' else: term_prefix = '' ret_str.Append( '%s term %s%s {' % (term_prefix, self.term.name, '-' + suffix if duplicate_term else '')) # We only need a "from {" clause if there are any conditions to match. 
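# Illustrative sketch (not capirca code): how the suffix selection above plays
# out for a "mixed" msmpc filter, ignoring the icmp/icmpv6 special-casing that
# follows. A term is emitted once per address family it can actually match and
# only gets a "-inet"/"-inet6" name suffix when both copies are emitted.
# `msmpc_term_names` is a hypothetical helper.
def msmpc_term_names(term_name, has_v4_ip, has_v6_ip):
  if has_v4_ip and has_v6_ip:
    return [term_name + '-inet', term_name + '-inet6']
  # Address-free terms default to inet; single-family terms keep their name.
  return [term_name]
# msmpc_term_names('allow-dns', True, True)  -> ['allow-dns-inet', 'allow-dns-inet6']
# msmpc_term_names('allow-dns', False, True) -> ['allow-dns']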
if has_match_criteria: ret_str.Append('from {') # SOURCE ADDRESS if source_address or source_address_exclude: ret_str.Append('source-address {') if source_address: for saddr in source_address: for comment in self._Comment(saddr): ret_str.Append('%s' % comment) if saddr.version == 6 and 0 < saddr.prefixlen < 16: for saddr2 in saddr.subnets(new_prefix=16): ret_str.Append('%s;' % saddr2) else: if saddr == nacaddr.IPv6('0::0/0'): saddr = 'any-ipv6' elif saddr == nacaddr.IPv4('0.0.0.0/0'): saddr = 'any-ipv4' ret_str.Append('%s;' % saddr) # SOURCE ADDRESS EXCLUDE if source_address_exclude: for ex in source_address_exclude: for comment in self._Comment(ex): ret_str.Append('%s' % comment) if ex.version == 6 and 0 < ex.prefixlen < 16: for ex2 in ex.subnets(new_prefix=16): ret_str.Append('%s except;' % ex2) else: if ex == nacaddr.IPv6('0::0/0'): ex = 'any-ipv6' elif ex == nacaddr.IPv4('0.0.0.0/0'): ex = 'any-ipv4' ret_str.Append('%s except;' % ex) ret_str.Append('}') # source-address {...} # DESTINATION ADDRESS if destination_address or destination_address_exclude: ret_str.Append('destination-address {') if destination_address: for daddr in destination_address: for comment in self._Comment(daddr): ret_str.Append('%s' % comment) if daddr.version == 6 and 0 < daddr.prefixlen < 16: for daddr2 in daddr.subnets(new_prefix=16): ret_str.Append('%s;' % daddr2) else: if daddr == nacaddr.IPv6('0::0/0'): daddr = 'any-ipv6' elif daddr == nacaddr.IPv4('0.0.0.0/0'): daddr = 'any-ipv4' ret_str.Append('%s;' % daddr) # DESTINATION ADDRESS EXCLUDE if destination_address_exclude: for ex in destination_address_exclude: for comment in self._Comment(ex): ret_str.Append('%s' % comment) if ex.version == 6 and 0 < ex.prefixlen < 16: for ex2 in ex.subnets(new_prefix=16): ret_str.Append('%s except;' % ex2) else: if ex == nacaddr.IPv6('0::0/0'): ex = 'any-ipv6' elif ex == nacaddr.IPv4('0.0.0.0/0'): ex = 'any-ipv4' ret_str.Append('%s except;' % ex) ret_str.Append('}') # destination-address {...} # source prefix list if self.term.source_prefix or self.term.source_prefix_except: for pfx in self.term.source_prefix: ret_str.Append('source-prefix-list ' + pfx + ';') for epfx in self.term.source_prefix_except: ret_str.Append('source-prefix-list ' + epfx + ' except;') # destination prefix list if self.term.destination_prefix or self.term.destination_prefix_except: for pfx in self.term.destination_prefix: ret_str.Append('destination-prefix-list ' + pfx + ';') for epfx in self.term.destination_prefix_except: ret_str.Append('destination-prefix-list ' + epfx + ' except;') # APPLICATION if (self.term.source_port or self.term.destination_port or self.term.icmp_type or self.term.protocol): if hasattr(self.term, 'replacement_application_name'): ret_str.Append('application-sets ' + self.term.replacement_application_name + '-app;') else: ret_str.Append('application-sets ' + self.filter_name[:((MAX_IDENTIFIER_LEN) // 2)] + self.term.name[-((MAX_IDENTIFIER_LEN) // 2):] + '-app;') ret_str.Append('}') # from {...} ret_str.Append('then {') # ACTION for action in self.term.action: ret_str.Append(self._ACTIONS.get(str(action)) + ';') if self.term.logging and 'disable' not in [ x.value for x in self.term.logging ]: ret_str.Append('syslog;') ret_str.Append('}') # then {...} ret_str.Append('}') # term {...} return str(ret_str) class JuniperMSMPC(aclgenerator.ACLGenerator): """Juniper MSMPC rendering class. This class takes a policy object and renders output into a syntax which is understood ny Juniper routers with MS-MPC cards. 
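  The rendered output (see __str__) is a Junos "groups" stanza wrapping a
  services { stateful-firewall { rule ... } } block together with the matching
  "applications" definitions, finished with an apply-groups statement that
  references the filter name.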
Args: pol: policy.Policy object """ _PLATFORM = 'msmpc' SUFFIX = '.msmpc' _SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed')) _AF_MAP = {'inet': 4, 'inet6': 6, 'mixed': None} _AF_ICMP_MAP = {'icmp': 'inet', 'icmpv6': 'inet6'} _SUPPORTED_DIRECTION = { '': 'input-output', 'ingress': 'input', 'egress': 'output', } _OPTIONAL_SUPPORTED_KEYWORDS = frozenset([ 'expiration', ]) def __init__(self, pol, exp_info): self.applications = {} super().__init__(pol, exp_info) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= { 'destination_prefix', 'destination_prefix_except', 'icmp_code', 'logging', 'owner', 'source_prefix', 'source_prefix_except' } supported_sub_tokens.update({ 'option': { 'established', # TODO(sneakywombat): add all options to lex. '.*', # make ArbitraryOptions work, yolo. 'tcp-established', 'inactive' } }) return supported_tokens, supported_sub_tokens def _BuildPort(self, ports): """Transform specified ports into list and ranges. Args: ports: a policy terms list of ports Returns: port_list: list of ports and port ranges """ port_list = [] for p in ports: if p[0] == p[1]: port_list.append(str(p[0])) else: port_list.append('%s-%s' % (str(p[0]), str(p[1]))) return port_list def _GenerateApplications(self, filter_name): target = [] apps_set_list = [] target.append('applications {') done_apps = [] for app in sorted(self.applications[filter_name], key=lambda x: x['name']): app_list = [] if app in done_apps: continue if app['protocol'] or app['sport'] or app['dport'] or app['icmp-type']: # generate ICMP statements if app['icmp-type']: if app['timeout']: timeout = app['timeout'] else: timeout = 60 num_terms = len(app['protocol']) * len(app['icmp-type']) apps_set_list.append('application-set ' + app['name'] + '-app {') for i in range(num_terms): apps_set_list.append('application ' + app['name'] + '-app%d' % (i + 1) + ';') apps_set_list.append('}') # application-set {...} term_counter = 0 for i, code in enumerate(app['icmp-type']): for proto in app['protocol']: target.append('application ' + app['name'] + '-app%d' % (term_counter + 1) + ' {') if proto == 'icmp': target.append('application-protocol %s;' % proto) target.append('protocol %s;' % proto) target.append('%s-type %s;' % (proto, str(code))) if app['icmp-code']: target.append('%s-code %s;' % (proto, self._Group(app['icmp-code']))) if int(timeout): target.append('inactivity-timeout %s;' % int(timeout)) target.append('}') # application {...} term_counter += 1 # generate non-ICMP statements else: i = 1 apps_set_list.append('application-set ' + app['name'] + '-app {') for proto in app['protocol'] or ['']: for sport in app['sport'] or ['']: for dport in app['dport'] or ['']: chunks = [] if proto: chunks.append('protocol %s;' % proto) if sport and ('udp' in proto or 'tcp' in proto): chunks.append('source-port %s;' % sport) if dport and ('udp' in proto or 'tcp' in proto): chunks.append('destination-port %s;' % dport) if app['timeout']: chunks.append(' inactivity-timeout %d;' % int(app['timeout'])) if chunks: apps_set_list.append('application ' + app['name'] + '-app%d;' % i) app_list.append('application ' + app['name'] + '-app%d {' % i) for chunk in chunks: app_list.append(chunk) app_list.append('}') i += 1 apps_set_list.append('}') done_apps.append(app) if app_list: for item in app_list: target.append(item) for item in apps_set_list: target.append(item) target.append('}') # Return 
the output only if there is content inside of # the "applications {\n}" lines, otherwise return nothing. if len(target) > 2: return target else: return [] def _TranslatePolicy(self, pol, exp_info): current_date = datetime.date.today() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) self.junipermsmpc_policies = [] for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) filter_options.remove(filter_name) filter_direction = None filter_type = None noverbose = 'noverbose' in filter_options self.applications[filter_name] = [] if noverbose: # noverbose is a strict boolean, remove it # prior to iterating through the other options # that require additional processing. filter_options.remove('noverbose') for filter_opt in filter_options: # validate address families if filter_opt in self._SUPPORTED_AF: if not filter_type: filter_type = filter_opt continue else: raise ConflictingTargetOptionsError( 'only one address family can be ' 'specified per header "%s"' % ' '.join(filter_options)) # validate direction if filter_opt in self._SUPPORTED_DIRECTION.keys(): if not filter_direction: filter_direction = self._SUPPORTED_DIRECTION.get(filter_opt) continue else: raise ConflictingTargetOptionsError('only one direction can be ' 'specified per header "%s"' % ' '.join(filter_options)) raise UnsupportedHeaderError( 'MSMPC Generator currently does not support ' '%s as a header option "%s"' % (filter_opt, ' '.join(filter_options))) if not filter_direction: filter_direction = self._SUPPORTED_DIRECTION.get('') if not filter_type: filter_type = 'mixed' term_names = set() new_terms = [] for term in terms: # Application sets need to be unique system-wide, so we construct # a name from a combination of the filter and term names, shortening # to the roughly half of the max identifier length for each part. # When shortening, we take the start of the filter name and the end of # the term name in a hope that we omit the most common bits # like -inbound and accept-. modified_term_name = filter_name[:( (MAX_IDENTIFIER_LEN) // 2)] + term.name[-( (MAX_IDENTIFIER_LEN) // 2):] if term.stateless_reply: logging.warning( 'WARNING: Term %s is a stateless reply term and will not be ' 'rendered.', term.name) continue if set(['established', 'tcp-established']).intersection(term.option): logging.debug( 'Skipping established term %s because MSMPC is stateful.', term.name) continue # if inactive is set, deactivate the term and remove the option. if 'inactive' in term.option: term.inactive = True term.option.remove('inactive') if term.name in term_names: raise JuniperMSMPCFilterError('Duplicate term name') term_names.add(term.name) if term.expiration: if term.expiration <= exp_info_date: logging.info( 'INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue new_term = Term(term, filter_type, noverbose, filter_name) new_terms.append(new_term) # Because MSMPC terms can contain inet and inet6 addresses. We have to # have ability to recover proper AF for ICMP type we need. # If protocol is empty or we cannot map to inet or inet6 we insert bogus # af_type name which will cause new_term.NormalizeIcmpTypes to fail. 
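        # For example (illustrative): protocol ['icmpv6'] maps to 'inet6' and
        # ['icmp'] maps to 'inet' via _AF_ICMP_MAP, while ['tcp'] (or an empty
        # protocol list) falls back to the bogus 'unknown_af_icmp' value
        # described above.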
if not term.protocol: icmp_af_type = 'unknown_af_icmp' else: icmp_af_type = self._AF_ICMP_MAP.get(term.protocol[0], 'unknown_af_icmp') tmp_icmptype = new_term.NormalizeIcmpTypes(term.icmp_type, term.protocol, icmp_af_type) # NormalizeIcmpTypes returns [''] for empty, convert to [] for eval normalized_icmptype = tmp_icmptype if tmp_icmptype != [''] else [] # rewrites the protocol icmpv6 to icmp6 if 'icmpv6' in term.protocol: protocol = list(term.protocol) protocol[protocol.index('icmpv6')] = 'icmp6' else: protocol = term.protocol # MSMPC requires tcp and udp to specify ports, rather than imply all # ports if 'udp' in term.protocol or 'tcp' in term.protocol: if not term.source_port and not term.destination_port: term.destination_port = [[1, 65535]] new_application_set = { 'sport': self._BuildPort(term.source_port), 'dport': self._BuildPort(term.destination_port), 'protocol': protocol, 'icmp-type': normalized_icmptype, 'icmp-code': term.icmp_code, 'timeout': term.timeout } for application_set in self.applications[filter_name]: if all( item in list(application_set.items()) for item in new_application_set.items()): new_application_set = '' term.replacement_application_name = application_set['name'] break if (modified_term_name == application_set['name'] and new_application_set != application_set): raise ConflictingApplicationSetsError( 'Application set %s has a conflicting entry' % modified_term_name) if new_application_set: new_application_set['name'] = modified_term_name self.applications[filter_name].append(new_application_set) self.junipermsmpc_policies.append( (header, filter_name, filter_direction, new_terms)) def _Group(self, group, lc=True): """If 1 item return it, else return [ item1 item2 ]. Args: group: a list. could be a list of strings (protocols) or a list of tuples (ports) lc: return a lower cased result for text. Default is True. Returns: rval: a string surrounded by '[' and '];' if len(group) > 1 or with just ';' appended if len(group) == 1 """ def _FormattedGroup(el, lc=True): """Return the actual formatting of an individual element. Args: el: either a string (protocol) or a tuple (ports) lc: return lower cased result for text. Default is True. Returns: string: either the lower()'ed string or the ports, hyphenated if they're a range, or by itself if it's not. """ if isinstance(el, str): if not lc: return el else: return el.lower() elif isinstance(el, int): return str(el) # type is a tuple below here elif el[0] == el[1]: return '%d' % el[0] else: return '%d-%d' % (el[0], el[1]) if len(group) > 1: rval = '[ ' + ' '.join([_FormattedGroup(x, lc=lc) for x in group]) + ' ];' else: rval = _FormattedGroup(group[0], lc=lc) + ';' return rval def __str__(self): target = juniper.Config() for (header, filter_name, filter_direction, terms) in self.junipermsmpc_policies: target.Append('groups {') target.Append('replace:') target.Append('/*') # we want the acl to contain id and date tags, but p4 will expand # the tags here when we submit the generator, so we have to trick # p4 into not knowing these words. like taking c-a-n-d-y from a # baby. 
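      # (Illustration, based on the comment above: the repository tags are the
      # usual id/date keyword lines, e.g. something like '$Id:$' and '$Date:$',
      # and the '** ' argument prefixes each of them so they sit inside the
      # /* ... */ comment block opened above.)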
for line in aclgenerator.AddRepositoryTags('** '): target.Append(line) target.Append('**') for comment in header.comment: for line in comment.split('\n'): target.Append('** ' + line) target.Append('*/') target.Append('%s {' % filter_name) target.Append('services {') target.Append('stateful-firewall {') target.Append('rule %s {' % filter_name) target.Append('match-direction %s;' % filter_direction) for term in terms: term_str = str(term) if term_str: target.Append(term_str, verbatim=True) target.Append('}') # rule { ... } target.Append('}') # stateful-firewall { ... } target.Append('}') # services { ... } for line in self._GenerateApplications(filter_name): target.Append(line) target.Append('}') # filter_name { ... } target.Append('}') # groups { ... } target.Append('apply-groups %s;' % filter_name) return str(target) + '\n' class Error(Exception): pass class JuniperMSMPCFilterError(Error): pass class ConflictingApplicationSetsError(Error): pass class ConflictingTargetOptionsError(Error): pass class UnsupportedHeaderError(Error): pass capirca-2.0.9/capirca/lib/junipersrx.py000066400000000000000000001031051437377527500201030ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """SRX generator.""" # pylint: disable=super-init-not-called import collections import copy import datetime import itertools from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr import six ICMP_TERM_LIMIT = 8 def JunipersrxList(name, data): return '%s [ %s ];' % (name, ' '.join(data)) class Error(Exception): """generic error class.""" class UnsupportedFilterError(Error): pass class UnsupportedHeaderError(Error): pass class SRXDuplicateTermError(Error): pass class SRXVerbatimError(Error): pass class SRXOptionError(Error): pass class MixedAddrBookTypesError(Error): pass class ConflictingTargetOptionsError(Error): pass class ConflictingApplicationSetsError(Error): pass class IndentList(list): def __init__(self, indent, *args, **kwargs): self._indent = indent super().__init__(*args, **kwargs) def IndentAppend(self, size, data): self.append('%s%s' % (self._indent * size, data)) class Term(aclgenerator.Term): """Representation of an individual SRX term. This is mostly useful for the __str__() method. 
Args: obj: a policy.Term object filter_options: list of remaining target options (zones) """ ACTIONS = {'accept': 'permit', 'deny': 'deny', 'reject': 'reject', 'count': 'count', 'log': 'log', 'expresspath': 'services-offload', 'dscp': 'dscp'} def __init__(self, term, from_zone, to_zone, expresspath=False, verbose=True): super().__init__(term) self.term = term self.from_zone = from_zone self.to_zone = to_zone self.verbose = verbose if expresspath: self.term.action = [ a.replace('accept', 'expresspath') for a in self.term.action] def __str__(self): """Render config output from this term object.""" ret_str = IndentList(JuniperSRX.INDENT) # COMMENTS comment_max_width = 68 if self.term.owner and self.verbose: self.term.comment.append('Owner: %s' % self.term.owner) comments = aclgenerator.WrapWords(self.term.comment, comment_max_width) if comments and comments[0] and self.verbose: ret_str.IndentAppend(3, '/*') for line in comments: ret_str.IndentAppend(3, line) ret_str.IndentAppend(3, '*/') ret_str.IndentAppend(3, 'policy ' + self.term.name + ' {') ret_str.IndentAppend(4, 'match {') # SOURCE-ADDRESS if self.term.source_address: saddr_check = set() for saddr in self.term.source_address: saddr_check.add(saddr.parent_token) saddr_check = sorted(saddr_check) ret_str.IndentAppend(5, JunipersrxList('source-address', saddr_check)) else: ret_str.IndentAppend(5, 'source-address any;') # DESTINATION-ADDRESS if self.term.destination_address: daddr_check = [] for daddr in self.term.destination_address: daddr_check.append(daddr.parent_token) daddr_check = set(daddr_check) daddr_check = list(daddr_check) daddr_check.sort() ret_str.IndentAppend(5, JunipersrxList('destination-address', daddr_check)) else: ret_str.IndentAppend(5, 'destination-address any;') # APPLICATION if (not self.term.source_port and not self.term.destination_port and not self.term.icmp_type and not self.term.protocol): ret_str.IndentAppend(5, 'application any;') else: if hasattr(self.term, 'replacement_application_name'): ret_str.IndentAppend(5, 'application ' + self.term.replacement_application_name + '-app;') else: ret_str.IndentAppend(5, 'application ' + self.term.name + '-app;') # DSCP MATCH if self.term.dscp_match: ret_str.IndentAppend(5, JunipersrxList('dscp', self.term.dscp_match)) # DSCP EXCEPT if self.term.dscp_except: ret_str.IndentAppend(5, JunipersrxList('dscp-except', self.term.dscp_except)) # SOURCE-ZONE if self.term.source_zone: szone_check = set() for szone in self.term.source_zone: szone_check.add(szone) szone_check = sorted(szone_check) ret_str.IndentAppend(5, JunipersrxList('from-zone', szone_check)) # DESTINATION-ZONE if self.term.destination_zone: dzone_check = set() for dzone in self.term.destination_zone: dzone_check.add(dzone) dzone_check = sorted(dzone_check) ret_str.IndentAppend(5, JunipersrxList('to-zone', dzone_check)) ret_str.IndentAppend(4, '}') # ACTIONS for action in self.term.action: ret_str.IndentAppend(4, 'then {') # VPN target can be only specified when ACTION is accept if str(action) == 'accept' and self.term.vpn: ret_str.IndentAppend(5, self.ACTIONS.get( str(action)) + ' {') ret_str.IndentAppend(6, 'tunnel {') ret_str.IndentAppend(7, 'ipsec-vpn %s;' % self.term.vpn[0]) if self.term.vpn[1]: ret_str.IndentAppend(7, 'pair-policy %s;' % self.term.vpn[1]) ret_str.IndentAppend(6, '}') ret_str.IndentAppend(5, '}') else: ret_str.IndentAppend(5, self.ACTIONS.get(str(action)) + ';') # DSCP SET if self.term.dscp_set: ret_str.IndentAppend(5, 'dscp ' + self.term.dscp_set + ';') # LOGGING if self.term.logging: 
ret_str.IndentAppend(5, 'log {') for log_target in self.term.logging: if str(log_target) == 'log-both': ret_str.IndentAppend(6, 'session-init;') ret_str.IndentAppend(6, 'session-close;') else: if str(action) == 'accept': ret_str.IndentAppend(6, 'session-close;') else: ret_str.IndentAppend(6, 'session-init;') ret_str.IndentAppend(5, '}') # COUNTER if self.term.counter: ret_str.IndentAppend(5, 'count;') ret_str.IndentAppend(4, '}') ret_str.IndentAppend(3, '}') return '\n'.join(ret_str) def _Group(self, group): """If 1 item return it, else return [ item1 item2 ]. Args: group: a list. could be a list of strings (protocols) or a list of tuples (ports) Returns: rval: a string surrounded by '[' and '];' if len(group) > 1 or with just ';' appended if len(group) == 1 """ def _FormattedGroup(el): """Return the actual formatting of an individual element. Args: el: either a string (protocol) or a tuple (ports) Returns: string: either the lower()'ed string or the ports, hyphenated if they're a range, or by itself if it's not. """ if isinstance(el, str): return el.lower() elif isinstance(el, int): return str(el) # type is a tuple below here elif el[0] == el[1]: return '%d' % el[0] else: return '%d-%d' % (el[0], el[1]) if len(group) > 1: rval = '[ ' + ' '.join([_FormattedGroup(x) for x in group]) + ' ];' else: rval = _FormattedGroup(group[0]) + ';' return rval class JuniperSRX(aclgenerator.ACLGenerator): """SRX rendering class. This class takes a policy object and renders the output into a syntax which is understood by SRX firewalls. Args: pol: policy.Policy object """ _PLATFORM = 'srx' SUFFIX = '.srx' _SUPPORTED_AF = set(('inet', 'inet6', 'mixed')) _ZONE_ADDR_BOOK = 'address-book-zone' _GLOBAL_ADDR_BOOK = 'address-book-global' _ADDRESSBOOK_TYPES = set((_ZONE_ADDR_BOOK, _GLOBAL_ADDR_BOOK)) _EXPRESSPATH = 'expresspath' _NOVERBOSE = 'noverbose' _SUPPORTED_TARGET_OPTIONS = set((_ZONE_ADDR_BOOK, _GLOBAL_ADDR_BOOK, _EXPRESSPATH, _NOVERBOSE)) _AF_MAP = {'inet': (4,), 'inet6': (6,), 'mixed': (4, 6)} _AF_ICMP_MAP = {'icmp': 'inet', 'icmpv6': 'inet6'} INDENT = ' ' _MAX_HEADER_COMMENT_LENGTH = 71 # The SRX platform is limited in how many IP addresses can be used in # a single policy. _ADDRESS_LENGTH_LIMIT = 1023 # IPv6 are 32 bytes compared to IPv4, this is used as a multiplier. _IPV6_SIZE = 4 def __init__(self, pol, exp_info): self.srx_policies = [] self.addressbook = collections.OrderedDict() self.applications = [] self.ports = [] self.from_zone = '' self.to_zone = '' self.addr_book_type = set() super().__init__(pol, exp_info) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'counter', 'dscp_except', 'dscp_match', 'dscp_set', 'destination_zone', 'logging', 'option', 'owner', 'source_zone', 'timeout', 'verbatim', 'vpn'} supported_sub_tokens.update( {'action': {'accept', 'deny', 'reject', 'count', 'log', 'dscp'}, }) del supported_sub_tokens['option'] return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): # pylint: disable=attribute-defined-outside-init """Transform a policy object into a JuniperSRX object. 
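    As a side effect this populates self.addressbook, self.applications and
    self.srx_policies, which __str__ later renders into the final
    configuration.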
Args: pol: policy.Policy object exp_info: print a info message when a term is set to expire in that many weeks Raises: UnsupportedFilterError: An unsupported filter was specified UnsupportedHeaderError: A header option exists that is not understood/usable SRXDuplicateTermError: Two terms were found with same name in same filter ConflictingTargetOptionsError: Two target options are conflicting in the header MixedAddrBookTypesError: Global and Zone address books in the same policy ConflictingApplicationSetsError: When two duplicate named terms have conflicting application entries """ current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) verbose = True if self._NOVERBOSE in filter_options[4:]: verbose = False # TODO(robankeny): Clean up option section. if (len(filter_options) < 4 or filter_options[0] != 'from-zone' or filter_options[2] != 'to-zone'): raise UnsupportedFilterError('SRX filter arguments must specify ' 'from-zone and to-zone.') # check if to-zone is not a supported target option if filter_options[1] in self._SUPPORTED_TARGET_OPTIONS: raise UnsupportedFilterError('to-zone %s cannot be the same as any ' 'valid SRX target-options' % (filter_options[1])) else: self.from_zone = filter_options[1] # check if from-zone is not a supported target option if filter_options[3] in self._SUPPORTED_TARGET_OPTIONS: raise UnsupportedFilterError('from-zone %s cannot be the same as any ' 'valid SRX target-options' % (filter_options[3])) else: self.to_zone = filter_options[3] # check if source-zone & destination-zone are only used with global policy if filter_options[1] != 'all' or filter_options[3] != 'all': for term in terms: if term.source_zone or term.destination_zone: raise UnsupportedFilterError('Term %s has either source-zone or ' 'destination-zone which can only be ' 'used with global policy' % term.name) # variables used to collect target-options and set defaults filter_type = '' # parse srx target options extra_options = filter_options[4:] if self._ADDRESSBOOK_TYPES.issubset(extra_options): raise ConflictingTargetOptionsError( 'only one address-book-type can ' 'be specified per header "%s"' % ' '.join(filter_options)) else: address_book_type = set( [self._ZONE_ADDR_BOOK, self._GLOBAL_ADDR_BOOK]).intersection(extra_options) if not address_book_type: address_book_type = {self._GLOBAL_ADDR_BOOK} self.addr_book_type.update(address_book_type) if len(self.addr_book_type) > 1: raise MixedAddrBookTypesError( 'Global and Zone address-book-types cannot ' 'be used in the same policy') if self.from_zone == 'all' and self.to_zone == 'all': if self._ZONE_ADDR_BOOK in self.addr_book_type: raise UnsupportedFilterError('Zone address books cannot be used ' 'with a global policy.') elif self.from_zone == 'all' or self.to_zone == 'all': raise UnsupportedFilterError('The zone name all is reserved for ' 'global policies.') if self._EXPRESSPATH in filter_options[4:]: self.expresspath = True else: self.expresspath = False for filter_opt in filter_options[4:]: # validate address families if filter_opt in self._SUPPORTED_AF: if not filter_type: filter_type = filter_opt else: raise ConflictingTargetOptionsError( 'only one address family can be ' 'specified per header "%s"' % ' '.join(filter_options)) elif filter_opt in self._SUPPORTED_TARGET_OPTIONS: continue else: raise UnsupportedHeaderError( 'SRX Generator currently 
does not support ' '%s as a header option "%s"' % (filter_opt, ' '.join(filter_options))) # if address-family and address-book-type have not been set then default if not filter_type: filter_type = 'mixed' term_dup_check = set() new_terms = [] self._FixLargePolices(terms, filter_type) for term in terms: # Only generate the term if it's for the appropriate platform. if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue if term.stateless_reply: logging.warning( "WARNING: Term %s in policy %s>%s is a stateless reply " "term and will not be rendered.", term.name, self.from_zone, self.to_zone) continue if set(['established', 'tcp-established']).intersection(term.option): logging.debug('Skipping established term %s because SRX is stateful.', term.name) continue term.name = self.FixTermLength(term.name) if term.name in term_dup_check: raise SRXDuplicateTermError('You have a duplicate term: %s' % term.name) term_dup_check.add(term.name) if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s>%s expires ' 'in less than two weeks.', term.name, self.from_zone, self.to_zone) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s>%s is expired.', term.name, self.from_zone, self.to_zone) continue # SRX address books leverage network token names for IPs. # When excluding addresses, we lose those distinct names so we need # to create a new unique name based off the term name before excluding. if term.source_address_exclude: # If we have a naked source_exclude, we need something to exclude from if not term.source_address: term.source_address = [nacaddr.IP('0.0.0.0/0', term.name.upper(), term.name.upper())] # Use the term name as the token & parent_token new_src_parent_token = term.name.upper() + '_SRC_EXCLUDE' new_src_token = new_src_parent_token for i in term.source_address_exclude: term.source_address = nacaddr.RemoveAddressFromList( term.source_address, i) for j in term.source_address: j.token = new_src_token j.parent_token = new_src_parent_token if term.destination_address_exclude: if not term.destination_address: term.destination_address = [nacaddr.IP('0.0.0.0/0', term.name.upper(), term.name.upper())] new_dst_parent_token = term.name.upper() + '_DST_EXCLUDE' new_dst_token = new_dst_parent_token for i in term.destination_address_exclude: term.destination_address = nacaddr.RemoveAddressFromList( term.destination_address, i) for j in term.destination_address: j.token = new_dst_token j.parent_token = new_dst_parent_token # SRX policies are controlled by addresses that are used within, so # policy can be at the same time inet and inet6. 
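        # For example (illustrative): with the default filter_type 'mixed',
        # _AF_MAP yields (4, 6) and both address families survive the
        # filtering below; with 'inet6' any IPv4 prefixes are dropped, and a
        # term left with no valid source or destination IPs is skipped with a
        # warning.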
if self._GLOBAL_ADDR_BOOK in self.addr_book_type: for zone in self.addressbook: for unused_name, ips in sorted( self.addressbook[zone].items()): ips = [i for i in ips] if term.source_address == ips: term.source_address = ips if term.destination_address == ips: term.destination_address = ips # Filter source_address based on filter_type & add to address book if term.source_address: valid_addrs = [] for addr in term.source_address: if addr.version in self._AF_MAP[filter_type]: valid_addrs.append(addr) if not valid_addrs: logging.warning( 'WARNING: Term %s has 0 valid source IPs, skipping.', term.name) continue term.source_address = valid_addrs for addr in term.source_address: self._BuildAddressBook(self.from_zone, addr) # Filter destination_address based on filter_type & add to address book if term.destination_address: valid_addrs = [] for addr in term.destination_address: if addr.version in self._AF_MAP[filter_type]: valid_addrs.append(addr) if not valid_addrs: logging.warning( 'WARNING: Term %s has 0 valid destination IPs, skipping.', term.name) continue term.destination_address = valid_addrs for addr in term.destination_address: self._BuildAddressBook(self.to_zone, addr) new_term = Term(term, self.from_zone, self.to_zone, self.expresspath, verbose) new_terms.append(new_term) # Because SRX terms can contain inet and inet6 addresses. We have to # have ability to recover proper AF for ICMP type we need. # If protocol is empty or we cannot map to inet or inet6 we insert bogus # af_type name which will cause new_term.NormalizeIcmpTypes to fail. if not term.protocol: icmp_af_type = 'unknown_af_icmp' else: icmp_af_type = self._AF_ICMP_MAP.get( term.protocol[0], 'unknown_af_icmp') tmp_icmptype = new_term.NormalizeIcmpTypes( term.icmp_type, term.protocol, icmp_af_type) # NormalizeIcmpTypes returns [''] for empty, convert to [] for eval normalized_icmptype = tmp_icmptype if tmp_icmptype != [''] else [] # rewrites the protocol icmpv6 to icmp6 if 'icmpv6' in term.protocol: protocol = list(term.protocol) protocol[protocol.index('icmpv6')] = 'icmp6' else: protocol = term.protocol new_application_set = {'sport': self._BuildPort(term.source_port), 'dport': self._BuildPort(term.destination_port), 'protocol': protocol, 'icmp-type': normalized_icmptype, 'timeout': term.timeout} for application_set in self.applications: if all(item in list(application_set.items()) for item in new_application_set.items()): new_application_set = '' term.replacement_application_name = application_set['name'] break if (term.name == application_set['name'] and new_application_set != application_set): raise ConflictingApplicationSetsError( 'Application set %s has a conflicting entry' % term.name) if new_application_set: new_application_set['name'] = term.name self.applications.append(new_application_set) self.srx_policies.append((header, new_terms, filter_options)) def _FixLargePolices(self, terms, address_family): """Loops over all terms finding terms exceeding SRXs policy limit. Args: terms: List of terms from a policy. address_family: Tuple containing address family versions. See the following URL for more information http://www.juniper.net/techpubs/en_US/junos12.1x44/topics/reference/ general/address-address-sets-limitations.html """ def Chunks(l): """Splits a list of IP addresses into smaller lists based on byte size.""" return_list = [[]] counter = 0 index = 0 for i in l: # Size is split in half due to the max size being a sum of src and dst. 
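          # Worked example (illustrative): with _ADDRESS_LENGTH_LIMIT = 1023
          # the threshold below is ~511; an IPv4 address adds 1 to the counter
          # and an IPv6 address adds _IPV6_SIZE (4), so a chunk holds roughly
          # 512 IPv4 or 128 IPv6 addresses before a new chunk is started.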
if counter > (self._ADDRESS_LENGTH_LIMIT/2): counter = 0 index += 1 return_list.append([]) if i.version == 6: counter += self._IPV6_SIZE else: counter += 1 return_list[index].append(i) return return_list expanded_terms = [] for term in terms: if (term.AddressesByteLength( self._AF_MAP[address_family]) > self._ADDRESS_LENGTH_LIMIT): logging.warning('LARGE TERM ENCOUNTERED') src_chunks = Chunks(term.source_address) counter = 0 for chunk in src_chunks: for ip in chunk: ip.parent_token = 'src_' + term.name + str(counter) counter += 1 dst_chunks = Chunks(term.destination_address) counter = 0 for chunk in dst_chunks: for ip in chunk: ip.parent_token = 'dst_' + term.name + str(counter) counter += 1 src_dst_products = itertools.product(src_chunks, dst_chunks) counter = 0 for src_dst_list in src_dst_products: new_term = copy.copy(term) new_term.source_address = src_dst_list[0] new_term.destination_address = src_dst_list[1] new_term.name = new_term.name + '_' + str(counter) expanded_terms.append(new_term) counter += 1 else: expanded_terms.append(term) if expanded_terms: del terms[:] terms.extend(expanded_terms) def _BuildAddressBook(self, zone, address): """Create the address book configuration entries. Args: zone: the zone these objects will reside in address: a naming library address object """ if zone not in self.addressbook: self.addressbook[zone] = collections.defaultdict(list) name = address.parent_token for ip in self.addressbook[zone][name]: if ip.supernet_of(address): return if address.supernet_of(ip): for index, ip_addr in enumerate(self.addressbook[zone][name]): if ip_addr == ip: self.addressbook[zone][name][index] = address return self.addressbook[zone][name].append(address) def _SortAddressBookNumCheck(self, item): """Used to give a natural order to the list of acl entries. Args: item: string of the address book entry name Returns: returns the characters and number """ item_list = item.split('_') num = item_list.pop(-1) if isinstance(item_list[-1], int): set_number = item_list.pop(-1) num = int(set_number) * 1000 + int(num) alpha = '_'.join(item_list) if num: return (alpha, int(num)) return (alpha, 0) def _BuildPort(self, ports): """Transform specified ports into list and ranges. 
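    For example (illustrative), [(80, 80), (1024, 65535)] becomes
    ['80', '1024-65535'].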
Args: ports: a policy terms list of ports Returns: port_list: list of ports and port ranges """ port_list = [] for i in ports: if i[0] == i[1]: port_list.append(str(i[0])) else: port_list.append('%s-%s' % (str(i[0]), str(i[1]))) return port_list def _GenerateAddressBook(self): """Creates address book.""" target = IndentList(self.INDENT) # create address books if address-book-type set to global if self._GLOBAL_ADDR_BOOK in self.addr_book_type: global_address_book = collections.defaultdict(list) target.IndentAppend(1, 'replace: address-book {') target.IndentAppend(2, 'global {') for zone in self.addressbook: for group in self.addressbook[zone]: for address in self.addressbook[zone][group]: global_address_book[group].append(address) names = sorted(global_address_book.keys()) for name in names: counter = 0 ips = nacaddr.SortAddrList(global_address_book[name]) ips = nacaddr.CollapseAddrList(ips) global_address_book[name] = ips for ip in ips: target.IndentAppend(4, 'address ' + name + '_' + str(counter) + ' ' + str(ip) + ';') counter += 1 for group in sorted(global_address_book.keys()): target.IndentAppend(4, 'address-set ' + group + ' {') counter = 0 for unused_addr in global_address_book[group]: target.IndentAppend(5, 'address ' + group + '_' + str(counter) + ';') counter += 1 target.IndentAppend(4, '}') target.IndentAppend(2, '}') target.IndentAppend(1, '}') else: target.IndentAppend(1, 'zones {') for zone in self.addressbook: target.IndentAppend(2, 'security-zone ' + zone + ' {') target.IndentAppend(3, 'replace: address-book {') # building individual addresses groups = sorted(self.addressbook[zone]) for group in groups: ips = nacaddr.SortAddrList(self.addressbook[zone][group]) ips = nacaddr.CollapseAddrList(ips) self.addressbook[zone][group] = ips count = 0 for address in self.addressbook[zone][group]: target.IndentAppend(4, 'address ' + group + '_' + str(count) + ' ' + str(address) + ';') count += 1 # building address-sets for group in groups: target.IndentAppend(4, 'address-set ' + group + ' {') count = 0 for address in self.addressbook[zone][group]: target.IndentAppend(5, 'address ' + group + '_' + str(count) + ';') count += 1 target.IndentAppend(4, '}') target.IndentAppend(3, '}') target.IndentAppend(2, '}') target.IndentAppend(1, '}') return target def _GenerateApplications(self): target = IndentList(self.INDENT) apps_set_list = IndentList(self.INDENT) target.append('replace: applications {') done_apps = [] for app in sorted(self.applications, key=lambda x: x['name']): app_list = IndentList(self.INDENT) if app in done_apps: continue if app['protocol'] or app['sport'] or app['dport'] or app['icmp-type']: # generate ICMP statements if app['icmp-type']: if app['timeout']: timeout = app['timeout'] else: timeout = 60 # SRX has a limit of 8 terms per application. To get around this, # we use application sets with applications that contain the terms # we need. num_terms = len(app['protocol']) * len(app['icmp-type']) if num_terms > ICMP_TERM_LIMIT: target.IndentAppend(1, 'application-set ' + app['name'] + '-app {') for i in range(num_terms): target.IndentAppend( 2, 'application ' + app['name'] + '-app%d' % (i + 1) + ';') target.IndentAppend(1, '}') else: target.IndentAppend(1, 'application ' + app['name'] + '-app {') term_counter = 0 for i, code in enumerate(app['icmp-type']): for proto in app['protocol']: # if we have more than 8 (ICMP_TERM_LIMIT) terms, we use an app # for each term. 
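              # For example (illustrative): protocol ['icmp'] with ten
              # icmp-types gives num_terms = 10 > ICMP_TERM_LIMIT, so each
              # type becomes its own <name>-appN application referenced from
              # the application-set opened above; with eight or fewer terms
              # they are all emitted as term tN lines inside a single
              # <name>-app application.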
if num_terms > ICMP_TERM_LIMIT: target.IndentAppend( 1, 'application ' + app['name'] + '-app%d' % (term_counter + 1) + ' {') target.IndentAppend( 2, 'term t1 protocol %s %s-type %s inactivity-timeout %d;' % (proto, proto, str(code), int(timeout))) target.IndentAppend(1, '}') else: target.IndentAppend( 2, 'term t%d protocol %s %s-type %s inactivity-timeout %d;' % (i + 1, proto, proto, str(code), int(timeout))) term_counter += 1 if num_terms < ICMP_TERM_LIMIT: target.IndentAppend(1, '}') # generate non-ICMP statements else: i = 1 apps_set_list.IndentAppend(1, 'application-set ' + app['name'] + '-app {') for proto in app['protocol'] or ['']: for sport in app['sport'] or ['']: for dport in app['dport'] or ['']: chunks = [] if proto: # SRX does not like proto vrrp if proto == 'vrrp': proto = '112' chunks.append(' protocol %s' % proto) if sport: chunks.append(' source-port %s' % sport) if dport: chunks.append(' destination-port %s' % dport) if app['timeout']: chunks.append(' inactivity-timeout %d' % int(app['timeout'])) if chunks: apps_set_list.IndentAppend( 2, 'application ' + app['name'] + '-app%d;' % i ) app_list.IndentAppend( 1, 'application ' + app['name'] + '-app%d {' % i ) app_list.IndentAppend(2, 'term t%d' % i + ''.join(chunks) + ';') app_list.IndentAppend(1, '}') i += 1 apps_set_list.IndentAppend(1, '}') done_apps.append(app) if app_list: target.extend(app_list) if len(done_apps) == 0: target.clear() target.append('delete: applications;') return target target.extend(apps_set_list) target.append('}\n') return target def __str__(self): """Render the output of the JuniperSRX policy into config.""" target = IndentList(self.INDENT) target.append('security {') # ADDRESSBOOK target.extend(self._GenerateAddressBook()) # POLICIES target.IndentAppend(1, '/*') target.extend(aclgenerator.AddRepositoryTags(self.INDENT * 1)) target.IndentAppend(1, '*/') target.IndentAppend(1, 'replace: policies {') for (header, terms, filter_options) in self.srx_policies: if self._NOVERBOSE not in filter_options[4:]: target.IndentAppend(2, '/*') target.extend([self.INDENT * 2 + line for line in aclgenerator.WrapWords(header.comment, self._MAX_HEADER_COMMENT_LENGTH)]) target.IndentAppend(2, '*/') # ZONE DIRECTION if filter_options[1] == 'all' and filter_options[3] == 'all': target.IndentAppend(2, 'global {') else: target.IndentAppend(2, 'from-zone ' + filter_options[1] + ' to-zone ' + filter_options[3] + ' {') # GROUPS if header.apply_groups: target.IndentAppend(3, JunipersrxList('apply-groups', header.apply_groups)) # GROUPS EXCEPT if header.apply_groups_except: target.IndentAppend( 3, JunipersrxList('apply-groups-except', header.apply_groups_except) ) for term in terms: str_result = str(term) if str_result: target.append(str_result) target.IndentAppend(2, '}') target.IndentAppend(1, '}') target.append('}') # APPLICATIONS target.extend(self._GenerateApplications()) return '\n'.join(target) capirca-2.0.9/capirca/lib/k8s.py000066400000000000000000000300161437377527500163770ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # """Kubernetes NetworkPolicy resource generator. More information about Kubernetes NetworkPolicy: https://kubernetes.io/docs/concepts/services-networking/network-policies/ https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy/ """ import copy import datetime import logging import re from capirca.lib import aclgenerator import yaml class Error(Exception): """Generic error class.""" class K8sNetworkPolicyError(Error): """Raised with problems in formatting for Kubernetes NetworkPolicies.""" class ExceededAttributeCountError(Error): """Raised when the total attribute count of a policy is above the maximum.""" def IsDefaultDeny(term): """Returns true if a term is a default deny without IPs, ports, etc.""" skip_attrs = [ 'flattened', 'flattened_addr', 'flattened_saddr', 'flattened_daddr', 'action', 'comment', 'name', 'logging', 'direction' ] if 'deny' not in term.action: return False # This lc will look through all methods and attributes of the object. # It returns only the attributes that need to be looked at to determine if # this is a default deny. for i in [ a for a in dir(term) if not a.startswith('__') and a.islower() and not callable(getattr(term, a)) ]: if i in skip_attrs: continue v = getattr(term, i) if isinstance(v, str) and v: return False if isinstance(v, list) and v: return False return True def GetNextPriority(priority): """Get the priority for the next rule.""" return priority class Term(aclgenerator.Term): """Creates the term for the Kubernetes NetworkPolicy.""" _API_VERSION = 'networking.k8s.io/v1' _RESOURCE_KIND = 'NetworkPolicy' # Policy rule name has to match specific RE: # No more than 253 characters, beginning and ending # with a lowercase alphanumeric character with dashes, dots, and lowercase # alphanumerics between. # Details: https://kubernetes.io/docs/concepts/overview/working-with-objects/names _TERM_NAME_RE = re.compile(r'^[a-z0-9]([a-z0-9-\.]){0,251}[a-z0-9]$') _TERM_MAX_LENGTH = 253 # Protocols allowed are only tcp/udp/sctp # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#networkpolicyport-v1-networking-k8s-io PROTO_MAP = { 'tcp': 6, 'udp': 17, 'sctp': 132, 'all': -1, # Used for default deny } def __init__(self, term): super().__init__(term) self.term = term if 'deny' in self.term.action: if IsDefaultDeny(term): return else: raise K8sNetworkPolicyError( 'Kubernetes NetworkPolicy does not support explicit deny terms.') self._validateDirection() if self.term.source_port: raise K8sNetworkPolicyError( 'Kubernetes NetworkPolicy does not support source port restrictions.') # Raise an error if the flattening of address exclusions would result in # overly broad network access control policies if (self.term.source_address_exclude and self.term.source_address or self.term.destination_address_exclude and self.term.destination_address): self.term.FlattenAll(mutate=False) if not self.term.flattened_saddr and self.term.direction == 'INGRESS': logging.error( 'Kubernetes NetworkPolicy term %s no longer contains any source ' 'addresses after the prefixes in source_address_exclude were ' 'removed. Not rendering term.', self.term.name) self.term = None return if not self.term.flattened_daddr and self.term.direction == 'EGRESS': logging.error( 'Kubernetes NetworkPolicy term %s no longer contains any destination ' 'addresses after the prefixes in destination_address_exclude were ' 'removed. 
Not rendering term.', self.term.name) self.term = None return def __str__(self): """Convert term to a string.""" return yaml.safe_dump(self.ConvertToDict()) def _validateDirection(self): if self.term.direction == 'INGRESS': if not self.term.source_address: raise K8sNetworkPolicyError( 'Ingress rule missing required field "source-address"') if self.term.destination_address: raise K8sNetworkPolicyError('Ingress rules cannot include ' '"destination-address.') elif self.term.direction == 'EGRESS': if self.term.source_address: raise K8sNetworkPolicyError( 'Egress rules cannot include "source-address".') if not self.term.destination_address: raise K8sNetworkPolicyError( 'Egress rule missing required field "destination-address".') def ConvertToDict(self): """Convert term to a dictionary. This is used to get a dictionary describing this term which can be output easily as a YAML object. Returns: A dictionary that contains a complete Kubernetes NetworkPolicy resource Raises: K8sNetworkPolicyError: The term name is not valid. """ if not self.term: return {} if not self._TERM_NAME_RE.match(self.term.name): raise K8sNetworkPolicyError( 'Term name %s is not valid. See https://kubernetes.io/docs/concepts/overview/working-with-objects/names for more information' % (self.term.name)) resource_dict = { 'apiVersion': self._API_VERSION, 'kind': self._RESOURCE_KIND, 'metadata': { 'name': self.term.name, 'annotations': {}, }, 'spec': { 'podSelector': {}, 'policyTypes': [self.term.direction.capitalize()] }, } if self.term.comment: resource_dict['metadata']['annotations']['comment'] = ' '.join( self.term.comment) if self.term.owner: resource_dict['metadata']['annotations']['owner'] = self.term.owner # We only allow one kind of deny policy, and thats a default deny. Because # of that, we can quickly return an empty policy in the specified direction if 'deny' in self.term.action: return resource_dict peer_selectors = [] peer_selector_key = '' base_port_selector = {} if self.term.direction == 'INGRESS': for source_address in self.term.source_address: peer_selector = {'ipBlock': {'cidr': str(source_address)}} for exclude in self.term.source_address_exclude: if peer_selector['ipBlock'].get('except') is None: peer_selector['ipBlock']['except'] = [] peer_selector['ipBlock']['except'].append(str(exclude)) peer_selectors.append(peer_selector) peer_selector_key = 'from' else: for destination_address in self.term.destination_address: peer_selector = {'ipBlock': {'cidr': str(destination_address)}} for exclude in self.term.destination_address_exclude: if peer_selector['ipBlock'].get('except') is None: peer_selector['ipBlock']['except'] = [] peer_selector['ipBlock']['except'].append(str(exclude)) peer_selectors.append(peer_selector) peer_selector_key = 'to' # Build a base port selector list from ports base_port_selectors = [] for start, end in self.term.destination_port: if start == end: base_port_selector = {'port': start} else: base_port_selector = {'port': start, 'endPort': end} base_port_selectors.append(base_port_selector) # Use the ports info to make one selector per port pair per proto port_selectors = [] for proto in self.term.protocol: # If the list of ports is null, we still need to specify proto if not base_port_selectors: port_selectors.append({'protocol': proto.upper()}) continue for base_selector in base_port_selectors: current_selector = copy.deepcopy(base_selector) # NetworkPolicies require uppercased proto name current_selector['protocol'] = proto.upper() port_selectors.append(current_selector) 
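    # For example (illustrative): protocol ['tcp'] with destination ports
    # [(80, 80), (8000, 8080)] yields
    #   [{'port': 80, 'protocol': 'TCP'},
    #    {'port': 8000, 'endPort': 8080, 'protocol': 'TCP'}],
    # while a protocol with no ports still produces a bare
    # {'protocol': 'TCP'} entry.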
resource_dict['spec'][self.term.direction.lower()] = [{ 'ports': port_selectors, peer_selector_key: peer_selectors, }] return resource_dict class K8s(aclgenerator.ACLGenerator): """A Kubernetes NetworkPolicy object.""" _API_VERSION = 'networking.k8s.io/v1' _RESOURCE_KIND = 'NetworkPolicyList' _PLATFORM = 'k8s' SUFFIX = '.yml' _SUPPORTED_AF = frozenset(('mixed')) _GOOD_DIRECTION = ['INGRESS', 'EGRESS'] _OPTIONAL_SUPPORTED_KEYWORDS = frozenset(['expiration']) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, _ = super()._BuildTokens() # add extra things supported_tokens |= {'expiration', 'owner'} # remove unsupported things supported_tokens -= {'icmp_type', 'source-port', 'verbatim'} # easier to make a new structure supported_sub_tokens = {'action': {'accept', 'deny'}} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.network_policies = [] total_rule_count = 0 current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) direction = 'INGRESS' if filter_options: for i in self._GOOD_DIRECTION: if i in filter_options: direction = i filter_options.remove(i) term_names = set() for term in terms: if term.stateless_reply: logging.warning( 'WARNING: Term %s in policy %s is a stateless reply ' 'term and will not be rendered.', term.name, filter_name) continue if not term.comment: term.comment = header.comment if direction == 'EGRESS': term.name += '-e' term.name = self.FixTermLength(term.name) if term.name in term_names: raise K8sNetworkPolicyError('Duplicate term name %s' % term.name) term_names.add(term.name) term.direction = direction if term.expiration: if term.expiration <= current_date: logging.warning( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if term.expiration <= exp_info_date: logging.info( 'INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.option: raise K8sNetworkPolicyError( 'Kubernetes NetworkPolicy does not support term options.') # Only generate the term if it's for the appropriate platform if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue term_dict = Term(term).ConvertToDict() if term_dict: total_rule_count += 1 self.network_policies.append(term_dict) logging.info('Total rule count of policy %s is: %d', filter_name, total_rule_count) def __str__(self): if not self.network_policies: return '' list_resource = { 'apiVersion': self._API_VERSION, 'kind': self._RESOURCE_KIND, 'items': self.network_policies, } return yaml.safe_dump(list_resource) capirca-2.0.9/capirca/lib/nacaddr.py000066400000000000000000000352761437377527500173030ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A subclass of the ipaddress library that includes comments for ipaddress.""" import collections import ipaddress import itertools from typing import Union import capirca.utils.iputils as iputils def IP(ip, comment='', token='', strict=True): """Take an ip string and return an object of the correct type. Args: ip: the ip address. comment: option comment field token: option token name where this address was extracted from strict: If strict should be used in ipaddress object. Returns: ipaddress.IPv4 or ipaddress.IPv6 object or raises ValueError. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. """ if isinstance(ip, ipaddress._BaseNetwork): # pylint disable=protected-access imprecise_ip = ip else: imprecise_ip = ipaddress.ip_network(ip, strict=strict) if imprecise_ip.version == 4: return IPv4(ip, comment, token, strict=strict) elif imprecise_ip.version == 6: return IPv6(ip, comment, token, strict=strict) raise ValueError('Provided IP string "%s" is not a valid v4 or v6 address' % ip) # TODO(robankeny) remove once at 3.7 @staticmethod def _is_subnet_of(a, b): # pylint: disable=invalid-name try: # Always false if one is v4 and the other is v6. if a.version != b.version: raise TypeError('%s and %s are not of the same version' % (a, b)) return (b.network_address <= a.network_address and b.broadcast_address >= a.broadcast_address) except AttributeError: raise TypeError( 'Unable to test subnet containment between %s and %s' % (a, b)) class IPv4(ipaddress.IPv4Network): """This subclass allows us to keep text comments related to each object.""" def __init__(self, ip_string, comment='', token='', strict=True): self.text = comment self.token = token self.parent_token = token # Using a tuple of IP integer/prefixlength is significantly faster than # using the BaseNetwork object for recreating the IP network if isinstance(ip_string, ipaddress._BaseNetwork): # pylint disable=protected-access ip = (ip_string.network_address._ip, ip_string.prefixlen) # pylint disable=protected-access # pytype: disable=attribute-error else: ip = ip_string super().__init__(ip, strict) def subnet_of(self, other): """Return True if this network is a subnet of other.""" if self.version != other.version: return False return self._is_subnet_of(self, other) def supernet_of(self, other): """Return True if this network is a supernet of other.""" if self.version != other.version: return False return self._is_subnet_of(other, self) def __deepcopy__(self, memo): result = self.__class__(self) result.text = self.text result.token = self.token result.parent_token = self.parent_token return result def AddComment(self, comment=''): """Append comment to self.text, comma separated. Don't add the comment if it's the same as self.text. Args: comment: comment to be added. """ if self.text: if comment and comment not in self.text: self.text += ', ' + comment else: self.text = comment def supernet(self, prefixlen_diff=1): """Override ipaddress.IPv4 supernet so we can maintain comments. See ipaddress.IPv4.Supernet for complete documentation. Args: prefixlen_diff: Prefix length difference. 
Returns: An IPv4 object Raises: PrefixlenDiffInvalidError: Raised when prefixlen - prefixlen_diff results in a negative number. """ if self.prefixlen == 0: return self if self.prefixlen - prefixlen_diff < 0: raise PrefixlenDiffInvalidError( 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % ( self.prefixlen, prefixlen_diff)) ret_addr = IPv4(ipaddress.IPv4Network.supernet(self, prefixlen_diff), comment=self.text, token=self.token) return ret_addr # Backwards compatibility name from v1. Supernet = supernet _is_subnet_of = _is_subnet_of class IPv6(ipaddress.IPv6Network): """This subclass allows us to keep text comments related to each object.""" def __init__(self, ip_string, comment='', token='', strict=True): self.text = comment self.token = token self.parent_token = token # Using a tuple of IP integer/prefixlength is significantly faster than # using the BaseNetwork object for recreating the IP network if isinstance(ip_string, ipaddress._BaseNetwork): # pylint disable=protected-access ip = (ip_string.network_address._ip, ip_string.prefixlen) # pylint disable=protected-access # pytype: disable=attribute-error else: ip = ip_string super().__init__(ip, strict) def subnet_of(self, other): """Return True if this network is a subnet of other.""" if self.version != other.version: return False return self._is_subnet_of(self, other) def supernet_of(self, other): """Return True if this network is a supernet of other.""" if self.version != other.version: return False return self._is_subnet_of(other, self) def __deepcopy__(self, memo): result = self.__class__(self) result.text = self.text result.token = self.token result.parent_token = self.parent_token return result def supernet(self, prefixlen_diff=1): """Override ipaddress.IPv6Network supernet so we can maintain comments. See ipaddress.IPv6Network.Supernet for complete documentation. Args: prefixlen_diff: Prefix length difference. Returns: An IPv4 object Raises: PrefixlenDiffInvalidError: Raised when prefixlen - prefixlen_diff results in a negative number. """ if self.prefixlen == 0: return self if self.prefixlen - prefixlen_diff < 0: raise PrefixlenDiffInvalidError( 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % ( self.prefixlen, prefixlen_diff)) ret_addr = IPv6(ipaddress.IPv6Network.supernet(self, prefixlen_diff), comment=self.text, token=self.token) return ret_addr # Backwards compatibility name from v1. Supernet = supernet _is_subnet_of = _is_subnet_of def AddComment(self, comment=''): """Append comment to self.text, comma separated. Don't add the comment if it's the same as self.text. Args: comment: comment to be added. """ if self.text: if comment and comment not in self.text: self.text += ', ' + comment else: self.text = comment IPType = Union[IPv4, IPv6] def _InNetList(adders, ip): """Returns True if ip is contained in adders.""" for addr in adders: if ip.subnet_of(addr): return True return False def IsSuperNet(supernets, subnets): """Returns True if subnets are fully consumed by supernets.""" for net in subnets: if not _InNetList(supernets, net): return False return True def CollapseAddrListPreserveTokens(addresses): """Collapse an array of IPs only when their tokens are the same. Args: addresses: list of ipaddress.IPNetwork objects. Returns: list of ipaddress.IPNetwork objects. 
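  Example (illustrative): 10.0.0.0/25 and 10.0.0.128/25 that share the same
  parent_token collapse to 10.0.0.0/24; addresses whose parent_tokens differ
  are collapsed separately and never merged with one another.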
""" ret_array = [] for grp in itertools.groupby(sorted(addresses, key=lambda x: x.parent_token), lambda x: x.parent_token): ret_array.append(CollapseAddrList(list(grp[1]))) dedup_array = [] i = 0 while len(ret_array) > i: ip = ret_array.pop(0) k = 0 to_add = True while k < len(dedup_array): if IsSuperNet(dedup_array[k], ip): to_add = False break elif IsSuperNet(ip, dedup_array[k]): del dedup_array[k] k += 1 if to_add: dedup_array.append(ip) return [i for sublist in dedup_array for i in sublist] def _SafeToMerge(address, merge_target, check_addresses): """Determine if it's safe to merge address into merge target. Checks given address against merge target and a list of check_addresses if it's OK to roll address into merge target such that it not less specific than any of the check_addresses. See description of why ir is important within public function CollapseAddrList. Args: address: Address that is being merged. merge_target: Merge candidate address. check_addresses: A dict networks_address->addrs to compare specificity with. Returns: True if safe to merge, False otherwise. """ for check_address in check_addresses.get(address.network_address, []): if merge_target.netmask <= check_address.netmask < address.netmask: return False return True def _CollapseAddrListInternal(addresses, complements_by_network): """Collapses consecutive netblocks until reaching a fixed point. Example: ip1 = ipaddress.IPv4Network('1.1.0.0/24') ip2 = ipaddress.IPv4Network('1.1.1.0/24') ip3 = ipaddress.IPv4Network('1.1.2.0/24') ip4 = ipaddress.IPv4Network('1.1.3.0/24') ip5 = ipaddress.IPv4Network('1.1.4.0/24') ip6 = ipaddress.IPv4Network('1.1.0.1/22') _CollapseAddrListInternal([ip1, ip2, ip3, ip4, ip5, ip6]) -> [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')] Note, this shouldn't be called directly, but is called via CollapseAddrList([]) Args: addresses: List of IPv4 or IPv6 objects complements_by_network: Dict of IPv4 or IPv6 objects indexed by network_address, that if present will be considered to avoid harmful optimizations. Returns: List of IPv4 or IPv6 objects (depending on what we were passed) """ ret_array = [] for addr in addresses: addr_is_fresh = True while addr_is_fresh: addr_is_fresh = False if not ret_array: ret_array.append(addr) continue prev_addr = ret_array[-1] if not _SafeToMerge(addr, prev_addr, complements_by_network): ret_array.append(addr) elif prev_addr.supernet_of(addr): # Preserve addr's comment, then subsume it. prev_addr.AddComment(addr.text) elif (prev_addr.version == addr.version and prev_addr.prefixlen == addr.prefixlen and # It's faster to compare integers than IP objects prev_addr.broadcast_address._ip + 1 == addr.network_address._ip and # pylint disable=protected-access # Generating Supernet is relatively intensive compared to doing bit # operations (prev_addr.netmask._ip << 1) & prev_addr.network_address._ip == # pylint disable=protected-access prev_addr.network_address._ip): # pylint disable=protected-access # Preserve addr's comment, then merge with it. prev_addr.AddComment(addr.text) addr = ret_array.pop().Supernet() addr_is_fresh = True else: ret_array.append(addr) return ret_array def CollapseAddrList(addresses, complement_addresses=None): """Collapse an array of IP objects. Example: CollapseAddrList( [IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')] Note: this works just as well with IPv6 addresses too. On platforms that support exclude semantics with most specific match, this method should _always_ be called with complement addresses supplied. 
Not doing so can lead to *reversal* of intent. Consider this case: destination-address:: 10.0.0.0/8, 10.0.0.0/10 destination-exclude:: 10.0.0.0/9 Without optimization, 10.0.0.1 will _match_. With optimization, most specific prefix will _not_ match, reversing the intent. Supplying complement_addresses allows this method to consider those implications. Args: addresses: list of ipaddress.IPNetwork objects complement_addresses: list of ipaddress.IPNetwork objects that, if present, will be considered to avoid harmful optimizations. Returns: list of ipaddress.IPNetwork objects """ complements_dict = collections.defaultdict(list) address_set = set([a.network_address for a in addresses]) for ca in complement_addresses or []: if ca.network_address in address_set: complements_dict[ca.network_address].append(ca) return _CollapseAddrListInternal( sorted(addresses, key=ipaddress.get_mixed_type_key), complements_dict) def SortAddrList(addresses): """Return a sorted list of nacaddr objects.""" return sorted(addresses, key=ipaddress.get_mixed_type_key) def RemoveAddressFromList(superset, exclude): """Remove a single address from a list of addresses. Args: superset: a List of nacaddr IPv4 or IPv6 addresses exclude: a single nacaddr IPv4 or IPv6 address Returns: a List of nacaddr IPv4 or IPv6 addresses """ ret_array = [] for addr in superset: if exclude == addr or addr.subnet_of(exclude): pass elif exclude.version == addr.version and exclude.subnet_of(addr): # this could be optimized except that one group uses this # code with ipaddrs (instead of nacaddrs). ret_array.extend(IP(x) for x in iputils.exclude_address(addr, exclude)) else: ret_array.append(addr) return SortAddrList(ret_array) def AddressListExclude(superset, excludes, collapse_addrs=True): """Remove a list of addresses from another list of addresses. Args: superset: a List of nacaddr IPv4 or IPv6 addresses excludes: a List nacaddr IPv4 or IPv6 addresses collapse_addrs: whether or not to collapse contiguous CIDRs togethe Returns: a List of nacaddr IPv4 or IPv6 addresses """ if collapse_addrs: superset = CollapseAddrList(superset)[::-1] excludes = CollapseAddrList(excludes)[::-1] else: superset = sorted(superset, reverse=True) excludes = sorted(excludes, reverse=True) ret_array = [] while superset and excludes: if superset[-1].overlaps(excludes[-1]): ip = superset.pop() superset.extend( reversed(RemoveAddressFromList([ip], excludes[-1]))) elif superset[-1]._get_networks_key() < excludes[-1]._get_networks_key(): # pylint: disable=protected-access ret_array.append(superset.pop()) else: excludes.pop() if collapse_addrs: return CollapseAddrList(ret_array + superset) else: return sorted(set(ret_array + superset)) ExcludeAddrs = AddressListExclude class PrefixlenDiffInvalidError(ipaddress.NetmaskValueError): """Holdover from ipaddr v1.""" if __name__ == '__main__': pass capirca-2.0.9/capirca/lib/naming.py000066400000000000000000000540241437377527500171500ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
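# Sketch of the guard described above, using the same prefixes as the
# docstring example: with the exclude supplied as a complement address,
# 10.0.0.0/10 is not folded into 10.0.0.0/8, so most-specific-match exclude
# semantics are preserved. AddressListExclude() subtracts prefixes outright.
from capirca.lib import nacaddr

nets = [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('10.0.0.0/10')]
excludes = [nacaddr.IP('10.0.0.0/9')]
print(nacaddr.CollapseAddrList(nets, complement_addresses=excludes))
# [IPv4('10.0.0.0/8'), IPv4('10.0.0.0/10')]

print(nacaddr.AddressListExclude([nacaddr.IP('192.0.2.0/24')],
                                 [nacaddr.IP('192.0.2.128/25')]))
# [IPv4('192.0.2.0/25')]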
# See the License for the specific language governing permissions and # limitations under the License. # """Parse naming definition files. Network access control applications use definition files which contain information about networks and services. This naming class will provide an easy interface into using these definitions. Sample usage with definition files contained in ./acl/defs: defs = Naming('acl/defs/') services = defs.GetService('DNS') returns ['53/tcp', '53/udp', ...] networks = defs.GetNet('INTERNAL') returns a list of nacaddr.IPv4 object The definition files are contained in a single directory and may consist of multiple files ending in .net or .svc extensions, indicating network or service definitions respectively. The format of the files consists of a 'token' value, followed by a list of values and optional comments, such as: INTERNAL = 10.0.0.0/8 # RFC-1918 172.16.0.0/12 # RFC-1918 192.168.0.0/16 # RFC-1918 or DNS = 53/tcp 53/udp """ import glob import os import re from absl import logging from capirca.lib import nacaddr from capirca.lib import port as portlib class Error(Exception): """Create our own base error class to be inherited by other error classes.""" class NamespaceCollisionError(Error): """Used to report on duplicate symbol names found while parsing.""" class BadNetmaskTypeError(Error): """Used to report on duplicate symbol names found while parsing.""" class NoDefinitionsError(Error): """Raised if no definitions are found.""" class ParseError(Error): """Raised if an error occurs during parsing.""" class UndefinedAddressError(Error): """Raised if an address is referenced but not defined.""" class UndefinedServiceError(Error): """Raised if a service is referenced but not defined.""" class UndefinedPortError(Error): """Raised if a port/protocol pair has not been defined.""" class UnexpectedDefinitionTypeError(Error): """An unexpected/unknown definition type was used.""" class NamingSyntaxError(Error): """A general syntax error for the definition.""" class _ItemUnit: """This class is a container for an index key and a list of associated values. An ItemUnit will contain the name of either a service or network group, and a list of the associated values separated by spaces. Attributes: name: A string representing a unique token value. items: a list of strings containing values for the token. """ def __init__(self, symbol): self.name = symbol self.items = [] class Naming: """Object to hold naming objects from NETWORK and SERVICES definition files. Attributes: current_symbol: The current token being handled while parsing data. services: A collection of all of the current service item tokens. networks: A collection of all the current network item tokens. unseen_services: Undefined service entries. unseen_networks: Undefined network entries. port_re: Regular Expression matching valid port entries. 
""" def __init__(self, naming_dir=None, naming_file=None, naming_type=None): """Set the default values for a new Naming object.""" self.current_symbol = None self.services = {} self.networks = {} self.unseen_services = {} self.unseen_networks = {} self.port_re = re.compile(r'(^\d+-\d+|^\d+)\/\w+$|^[\w\d-]+$', re.IGNORECASE | re.DOTALL) self.token_re = re.compile(r'(^[-_A-Z0-9]+$)', re.IGNORECASE) if naming_file and naming_type: filename = os.path.sep.join([naming_dir, naming_file]) with open(filename, 'r') as file_handle: self._ParseFile(file_handle, naming_type) elif naming_dir: self._Parse(naming_dir, 'services') self._CheckUnseen('services') self._Parse(naming_dir, 'networks') self._CheckUnseen('networks') def _CheckUnseen(self, def_type): if def_type == 'services': if self.unseen_services: raise UndefinedServiceError('%s %s' % ( 'The following tokens were nested as a values, but not defined', self.unseen_services)) if def_type == 'networks': if self.unseen_networks: raise UndefinedAddressError('%s %s' % ( 'The following tokens were nested as a values, but not defined', self.unseen_networks)) def GetIpParents(self, query): """Return network tokens that contain IP in query. Args: query: an ip string ('10.1.1.1') or nacaddr.IP object Returns: A sorted list of unique parent tokens. """ base_parents = [] recursive_parents = [] # convert string to nacaddr, if arg is ipaddr then convert str() to nacaddr if (not isinstance(query, nacaddr.IPv4) and not isinstance(query, nacaddr.IPv6)): if query[:1].isdigit(): query = nacaddr.IP(query) # Get parent token for an IP if isinstance(query, nacaddr.IPv4) or isinstance(query, nacaddr.IPv6): for token in self.networks: for item in self.networks[token].items: item = item.split('#')[0].strip() if not item[:1].isdigit(): continue try: supernet = nacaddr.IP(item, strict=False) if supernet.supernet_of(query): base_parents.append(token) except ValueError: # item was not an IP pass # Get parent token for another token else: for token in self.networks: for item in self.networks[token].items: item = item.split('#')[0].strip() if item[:1].isalpha() and item == query: base_parents.append(token) # look for nested tokens for bp in base_parents: done = False for token in self.networks: if bp in [item.split('#')[0].strip() for item in self.networks[token].items]: # ignore IPs, only look at token values if bp[:1].isalpha(): if bp not in recursive_parents: recursive_parents.append(bp) recursive_parents.extend(self.GetIpParents(bp)) done = True # if no nested tokens, just append value if not done: if bp[:1].isalpha() and bp not in recursive_parents: recursive_parents.append(bp) return sorted(list(set(recursive_parents))) def GetServiceParents(self, query): """Given a query token, return list of services definitions with that token. Args: query: a service token name. Returns: List of service definitions containing the token. """ return self._GetParents(query, self.services) def GetNetParents(self, query): """Given a query token, return list of network definitions with that token. Args: query: a network token name. Returns: A list of network definitions containing the token. """ return self._GetParents(query, self.networks) def _GetParents(self, query, query_group): """Given a naming item dict, return any tokens containing the value. Args: query: a service or token name, such as 53/tcp or DNS query_group: either services or networks dict Returns: Returns a list of definitions containing the token in desired group. 
""" base_parents = [] recursive_parents = [] # collect list of tokens containing query for token in query_group: if query in [item.split('#')[0].strip() for item in query_group[token].items]: base_parents.append(token) if not base_parents: return [] # iterate through tokens containing query, doing recursion if necessary for bp in base_parents: for token in query_group: if bp in query_group[token].items and bp not in recursive_parents: recursive_parents.append(bp) recursive_parents.extend(self._GetParents(bp, query_group)) if bp not in recursive_parents: recursive_parents.append(bp) return recursive_parents def GetNetChildren(self, query): """Given a query token, return list of network definitions tokens within provided token. This will only return children, not descendants of provided token. Args: query: a network token name. Returns: A list of network definitions tokens within this token. """ return self._GetChildren(query, self.networks) def _GetChildren(self, query, query_group): """Given a naming item dict, return tokens (not IPs) contained within this value. Args: query: a token name query_group: networks dict Returns: Returns a list of definitions tokens within (children) target token. """ children = [] if query in query_group: for item in query_group[query].items: child = item.split('#')[0].strip() # Determine if item a token, then it's a child if not self._IsIpFormat(child): children.append(child) return children def _IsIpFormat(self, item): """Helper function for _GetChildren to detect if string is IP format. Args: item: string either a IP or token. Returns: True if string is a IP False if string is not a IP """ try: item = item.strip() nacaddr.IP(item, strict=False) return True except ValueError: return False def GetServiceNames(self): """Returns the list of all known service names.""" return list(self.services.keys()) def GetService(self, query): """Given a service name, return a list of associated ports and protocols. Args: query: Service name symbol or token. Returns: A list of service values such as ['80/tcp', '443/tcp', '161/udp', ...] Raises: UndefinedServiceError: If the service name isn't defined. """ expandset = set() already_done = set() data = [] service_name = '' data = query.split('#') # Get the token keyword and remove any comment service_name = data[0].split()[0] # strip and cast from list to string if service_name not in self.services: raise UndefinedServiceError('\nNo such service: %s' % query) already_done.add(service_name) for next_item in self.services[service_name].items: # Remove any trailing comment. service = next_item.split('#')[0].strip() # Recognized token, not a value. if '/' not in service: # Make sure we are not descending into recursion hell. if service not in already_done: already_done.add(service) try: expandset.update(self.GetService(service)) except UndefinedServiceError as e: # One of the services in query is undefined, refine the error msg. raise UndefinedServiceError('%s (in %s)' % (e, query)) else: expandset.add(service) return sorted(expandset) def GetPortParents(self, query, proto): """Returns a list of all service tokens containing the port/protocol pair. Args: query: port number ('22') as str proto: protocol name ('tcp') as str Returns: A list of service tokens: ['SSH', 'HTTPS'] Raises: UndefinedPortError: If the port/protocol pair isn't used in any service tokens. 
""" # turn the given port and protocol into a PortProtocolPair object given_ppp = portlib.PPP(query + '/' + proto) base_parents = [] matches = set() # check each service token to see if it's a PPP or a nested group. # if it's a PPP, see if there's a match with given_ppp # otherwise, add nested group to a list to recurisvely check later. # if there's no match, do nothing. for service_token in self.services: for port_child in self.services[service_token].items: ppp = portlib.PPP(port_child) # check for exact match if ppp.is_single_port and ppp == given_ppp: matches.add(service_token) # check if it's within ppp's port range elif ppp.is_range and given_ppp in ppp: matches.add(service_token) # if it's a nested token, add to a list to recurisvely # check later. elif ppp.nested: if service_token not in base_parents: base_parents.append(service_token) # break down the nested service tokens into PPP objects and check # against given_ppp for bp in base_parents: for port_child in self.GetService(bp): ppp = portlib.PPP(port_child) # check for exact match if ppp.is_single_port and ppp == given_ppp: matches.add(bp) # check if it's within ppp's port range elif ppp.is_range and given_ppp in ppp: matches.add(bp) # error if the port/protocol pair is not found. if not matches: raise UndefinedPortError( '%s/%s is not found in any service tokens' % (query, proto)) return sorted(matches) def GetServiceByProto(self, query, proto): """Given a service name, return list of ports in the service by protocol. Args: query: Service name to lookup. proto: A particular protocol to restrict results by, such as 'tcp'. Returns: A list of service values of type 'proto', such as ['80', '443', ...] Raises: UndefinedServiceError: If the service name isn't defined. """ services_set = set() proto = proto.upper() data = [] servicename = '' data = query.split('#') # Get the token keyword and remove any comment servicename = data[0].split()[0] # strip and cast from list to string if servicename not in self.services: raise UndefinedServiceError('%s %s' % ('\nNo such service,', servicename)) for service in self.GetService(servicename): if service and '/' in service: parts = service.split('/') if parts[1].upper() == proto: services_set.add(parts[0]) return sorted(services_set) def GetNetAddr(self, token): """Given a network token, return a list of nacaddr.IPv4 or nacaddr.IPv6 objects. Args: token: A name of a network definition, such as 'INTERNAL' Returns: A list of nacaddr.IPv4 or nacaddr.IPv6 objects. Raises: UndefinedAddressError: if the network name isn't defined. """ return self.GetNet(token) def GetNet(self, query): """Expand a network token into a list of nacaddr.IPv4 or nacaddr.IPv6 objects. Args: query: Network definition token which may include comment text Raises: BadNetmaskTypeError: Results when an unknown netmask_type is specified. Acceptable values are 'cidr', 'netmask', and 'hostmask'. 
Returns: List of nacaddr.IPv4 or nacaddr.IPv6 objects Raises: UndefinedAddressError: for an undefined token value """ returnlist = [] data = [] token = '' data = query.split('#') # Get the token keyword and remove any comment token = data[0].split()[0] # Remove whitespace and cast from list to string if token not in self.networks: raise UndefinedAddressError('%s %s' % ('\nUNDEFINED:', str(token))) for i in self.networks[token].items: comment = '' if i.find('#') > -1: (net, comment) = i.split('#', 1) else: net = i net = net.strip() if self.token_re.match(net): returnlist.extend(self.GetNet(net)) else: try: # TODO(robankeny): Fix using error to continue processing. addr = nacaddr.IP(net, strict=False) addr.text = comment.lstrip() addr.token = token returnlist.append(addr) except ValueError: # if net was something like 'FOO', or the name of another token which # needs to be dereferenced, nacaddr.IP() will return a ValueError returnlist.extend(self.GetNet(net)) for i in returnlist: i.parent_token = token return returnlist def _Parse(self, defdirectory, def_type): """Parse files of a particular type for tokens and values. Given a directory name and the type (services|networks) to process, grab all the appropriate files in that directory and parse them for definitions. Args: defdirectory: Path to directory containing definition files. def_type: Type of definitions to parse Raises: NoDefinitionsError: if no definitions are found. """ file_names = [] get_files = {'services': lambda: glob.glob(defdirectory + '/*.svc'), 'networks': lambda: glob.glob(defdirectory + '/*.net')} if def_type in get_files: file_names = get_files[def_type]() else: raise NoDefinitionsError('Definitions type %s is unknown.' % def_type) if not file_names: raise NoDefinitionsError('No definition files for %s in %s found.' % (def_type, defdirectory)) for current_file in file_names: try: with open(current_file, 'r') as file_handle: self._ParseFile(file_handle, def_type) except IOError as error_info: raise NoDefinitionsError('%s' % error_info) def _ParseFile(self, file_handle, def_type): for line in file_handle: self._ParseLine(line, def_type) def ParseServiceList(self, data): """Take an array of service data and import into class. This method allows us to pass an array of data that contains service definitions that are appended to any definitions read from files. Args: data: array of text lines containing service definitions. """ for line in data: self._ParseLine(line, 'services') def ParseNetworkList(self, data): """Take an array of network data and import into class. This method allows us to pass an array of data that contains network definitions that are appended to any definitions read from files. Args: data: array of text lines containing net definitions. """ for line in data: self._ParseLine(line, 'networks') def _ParseLine(self, line, definition_type): """Parse a single line of a service definition file. This routine is used to parse a single line of a service definition file, building a list of 'self.services' objects as each line of the file is iterated through. Args: line: A single line from a service definition files. definition_type: Either 'networks' or 'services' Raises: UnexpectedDefinitionTypeError: called with unexpected type of definitions. NamespaceCollisionError: when overlapping tokens are found. ParseError: If errors occur NamingSyntaxError: Syntax error parsing config. 
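# Sketch of GetNet() and GetServiceByProto() with hypothetical definitions:
# nested tokens are dereferenced, and each returned nacaddr object carries the
# inline comment, the token that defined it and the parent token queried.
from capirca.lib import naming

defs = naming.Naming()
defs.ParseNetworkList(['WEB_SERVERS = 198.51.100.0/28 # front ends',
                       'ALL_WEB = WEB_SERVERS 2001:db8::/64'])
defs.ParseServiceList(['WEB = 80/tcp 443/tcp 80/udp'])

for net in defs.GetNet('ALL_WEB'):
    print(net, '|', net.text, '|', net.token, '|', net.parent_token)
# 198.51.100.0/28 | front ends | WEB_SERVERS | ALL_WEB
# 2001:db8::/64 |  | ALL_WEB | ALL_WEB

print(defs.GetServiceByProto('WEB', 'tcp'))   # ['443', '80']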
""" if definition_type not in ['services', 'networks']: raise UnexpectedDefinitionTypeError('%s %s' % ( 'Received an unexpected definition type:', definition_type)) line = line.strip() if not line or line.startswith('#'): # Skip comments and blanks. return comment = '' if line.find('#') > -1: # if there is a comment, save it (line, comment) = line.split('#', 1) line_parts = line.split('=') # Split on var = val lines. # the value field still has the comment at this point # If there was '=', then do var and value if len(line_parts) > 1: current_symbol = line_parts[0].strip() # varname left of '=' if not self.token_re.match(current_symbol): logging.info('\nService name does not match recommended criteria: %s\nOnly A-Z, a-z, 0-9, -, and _ allowed' % current_symbol) self.current_symbol = current_symbol if definition_type == 'services': for port in line_parts[1].strip().split(): if not self.port_re.match(port): raise NamingSyntaxError('%s: %s' % ( 'The following line has a syntax error', line)) if self.current_symbol in self.services: raise NamespaceCollisionError('%s %s' % ( '\nMultiple definitions found for service: ', self.current_symbol)) elif definition_type == 'networks': if self.current_symbol in self.networks: raise NamespaceCollisionError('%s %s' % ( '\nMultiple definitions found for service: ', self.current_symbol)) self.unit = _ItemUnit(self.current_symbol) if definition_type == 'services': self.services[self.current_symbol] = self.unit # unseen_services is a list of service TOKENS found in the values # of newly defined services, but not previously defined themselves. # When we define a new service, we should remove it (if it exists) # from the list of unseen_services. if self.current_symbol in self.unseen_services: self.unseen_services.pop(self.current_symbol) elif definition_type == 'networks': self.networks[self.current_symbol] = self.unit if self.current_symbol in self.unseen_networks: self.unseen_networks.pop(self.current_symbol) else: raise ParseError('Unknown definitions type.') values = line_parts[1] # No '=', so this is a value only line else: values = line_parts[0] # values for previous var are continued this line for value_piece in values.split(): if not value_piece: continue if not self.current_symbol: break if comment: self.unit.items.append(value_piece + ' # ' + comment) else: self.unit.items.append(value_piece) # token? if value_piece[0].isalpha() and ':' not in value_piece: if definition_type == 'services': # already in top definitions list? if value_piece not in self.services: # already have it as an unused value? if value_piece not in self.unseen_services: self.unseen_services[value_piece] = True if definition_type == 'networks': if value_piece not in self.networks: if value_piece not in self.unseen_networks: self.unseen_networks[value_piece] = True capirca-2.0.9/capirca/lib/nftables.py000066400000000000000000000756531437377527500175100ustar00rootroot00000000000000# Copyright 2023 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """NFtables policy generator for capirca.""" import collections import copy import datetime import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr # NFTables and capirca have conflicting definitions of 'address family' # In capirca: # 'mixed' refers to a 'mixed address policy IPv4/IPv6' # 'inet6' refers to IPv6 only. # 'inet' refers to IPv4 only. # In nftables: # 'inet' refers to mixed IPv4/IPv6 policies. # 'ip6' IPv6 only. # 'ip' IPv4 only. # Therefore; we use static global variables in this generator to refer to the # real intent, values are the NFtable AF format. ip4 = 'ip' ip6 = 'ip6' mixed = 'inet' def TabSpacer(number_spaces, string): """Configuration indentation utility function.""" blank_space = ' ' return (blank_space * number_spaces) + string def Add(statement): """Prefix space appending utility to handle text joins.""" if statement: return TabSpacer(1, statement) else: return statement def ChainFormat(kind, name, ruleset): """Builds a chain in NFtables configuration format. Args: kind: type string (chain or counter) name: name to give the chain. ruleset: the list returned from RulesetGenerator function. Returns: chain_strings: multi-line string nftable configuration for the chain. """ header_sp = 4 content_sp = 8 chain_output = [] chain_output.append(TabSpacer(header_sp, '%s %s {' % (kind, name))) for line in ruleset: chain_output.append(TabSpacer(content_sp, line)) chain_output.append(TabSpacer(header_sp, '}')) return '\n'.join(chain_output) class Error(Exception): """Base error class.""" class TermError(Error): """Raised when a term is not valid.""" class HeaderError(Error): """Raised when a header is not valid.""" class UnsupportedFilterTypeError(Error): """Raised when an unsupported filter type is specified.""" class UnsupportedExpressionError(Error): """Raised when an unsupported expression is specified.""" class Term(aclgenerator.Term): """Representation of an individual NFT term. This is primarily useful for Nftables.__str__() method. Args: term policy.Term object """ _ALLOWED_PROTO_NAME = frozenset([ 'tcp', 'udp', 'icmp', 'esp', 'udp', 'ah', 'comp', 'udplite', 'dccp', 'sctp', 'icmpv6' ]) _ACTIONS = {'accept': 'accept', 'deny': 'drop'} def __init__(self, term, nf_af, nf_hook, verbose=True): """Individual instances of a Term for NFtables. Args: term: Term data. nf_af: nftables table type IPv4 only (ip), IPv6 (ip6) or dual-stack (inet). nf_hook: INPUT or OUTPUT (packet processing/direction of traffic). verbose: used for comment handling. """ self.term = term self.address_family = nf_af self.hook = nf_hook self.verbose = verbose def MapICMPtypes(self, af, term_icmp_types): """Normalize certain ICMP_TYPES for NFTables rendering. If we encounter certain keyword values in policy.Term.ICMP_TYPE keywords, we override them with NFTable specific values in order for rendered policy to be semantically correct with what NFT expects. https://www.netfilter.org/projects/nftables/manpage.html Function is used inside PortsAndProtocols. Args: af: address family. term_icmp_types: ICMP types keywords. Returns: normalized list of icmp_types. 
""" ICMP_TYPE_REMAP = { 6: { 'multicast-listener-query': 'mld-listener-query', 'multicast-listener-report': 'mld-listener-report', 'multicast-listener-done': 'mld-listener-done', 'router-solicit': 'nd-router-solicit', 'router-advertisement': 'nd-router-advert', 'neighbor-solicit': 'nd-neighbor-solicit', 'neighbor-advertisement': 'nd-neighbor-advert', 'redirect-message': 'nd-redirect', 'inverse-neighbor-discovery-solicitation': 'ind-neighbor-solicit', 'inverse-neighbor-discovery-advertisement': 'ind-neighbor-advert', 'version-2-multicast-listener-report': 'mld2-listener-report', }, 4: { # IPv4 exceptions below 'unreachable': 'destination-unreachable', 'information-request': 'info-request', 'information-reply': 'info-reply', 'mask-request': 'address-mask-request', 'mask-reply': 'address-mask-reply', } } for item in term_icmp_types: if af == ip4: # IPv4 ICMP if item in ICMP_TYPE_REMAP[4]: # Replace with NFT expected value. term_icmp_types[term_icmp_types.index(item)] = ICMP_TYPE_REMAP[4].get( item) if af == ip6: # IPv6 ICMP if item in ICMP_TYPE_REMAP[6]: # Replace with NFT expected value. term_icmp_types[term_icmp_types.index(item)] = ICMP_TYPE_REMAP[6].get( item) return term_icmp_types def CreateAnonymousSet(self, data): """Build a nftables anonymous set from some elements. Anonymous are formatted using curly braces then some data. These sets are bound to a rule, have no specific name and cannot be updated. Args: data: a list of strings to format. Returns: formatted string of items as anonymous set. """ nfset = [] if isinstance(data, str): # Handle single string. No params. return data if len(data) == 1: # Handle a list of a single element. nfset = data[0] return nfset if len(data) > 1: nfset = ', '.join(data) return '{{ {0} }}'.format(nfset) def PortsAndProtocols(self, address_family, protocol, src_ports, dst_ports, icmp_type): """Handling protocol specific NFTable statements. Args: address_family: term address family. protocol: term protocol. src_ports: raw term source port. dst_ports: raw term dest port. icmp_type: special ICMP type flag. Returns: list of statements related to ports and protocols. """ def PortStatement(protocol, source, destination): """NFT port statement. Returns empty if no ports defined.""" ports_list = [] # SOURCE PORTS. if source: ports_list.append('%s sport %s' % (protocol, self.CreateAnonymousSet(source))) # DESTINATION PORTS. if destination: ports_list.append('%s dport %s' % (protocol, self.CreateAnonymousSet(destination))) # Normalize ports into single nft statement. if ports_list: ports_statement = ' '.join(ports_list) else: ports_statement = '' return ports_statement # end PortStatement. ip_protocol = copy.deepcopy(protocol) ip6_protocol = copy.deepcopy(protocol) # Normalize term.ports objects. src_p = self._Group(src_ports) dst_p = self._Group(dst_ports) statement_lines = [] # Normalize ICMP types. # TODO: Call self.NormalizeIcmpTypes. icmp_type = self.MapICMPtypes(address_family, icmp_type) if address_family == 'ip': # IPv4 stuff. if icmp_type and ('icmp' in ip_protocol): if len(icmp_type) > 1: statement_lines.append('icmp type' + Add(self.CreateAnonymousSet(icmp_type))) else: statement_lines.append('icmp type' + Add(icmp_type)) ip_protocol.remove('icmp') if 'icmpv6' in ip_protocol: # No IPv6 protocols in IPv4 family. ip_protocol.remove('icmpv6') if ip_protocol: # Multi-protocol and zero-ports. 
if len(ip_protocol) > 1 and not (src_ports and dst_ports): statement_lines.append('ip protocol' + Add(self.CreateAnonymousSet(ip_protocol))) else: for proto in ip_protocol: if (src_ports and dst_ports): statement_lines.append(PortStatement(proto, src_p, dst_p)) else: statement_lines.append('ip protocol' + Add(proto)) if address_family == 'ip6': # IPv6 stuff. if icmp_type and ('icmpv6' in ip6_protocol): if len(icmp_type) > 1: statement_lines.append('icmpv6 type' + Add(self.CreateAnonymousSet(icmp_type))) else: statement_lines.append('icmpv6 type' + Add(icmp_type)) ip6_protocol.remove('icmpv6') if 'icmp' in ip6_protocol: # No IPv4 protocols in IPv6 family. ip6_protocol.remove('icmp') if ip6_protocol: # NFT IPv6 protocol matching is complex. Using 'ip6 nexthdr' only # matches if ipv6 packet does not contain any extension headers. # we use meta l4proto here to walk down the headers until real transport # protocol is found. This allows us to use Sets here too. # https://wiki.nftables.org/wiki-nftables/index.php/Matching_packet_headers if len(ip6_protocol) > 1 and not (src_ports and dst_ports): statement_lines.append('meta l4proto' + Add(self.CreateAnonymousSet(ip6_protocol))) else: # We avoid using th (transport header), instead we use single # statements for each protocol. for proto in ip6_protocol: if (src_ports or dst_ports): statement_lines.append(PortStatement(proto, src_p, dst_p)) else: # Single proto, no ports. statement_lines.append('meta l4proto' + Add(proto)) return statement_lines def _OptionsHandler(self, term): """Term 'option' handler. Function used to evaluate term.logging and also term.option values. Then it builds any statement that would be appended before a veredict. Results of this function are then used in GroupExpressions() to combine a final valid NFTables chain. Args: term: capirca Term data. Returns: list of statements related to generator options. """ options = [] # Stateful firewall, Accept only NEW traffic for the specific term. # Base chain already allows all return traffic of # state (ESTABLISHED, RELATED) # This should prevent invalid, untracked packets from being accepted. if 'deny' not in term.action and not term.icmp_type: options.append('ct state new') # 'logging' handling. if term.logging: # str() trick to circumvent VarType class attr comparison checks. if 'disable' not in str(term.logging): # Simple syslogging implementation. options.append('log prefix "%s"' % term.name) # 'counter' handling. # https://wiki.nftables.org/wiki-nftables/index.php/Counters # We don't use named counters here because we already structure NFT ruleset # in child chains per each rule. So simply looking at term_child_chain is # easy to tell the counter stats for that ruleset. if term.counter: options.append('counter') # Build the final statement to be returned. if options: return ' '.join(options) else: return '' def GroupExpressions( self, int_expr, address_expr, pp_expr, options, verdict, comment ): """Combines all expressions with a verdict (decision). The inputs are already pre-sanitized by RulesetGenerator. NFTables processes rules from left-to-right - ending in a verdict. We form our ruleset then towards the end append any term.options from _OptionsHandler. Args: int_expr: RulesetGenerator source or destination interface str. address_expr: pre-processed list of nftable statements of network addresses. pp_expr: pre-processed list of nftables protocols and ports. options: string value to append before verdict for NFT special options. 
verdict: action to take on resulting final statement (allow/deny). comment: term.comment string adhering to NFT limits. Returns: list of strings representing valid nftables statements. """ statement = [] if address_expr: for addr in address_expr: if pp_expr: for pstat in pp_expr: if pstat.startswith('icmp type') or addr.startswith('ip '): # Handle IPv4 ports and proto statements. if addr.startswith('ip '): statement.append(addr + Add(pstat) + Add(options) + Add(verdict)) elif pstat.startswith('icmpv6 type') or addr.startswith('ip6'): if addr.startswith('ip6'): statement.append(addr + Add(pstat) + Add(options) + Add(verdict)) else: statement.append(addr + Add(options) + Add(verdict)) elif pp_expr: # Handle statement without addresses but has ports & protocols. for pstat in pp_expr: statement.append(pstat + Add(options) + Add(verdict)) else: # If no addresses or ports & protocol. Verdict only statement. statement.append((Add(options) + Add(verdict))) # source/destination interface handling always to be done at the end. if int_expr: # 'statement' is a list because join to another list in RulesetGenerator. statement[0] = int_expr + Add(statement[0]) # Handling of comments should always be done after verdict statement. if comment: statement[0] = statement[0] + Add(comment) return statement def _AddrStatement(self, address_family, src_addr, dst_addr): """Builds an NFTables address statement. Args: address_family: NFTables address family. src_addr: prefiltered list of src addresses. dst_addr: prefiltered list of dst addresses. Returns: list of strings representing valid nftables address statements (IPv4/6). """ address_statement = [] src_addr_book = self._AddressClassifier(src_addr) dst_addr_book = self._AddressClassifier(dst_addr) if src_addr and dst_addr: # Condition where term has both defined. if address_family == 'ip': if src_addr_book['ip'] and dst_addr_book['ip']: address_statement.append( 'ip saddr ' + self.CreateAnonymousSet(src_addr_book['ip']) + ' ' + 'ip daddr ' + self.CreateAnonymousSet(dst_addr_book['ip'])) if address_family == 'ip6': if src_addr_book['ip6'] and dst_addr_book['ip6']: address_statement.append( 'ip6 saddr ' + self.CreateAnonymousSet(src_addr_book['ip6']) + ' ' + 'ip6 daddr ' + self.CreateAnonymousSet(dst_addr_book['ip6'])) elif src_addr: # Term has only src defined. if address_family == 'ip': if src_addr_book['ip']: address_statement.append('ip saddr ' + self.CreateAnonymousSet(src_addr_book['ip'])) if address_family == 'ip6': if src_addr_book['ip6']: address_statement.append( 'ip6 saddr ' + self.CreateAnonymousSet(src_addr_book['ip6'])) elif dst_addr: if address_family == 'ip': if dst_addr_book['ip']: address_statement.append('ip daddr ' + self.CreateAnonymousSet(dst_addr_book['ip'])) if address_family == 'ip6': if dst_addr_book['ip6']: address_statement.append( 'ip6 daddr ' + self.CreateAnonymousSet(dst_addr_book['ip6'])) return address_statement def RulesetGenerator(self, term): """Generate string rules of a given Term. Rules are constructed from Terms() and are contained within chains. This function generates rules that will be present inside a regular (non-base) chain. Each item in list represents a line break for later parsing. Args: term: term data. Returns: list of strings. Representing a ruleset for later formatting. """ term_ruleset = [] unique_term_ruleset = [] comment = '' # COMMENT handling. if self.verbose: comment = 'comment ' + aclgenerator.TruncateWords( self.term.comment, Nftables.COMMENT_CHAR_LIMIT) # INTERFACE (source/destination) handling. 
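    # Illustrative shape of what this method assembles for a hypothetical term
    # allowing tcp from one IPv4 prefix (verbose comments on), roughly:
    #   ip saddr 198.51.100.0/24 ip protocol tcp ct state new accept comment ...
    # The address, port/protocol and option pieces come from _AddrStatement(),
    # PortsAndProtocols() and _OptionsHandler(); GroupExpressions() joins them
    # with the verdict, and any interface match is prepended at the end.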
if term.source_interface: interface = 'iifname' + Add(term.source_interface) elif term.destination_interface: interface = 'oifname' + Add(term.destination_interface) else: interface = '' # OPTIONS / LOGGING / COUNTERS opt = self._OptionsHandler(term) # STATEMENT VERDICT / ACTION. verdict = self._ACTIONS[self.term.action[0]] address_families = [self.address_family ] if self.address_family != mixed else [ip4, ip6] for address_family in address_families: # ADDRESS handling. address_list = self._AddrStatement(address_family, self.term.source_address, self.term.destination_address) # Check if we're dealing with a term of a different IP family that needs # to be skipped. if not address_list and ( self.term.source_address or self.term.destination_address): continue # PORTS and PROTOCOLS handling. proto_and_ports = self.PortsAndProtocols(address_family, self.term.protocol, self.term.source_port, self.term.destination_port, self.term.icmp_type) # Do not render ICMP types if IP family mismatch. if ((address_family == 'ip6' and 'icmp' in self.term.protocol) or (address_family == 'ip' and ('icmpv6' in self.term.protocol) or 'icmp6' in self.term.protocol)): continue # TODO: If verdict is not supported, drop nftable_rule for it. nftable_rule = self.GroupExpressions( interface, address_list, proto_and_ports, opt, verdict, comment ) term_ruleset.extend(nftable_rule) # Ensure that chain statements contain no duplicates rules. unique_term_ruleset = [ i for n, i in enumerate(term_ruleset) if i not in term_ruleset[:n]] return unique_term_ruleset def _AddressClassifier(self, address_to_classify): """Organizes network addresses according to IP family in a dict. Args: address_to_classify: nacaddr.IP list of network addresses. Returns: dictionary of network addresses classified by AF. """ addresses = collections.defaultdict(list) for addr in address_to_classify: if addr.version == 4: addresses['ip'].append(str(addr)) if addr.version == 6: addresses['ip6'].append(str(addr)) return addresses def _Group(self, group): """If 1 item return it, else return [ item1 item2 ]. Args: group: a list. could be a list of strings (protocols) or a list of tuples (ports) Returns: rval: a string surrounded by '[' and ']' """ def _FormatPorts(port): if isinstance(port, int): return str(port) elif port[0] == port[1]: return '%d' % port[0] else: # port range return '%d-%d' % (port[0], port[1]) if len(group) > 1: rval = [_FormatPorts(x) for x in group] elif len(group) == 1: rval = _FormatPorts(group[0]) else: # Ports undefined/empty. rval = '' return rval def __str__(self): """Terms printing function. Each term is expressed as its own chain. Later referenced to a parent chain with filter directionality (input/output). """ if self.term.platform: if 'nftables' not in self.term.platform: return '' if self.term.platform_exclude: if 'nftables' in self.term.platform_exclude: return '' return ChainFormat('chain', self.term.name, self.RulesetGenerator(self.term)) class Nftables(aclgenerator.ACLGenerator): """A NFtables policy object.""" _PLATFORM = 'nftables' SUFFIX = '.nft' _HEADER_AF = frozenset(('inet', 'inet6', 'mixed')) _SUPPORTED_HOOKS = frozenset(('input', 'output')) _HOOK_PRIORITY_DEFAULT = 0 _BASE_CHAIN_PREFIX = 'root' _LOGGING = set() _OPTIONAL_SUPPORTED_KEYWORDS = frozenset([ 'expiration', ]) COMMENT_CHAR_LIMIT = 126 _AF_MAP = {'inet': (4,), 'inet6': (6,), 'mixed': (4, 6)} # Below mapping converts capirca HEADER native to nftables table. # In Nftables 'inet' contains both IPv4 and IPv6 addresses and rules. 
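# Sketch of two small Term helpers defined earlier (inputs are invented):
# _Group() normalizes port tuples and _AddressClassifier() buckets addresses
# by family. Neither uses instance state, so the class can be called directly.
from capirca.lib import nacaddr
from capirca.lib import nftables

print(nftables.Term._Group(None, [(22, 22), (8080, 8090)]))
# ['22', '8080-8090']
print(dict(nftables.Term._AddressClassifier(
    None, [nacaddr.IP('198.51.100.0/24'), nacaddr.IP('2001:db8::/64')])))
# {'ip': ['198.51.100.0/24'], 'ip6': ['2001:db8::/64']}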
NF_TABLE_AF_MAP = {'inet': 'ip', 'inet6': 'ip6', 'mixed': 'inet'} def _BuildTokens(self): """NFTables generator list of supported tokens and sub tokens. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() # Set of supported keywords for a given platform. Values should be in # undercase form, eg, icmp_type (not icmp-type) supported_tokens = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'name', # obj attribute, not token 'option', 'protocol', 'platform', 'platform_exclude', 'source_interface', # NFT iifname 'source_address', 'source_address_exclude', 'source_port', 'destination_interface', # NFT oifname 'translated', # obj attribute, not token 'stateless_reply', } # These keys must be also listed in supported_tokens. # Keys should be in undercase form, eg, icmp_type (not icmp-type). Values # should be in dash form, icmp-type (not icmp_type) supported_sub_tokens = { 'option': { 'established', 'tcp-established', }, 'action': { 'accept', 'deny', }, 'icmp_type': set( list(Term.ICMP_TYPE[4].keys()) + list(Term.ICMP_TYPE[6].keys())) } return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): """Translates a Capirca policy file into NFtables specific data structure. Reads a POL file, filters for NFTables specific data, parses each term and populates the nftables_policies list. Args: pol: A Policy() object representing a given POL file. exp_info: An int that specifies number of weeks until policy expires. Raises: TermError: Raised when policy term requirements are not met. """ self.nftables_policies = [] pol_counter = 0 current_date = datetime.date.today() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions('nftables') nf_af, nf_hook, nf_priority, filter_policy_default_action, verbose = self._ProcessHeader( filter_options) # Base chain determine name based on iteration of header. base_chain_name = self._BASE_CHAIN_PREFIX + str(pol_counter) child_chains = collections.defaultdict(dict) term_names = set() new_terms = [] for term in terms: if term.name in term_names: raise TermError('Duplicate term name') term_names.add(term.name) if term.source_interface and term.destination_interface: raise TermError( 'Incorrect interface on term. Must be either be a source or' ' destination, not both.' ) continue if term.stateless_reply: logging.warning( 'WARNING: Term %s is a stateless reply ' 'term and will not be rendered.', term.name) continue # This generator is stateful, we don't do stateless rules. # Stateful firewalls don't require a reverse rule/term; thus skip. if 'established' in term.option: logging.warning( 'WARNING: Term %s is a established ' 'term and will not be rendered.', term.name) continue if 'tcp-established' in term.option: logging.warning( 'WARNING: Term %s is a tcp-established ' 'term and will not be rendered.', term.name) continue if term.expiration: if term.expiration <= exp_info_date: logging.info( 'INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, nf_af) if term.expiration <= current_date: logging.warning( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, nf_af) continue # Handle address excludes before building nft address book dict. 
for i in term.source_address_exclude: term.source_address = nacaddr.RemoveAddressFromList( term.source_address, i) for i in term.destination_address_exclude: term.destination_address = nacaddr.RemoveAddressFromList( term.destination_address, i) new_terms.append(Term(term, nf_af, nf_hook, verbose)) # Instantiate object to call function from Term() term_object = Term(term, nf_af, nf_hook, verbose) child_chains[base_chain_name].update( {term.name: term_object.RulesetGenerator(term)}) pol_counter += 1 self.nftables_policies.append( (header, base_chain_name, nf_af, nf_hook, nf_priority, filter_policy_default_action, verbose, child_chains)) def _ProcessHeader(self, header_options): """Capirca policy header processing. Args: header_options: capirca policy header data (filter_options) Raises: HeaderError: Raised when the policy header format requirements are not met. Returns: netfilter_family: x. filter_options[0] netfilter_hook: x. filter_options[1].lower() netfilter_priority: numbers = [x for x in filter_options if x.isdigit()] policy_default_action: nftable action to take on unmatched packets. verbose: header and term verbosity. """ if len(header_options) < 2: raise HeaderError('Invalid header for Nftables. Required fields missing.') # First header element should dictate type of policy. if header_options[0] not in Nftables._HEADER_AF: raise HeaderError('Invalid address family in header: %s. Supported: %s' % (header_options[0], Nftables._HEADER_AF)) netfilter_family = self.NF_TABLE_AF_MAP.get(header_options[0]) policy_default_action = 'drop' if 'ACCEPT' in header_options: policy_default_action = 'accept' netfilter_hook = header_options[1].lower() if netfilter_hook not in self._SUPPORTED_HOOKS: raise HeaderError( '%s is not a supported nftables hook. Supported hooks: %s' % (netfilter_hook, list(self._SUPPORTED_HOOKS))) if len(header_options) >= 2: numbers = [x for x in header_options if x.isdigit()] if not numbers: netfilter_priority = self._HOOK_PRIORITY_DEFAULT logging.info( 'INFO: NFtables priority not specified in header.' 'Defaulting to %s', self._HOOK_PRIORITY_DEFAULT) if len(numbers) == 1: # A single integer value is used to set priority. netfilter_priority = numbers[0] if len(numbers) > 1: raise HeaderError('Too many integers in header.') verbose = True if 'noverbose' in header_options: verbose = False header_options.remove('noverbose') return netfilter_family, netfilter_hook, netfilter_priority, policy_default_action, verbose def _ConfigurationDictionary(self, nft_pol): """NFTables configuration object. Organizes policies into a data structure that can keep relationships with NFTables address family (tables) and the parent base chain (+ child chains). Args: nft_pol: Object containing pre-processed data from _TranslatePolicy. Returns: nftables: dictionary of dictionaries NFTables policy object. """ nftables = collections.defaultdict(dict) for (header, base_chain_name, nf_af, nf_hook, nf_priority, filter_policy_default_action, verbose, child_chains) in nft_pol: base_chain_comment = '' # TODO: If child_chain ruleset is empty don't store term. 
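# End-to-end sketch of this generator (policy text, service definition and
# term name are all hypothetical): parse a tiny policy targeting nftables and
# render it; a 'mixed' header becomes an 'inet' (dual-stack) table.
from capirca.lib import naming
from capirca.lib import nftables
from capirca.lib import policy

POL = """
header {
  target:: nftables mixed input
}
term allow-ssh {
  comment:: "allow inbound ssh"
  protocol:: tcp
  destination-port:: SSH
  action:: accept
}
"""

defs = naming.Naming()
defs.ParseServiceList(['SSH = 22/tcp'])
nft_acl = nftables.Nftables(policy.ParsePolicy(POL, defs), 2)  # 2 = exp_info weeks
print(nft_acl)   # emits 'table inet filtering_policies { ... }' with a chain per term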
if verbose: base_chain_comment = header.comment nftables[nf_af][base_chain_name] = { 'hook': nf_hook, 'comment': base_chain_comment, 'priority': nf_priority, 'policy': filter_policy_default_action, 'rules': child_chains, } return nftables def __str__(self): """Render the policy as Nftables configuration.""" nft_config = [] configuration = self._ConfigurationDictionary(self.nftables_policies) for address_family in configuration: nft_config.append('table %s filtering_policies {' % address_family) base_chain_dict = configuration[address_family] for item in base_chain_dict: # TODO: If we ever add NFTables 'named counters' it would go here. for k, v in base_chain_dict[item]['rules'][item].items(): nft_config.append(ChainFormat('chain', k, v)) # base chain header and contents. nft_config.append(TabSpacer(4, 'chain %s {' % item)) if base_chain_dict[item]['comment']: # Due to Nftables limits on comments, we handle this twice. # First time we comment it out so .nft file is human-readable. nft_config.append( TabSpacer(8, '#' + ' '.join(base_chain_dict[item]['comment']))) nft_config.append( TabSpacer( 8, 'type filter hook %s priority %s; policy %s;' % (base_chain_dict[item]['hook'], base_chain_dict[item]['priority'], base_chain_dict[item]['policy']))) # Add policy header comment after stateful firewall rule. if base_chain_dict[item]['comment']: nft_config.append(TabSpacer(8, 'ct state established,related accept' + Add('comment') + Add(aclgenerator.TruncateWords( base_chain_dict[item]['comment'], self.COMMENT_CHAR_LIMIT)))) else: # stateful firewall: allows reply traffic. nft_config.append(TabSpacer(8, 'ct state established,related accept')) # Reference the child chains with jump. for child_chain in base_chain_dict[item]['rules'][item].keys(): nft_config.append(TabSpacer(8, 'jump %s' % child_chain)) nft_config.append(TabSpacer(4, '}')) # chain_end nft_config.append('}') # table_end # Terminating newline. nft_config.append('\n') return '\n'.join(nft_config) capirca-2.0.9/capirca/lib/nsxv.py000066400000000000000000000540371437377527500167010ustar00rootroot00000000000000# Copyright 2015 The Capirca Project Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Nsxv generator.""" import datetime import re import xml from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr import six _ACTION_TABLE = { 'accept': 'allow', 'deny': 'deny', 'reject': 'reject', 'reject-with-tcp-rst': 'reject', # tcp rst not supported } _XML_TABLE = { 'actionStart': '', 'actionEnd': '', 'srcIpv4Start': 'Ipv4Address', 'srcIpv4End': '', 'destIpv4Start': 'Ipv4Address', 'destIpv4End': '', 'protocolStart': '', 'protocolEnd': '', 'serviceStart': '', 'serviceEnd': '', 'appliedToStart': 'SecurityGroup', 'appliedToEnd': '', 'srcPortStart': '', 'srcPortEnd': '', 'destPortStart': '', 'destPortEnd': '', 'icmpTypeStart': '', 'icmpTypeEnd': '', 'logTrue': 'true', 'logFalse': 'false', 'sectionStart': '
', 'sectionEnd': '
', 'nameStart': '', 'nameEnd': '', 'srcIpv6Start': 'Ipv6Address', 'srcIpv6End': '', 'destIpv6Start': 'Ipv6Address', 'destIpv6End': '', 'noteStart': '', 'noteEnd': '', } _NSXV_SUPPORTED_KEYWORDS = [ 'name', 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'expiration', 'logging' ] # generic error class class Error(Exception): """Generic error class.""" pass class UnsupportedNsxvAccessListError(Error): """Raised when we're give a non named access list.""" pass class NsxvAclTermError(Error): """Raised when there is a problem in a nsxv access list.""" pass class NsxvDuplicateTermError(Error): """Raised when there is a duplicate.""" pass class Term(aclgenerator.Term): """Creates a single ACL Term for Nsxv.""" def __init__(self, term, filter_type, applied_to=None, af=4): self.term = term # Our caller should have already verified the address family. assert af in (4, 6) self.af = af self.filter_type = filter_type self.applied_to = applied_to def __str__(self): """Convert term to a rule string. Returns: A rule as a string. Raises: NsxvAclTermError: When unknown icmp-types are specified """ # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if 'nsxv' not in self.term.platform: return '' if self.term.platform_exclude: if 'nsxv' in self.term.platform_exclude: return '' ret_str = [''] # Don't render icmpv6 protocol terms under inet, or icmp under inet6 if ((self.af == 6 and 'icmp' in self.term.protocol) or (self.af == 4 and 'icmpv6' in self.term.protocol)): logging.debug(self.NO_AF_LOG_PROTO.substitute(term=self.term.name, proto=self.term.protocol, af=self.filter_type)) return '' # Term verbatim is not supported if self.term.verbatim: raise NsxvAclTermError( 'Verbatim are not implemented in standard ACLs') # Term option is not supported if self.term.option: for opt in [str(single_option) for single_option in self.term.option]: if((opt.find('tcp-established') == 0) or (opt.find('established') == 0)): return '' else: raise NsxvAclTermError( 'Option are not implemented in standard ACLs') # check for keywords Nsxv does not support term_keywords = self.term.__dict__ unsupported_keywords = [] for key in term_keywords: if term_keywords[key]: # translated is obj attribute not keyword if ('translated' not in key) and (key not in _NSXV_SUPPORTED_KEYWORDS): unsupported_keywords.append(key) if unsupported_keywords: logging.warning('WARNING: The keywords %s in Term %s are not supported ' 'in Nsxv ', unsupported_keywords, self.term.name) name = '%s%s%s' % (_XML_TABLE.get('nameStart'), self.term.name, _XML_TABLE.get('nameEnd')) notes = '' if self.term.comment: for comment in self.term.comment: notes = '%s%s' %(notes, comment) notes = '%s%s%s' % (_XML_TABLE.get('noteStart'), notes, _XML_TABLE.get('noteEnd')) # protocol protocol = None if self.term.protocol: protocol = list(map(self.PROTO_MAP.get, self.term.protocol, self.term.protocol)) # icmp-types icmp_types = [''] if self.term.icmp_type: icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, self.term.protocol, self.af) # for mixed filter type get both IPV4address and IPv6Address af_list = [] if self.filter_type == 'mixed': af_list = [4, 6] else: af_list = [self.af] source_address = None destination_address = None source_addr = [] destination_addr = [] source_v4_addr = [] source_v6_addr = [] dest_v4_addr = [] dest_v6_addr = [] for af in af_list: # source address if 
self.term.source_address: source_address = self.term.GetAddressOfVersion('source_address', af) source_address_exclude = self.term.GetAddressOfVersion( 'source_address_exclude', af) if source_address_exclude: source_address = nacaddr.ExcludeAddrs( source_address, source_address_exclude) if source_address: if af == 4: source_v4_addr = source_address else: source_v6_addr = source_address source_addr = source_v4_addr + source_v6_addr # destination address if self.term.destination_address: destination_address = self.term.GetAddressOfVersion( 'destination_address', af) destination_address_exclude = self.term.GetAddressOfVersion( 'destination_address_exclude', af) if destination_address_exclude: destination_address = nacaddr.ExcludeAddrs( destination_address, destination_address_exclude) if destination_address: if af == 4: dest_v4_addr = destination_address else: dest_v6_addr = destination_address destination_addr = dest_v4_addr + dest_v6_addr # Check for mismatch IP for source and destination address for mixed filter if self.filter_type == 'mixed': if source_addr and destination_addr: if source_v4_addr and not dest_v4_addr: source_addr = source_v6_addr elif source_v6_addr and not dest_v6_addr: source_addr = source_v4_addr elif dest_v4_addr and not source_v4_addr: destination_addr = dest_v6_addr elif dest_v6_addr and not source_v6_addr: destination_addr = dest_v4_addr if not source_addr or not destination_addr: logging.warning('Term %s will not be rendered as it has IPv4/IPv6 ' 'mismatch for source/destination for mixed address ' 'family.', self.term.name) return '' # ports source_port = None destination_port = None if self.term.source_port: source_port = self.term.source_port if self.term.destination_port: destination_port = self.term.destination_port # logging log = 'false' if self.term.logging: log = 'true' sources = '' if source_addr: sources = '' for saddr in source_addr: # inet4 if isinstance(saddr, nacaddr.IPv4): if saddr.num_addresses > 1: saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv4Start'), saddr.with_prefixlen, _XML_TABLE.get('srcIpv4End'),) else: saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv4Start'), saddr.network_address, _XML_TABLE.get('srcIpv4End')) sources = '%s%s' %(sources, saddr) # inet6 if isinstance(saddr, nacaddr.IPv6): if saddr.num_addresses > 1: saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv6Start'), saddr.with_prefixlen, _XML_TABLE.get('srcIpv6End'),) else: saddr = '%s%s%s' % ( _XML_TABLE.get('srcIpv6Start'), saddr.network_address, _XML_TABLE.get('srcIpv6End')) sources = '%s%s' %(sources, saddr) sources = '%s%s' %(sources, '') destinations = '' if destination_addr: destinations = '' for daddr in destination_addr: # inet4 if isinstance(daddr, nacaddr.IPv4): if daddr.num_addresses > 1: daddr = '%s%s%s' % (_XML_TABLE.get('destIpv4Start'), daddr.with_prefixlen, _XML_TABLE.get('destIpv4End'),) else: daddr = '%s%s%s' % (_XML_TABLE.get('destIpv4Start'), daddr.network_address, _XML_TABLE.get('destIpv4End')) destinations = '%s%s' %(destinations, daddr) # inet6 if isinstance(daddr, nacaddr.IPv6): if daddr.num_addresses > 1: daddr = '%s%s%s' % (_XML_TABLE.get('destIpv6Start'), daddr.with_prefixlen, _XML_TABLE.get('destIpv6End'),) else: daddr = '%s%s%s' % (_XML_TABLE.get('destIpv6Start'), daddr.network_address, _XML_TABLE.get('destIpv6End')) destinations = '%s%s' %(destinations, daddr) destinations = '%s%s' %(destinations, '') services = [] if protocol: services.append('') for proto in protocol: if proto != 'any': services.append(self._ServiceToString(proto, source_port, destination_port, 
icmp_types)) services.append('') service = '' for s in services: service = '%s%s' % (service, s) # applied_to applied_to_list = '' if self.applied_to: applied_to_list = '' applied_to_element = '%s%s%s' % (_XML_TABLE.get('appliedToStart'), self.applied_to, _XML_TABLE.get('appliedToEnd')) applied_to_list = '%s%s' %(applied_to_list, applied_to_element) applied_to_list = '%s%s' %(applied_to_list, '') # action action = '%s%s%s' % (_XML_TABLE.get('actionStart'), _ACTION_TABLE.get(str(self.term.action[0])), _XML_TABLE.get('actionEnd')) ret_lines = [] ret_lines.append('%s%s%s%s%s%s%s' % (log, name, action, sources, destinations, service, applied_to_list, notes)) # remove any trailing spaces and replace multiple spaces with singles stripped_ret_lines = [re.sub(r'\s+', ' ', x).rstrip() for x in ret_lines] ret_str.extend(stripped_ret_lines) return ''.join(ret_str) def _ServiceToString(self, proto, sports, dports, icmp_types): """Converts service to string. Args: proto: str, protocl sports: str list or none, the source port dports: str list or none, the destination port icmp_types: icmp-type numeric specification (if any) Returns: Service definition. """ service = '' # for icmp and icmpv6 if proto == 1 or proto == 58: # handle icmp protocol for icmp_type in icmp_types: icmp_service = '%s%s%s%s' % (_XML_TABLE.get('serviceStart'), _XML_TABLE.get('protocolStart'), proto, _XML_TABLE.get('protocolEnd')) # handle icmp types if icmp_type: icmp_type = '%s%s%s' %(_XML_TABLE.get('icmpTypeStart'), str(icmp_type), _XML_TABLE.get('icmpTypeEnd')) icmp_service = '%s%s' % (icmp_service, icmp_type) icmp_service = '%s%s' % (icmp_service, _XML_TABLE.get('serviceEnd')) service = '%s%s' % (service, icmp_service) else: # handle other protocols service = '%s%s%s%s' % (_XML_TABLE.get('serviceStart'), _XML_TABLE.get('protocolStart'), proto, _XML_TABLE.get('protocolEnd')) # handle source ports if sports: str_sport = [] for sport in sports: if sport[0] != sport[1]: str_sport.append('%s-%s' % (sport[0], sport[1])) else: str_sport.append('%s' % (sport[0])) service = '%s%s%s%s' % (service, _XML_TABLE.get('srcPortStart'), ', '.join(str_sport), _XML_TABLE.get('srcPortEnd')) # handle destination ports if dports: str_dport = [] for dport in dports: if dport[0] != dport[1]: str_dport.append('%s-%s' % (dport[0], dport[1])) else: str_dport.append('%s' % (dport[0])) service = '%s%s%s%s' % (service, _XML_TABLE.get('destPortStart'), ', '.join(str_dport), _XML_TABLE.get('destPortEnd')) service = '%s%s' % (service, _XML_TABLE.get('serviceEnd')) return service class Nsxv(aclgenerator.ACLGenerator): """Nsxv rendering class. This class takes a policy object and renders the output into a syntax which is understood by nsxv policy. Attributes: pol: policy.Policy object Raises: UnsupportedNsxvAccessListError: Raised when we're give a non named access list. """ _PLATFORM = 'nsxv' _DEFAULT_PROTOCOL = 'ip' SUFFIX = '.nsx' _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration', 'logging', ]) _FILTER_OPTIONS_DICT = {} def _BuildTokens(self): """Build supported tokens for platform. 
Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'logging'} supported_sub_tokens.update({'action': {'accept', 'deny', 'reject', 'reject-with-tcp-rst'}}) del supported_sub_tokens['option'] return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.nsxv_policies = [] current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) if len(filter_options) >= 2: filter_name = filter_options[1] # get filter type, section id and applied To self._ParseFilterOptions(filter_options) filter_type = self._FILTER_OPTIONS_DICT['filter_type'] applied_to = self._FILTER_OPTIONS_DICT['applied_to'] term_names = set() new_terms = [] for term in terms: # Check for duplicate terms if term.name in term_names: raise NsxvDuplicateTermError('There are multiple terms named: %s' % term.name) term_names.add(term.name) if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue # Get the mapped action value # If there is no mapped action value term is not rendered mapped_action = _ACTION_TABLE.get(str(term.action[0])) if not mapped_action: logging.warning('WARNING: Action %s in Term %s is not valid and ' 'will not be rendered.', term.action, term.name) continue term.name = self.FixTermLength(term.name) if filter_type == 'inet': af = 'inet' term = self.FixHighPorts(term, af=af) if not term: continue new_terms.append(Term(term, filter_type, applied_to, 4)) if filter_type == 'inet6': af = 'inet6' term = self.FixHighPorts(term, af=af) if not term: continue new_terms.append(Term(term, filter_type, applied_to, 6)) if filter_type == 'mixed': if 'icmpv6' not in term.protocol: inet_term = self.FixHighPorts(term, 'inet') if not inet_term: continue new_terms.append(Term(inet_term, filter_type, applied_to, 4)) else: inet6_term = self.FixHighPorts(term, 'inet6') if not inet6_term: continue new_terms.append(Term(inet6_term, filter_type, applied_to, 6)) self.nsxv_policies.append((header, filter_name, [filter_type], new_terms)) def _ParseFilterOptions(self, filter_options): """Parses the target in header for filter type, section_id and applied_to. Args: filter_options: list of remaining target options Returns: A dictionary that contains fields necessary to create the firewall rule. Raises: UnsupportedNsxvAccessListError: Raised when we're give a non named access list. """ # check for filter type if not 2 <= len(filter_options) <= 5: raise UnsupportedNsxvAccessListError( 'Invalid Number of options specified: %d. Required options ' 'are: filter type and section name. 
Platform: %s' % ( len(filter_options), self._PLATFORM)) # mandatory section_name section_name = filter_options[0] # mandatory filter_type = filter_options[1] # a mixed filter outputs both ipv4 and ipv6 acls in the same output file good_filters = ['inet', 'inet6', 'mixed'] # check if filter type is renderable if filter_type not in good_filters: raise UnsupportedNsxvAccessListError( 'Access list type %s not supported by %s (good types: %s)' % ( filter_type, self._PLATFORM, str(good_filters))) section_id = 0 applied_to = None filter_opt_len = len(filter_options) if filter_opt_len > 2: for index in range(2, filter_opt_len): if index == 2 and filter_options[2] != 'securitygroup': section_id = filter_options[2] continue if filter_options[index] == 'securitygroup': if index + 1 <= filter_opt_len - 1: applied_to = filter_options[index + 1] break else: raise UnsupportedNsxvAccessListError( 'Security Group Id is not provided for %s' % (self._PLATFORM)) self._FILTER_OPTIONS_DICT['section_name'] = section_name self._FILTER_OPTIONS_DICT['filter_type'] = filter_type self._FILTER_OPTIONS_DICT['section_id'] = section_id self._FILTER_OPTIONS_DICT['applied_to'] = applied_to def __str__(self): """Render the output of the Nsxv policy.""" target_header = [] target = [] # add the p4 tags target.append('') for (_, _, _, terms) in self.nsxv_policies: section_name = six.ensure_str(self._FILTER_OPTIONS_DICT['section_name']) # check section id value section_id = self._FILTER_OPTIONS_DICT['section_id'] if not section_id or section_id == 0: logging.warning('WARNING: Section-id is 0. A new Section is created ' 'for %s. If there is any existing section, it ' 'will remain unreferenced and should be removed ' 'manually.', section_name) target.append('
<section name="%s">' % (section_name.strip(' \t\n\r'))) else: target.append('<section id="%s" name="%s">
' % (section_id, section_name.strip(' \t\n\r'))) # now add the terms for term in terms: term_str = str(term) if term_str: target.append(term_str) # ensure that the header is always first target = target_header + target target.append('%s' % (_XML_TABLE.get('sectionEnd'))) target.append('\n') target_as_xml = xml.dom.minidom.parseString(''.join(target)) # TODO(robankeny) utf encoding with refactoring after migration to py3 return target_as_xml.toprettyxml(indent=' ') capirca-2.0.9/capirca/lib/openconfig.py000066400000000000000000000207121437377527500200230ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Openconfig yang ACL generator. More information about the Openconfig ACL model schema: http://ops.openconfig.net/branches/models/master/openconfig-acl.html """ import copy import datetime import ipaddress import json import logging import re from typing import Dict, Any from capirca.lib import aclgenerator from capirca.lib import nacaddr from collections import defaultdict import six class Error(Exception): """Generic error class.""" class OcFirewallError(Error): """Raised with problems in formatting for OpenConfig firewall.""" class ExceededAttributeCountError(Error): """Raised when the total attribute count of a policy is above the maximum.""" # Graceful handling of dict heierarchy for OpenConfig JSON. def RecursiveDict(): return defaultdict(RecursiveDict) class Term(aclgenerator.Term): """Creates the term for the OpenConfig firewall.""" ACTION_MAP = {'accept': 'ACCEPT', 'deny': 'DROP', 'reject': 'REJECT'} # OpenConfig ip-protocols always will resolve to an 8-bit int, but these # common names are more convenient in a policy file. _ALLOW_PROTO_NAME = frozenset( ['tcp', 'udp', 'icmp', 'esp', 'ah', 'ipip', 'sctp']) AF_RENAME = { 4: 'ipv4', 6: 'ipv6', } def __init__(self, term, inet_version='inet'): super().__init__(term) self.term = term self.inet_version = inet_version # Combine (flatten) addresses with their exclusions into a resulting # flattened_saddr, flattened_daddr, flattened_addr. self.term.FlattenAll() def __str__(self): """Convert term to a string.""" rules = self.ConvertToDict() json.dumps(rules, indent=2) def ConvertToDict(self): """Convert term to a dictionary. This is used to get a dictionary describing this term which can be output easily as an Openconfig JSON blob. It represents an "acl-entry" message from the OpenConfig ACL schema. Returns: A list of dictionaries that contains all fields necessary to create or update a OpenConfig acl-entry. """ term_dict = RecursiveDict() # Rules will hold all exploded acl-entry dictionaries. rules = [] # Convert the integer to the proper openconfig schema name str, ipv4/ipv6. term_af = self.AF_MAP.get(self.inet_version) family = self.AF_RENAME[term_af] # Action action = self.ACTION_MAP[self.term.action[0]] term_dict['actions'] = {} term_dict['actions']['config'] = {} term_dict['actions']['config']['forwarding-action'] = action # Ballot fatigue handling for 'any'. 
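    # The placeholder values assigned below ('any' addresses, (0,0) ports,
    # 'none' protocol) exist so the per-field loops in ConvertToDict() always
    # have something to iterate over, and a field is only written into the
    # acl-entry when it is not the placeholder. A standalone sketch of that
    # placeholder pattern (illustrative only, not the generator's code):
    #
    #   import itertools
    #
    #   def _explode_aces(saddrs=None, daddrs=None, protos=None):
    #     """Yield one flat dict per (saddr, daddr, proto) combination."""
    #     saddrs = saddrs or ['any']    # placeholder keeps the loop running
    #     daddrs = daddrs or ['any']
    #     protos = protos or ['none']
    #     for saddr, daddr, proto in itertools.product(saddrs, daddrs, protos):
    #       ace = {}
    #       if saddr != 'any':
    #         ace['source-address'] = str(saddr)
    #       if daddr != 'any':
    #         ace['destination-address'] = str(daddr)
    #       if proto != 'none':
    #         ace['protocol'] = proto
    #       yield ace
    #
    #   # Example: two sources and one protocol yield two candidate entries.
    #   # list(_explode_aces(['10.0.0.0/8', '192.168.0.0/16'], None, ['tcp']))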
saddrs = self.term.GetAddressOfVersion('flattened_saddr', term_af) if not saddrs: saddrs = ['any'] daddrs = self.term.GetAddressOfVersion('flattened_daddr', term_af) if not daddrs: daddrs = ['any'] sports = self.term.source_port if not sports: sports = [(0,0)] dports = self.term.destination_port if not dports: dports = [(0,0)] protos = self.term.protocol if not protos: protos = ['none'] ace_dict = copy.deepcopy(term_dict) # Source Addresses for saddr in saddrs: if saddr != 'any': ace_dict[family]['config']['source-address'] = str(saddr) # Destination Addresses for daddr in daddrs: if daddr != 'any': ace_dict[family]['config']['destination-address'] = str(daddr) # Source Port for start, end in sports: # 'any' starts and ends with zero. if not start == end == 0: if start == end: ace_dict[family]['transport']['config']['source-port'] = int(start) else: ace_dict[family]['transport']['config']['source-port'] = '%d..%d' % (start, end) # Destination Port for start, end in dports: if not start == end == 0: if start == end: ace_dict[family]['transport']['config']['destination-port'] = int(start) else: ace_dict[family]['transport']['config']['destination-port'] = '%d..%d' % (start, end) # Protocol for proto in protos: if isinstance(proto, str): if proto != 'none': try: proto_num = self.PROTO_MAP[proto] except KeyError: raise OcFirewallError( 'Protocol %s unknown. Use an integer.', proto) ace_dict[family]['config']['protocol'] = proto_num rules.append(copy.deepcopy(ace_dict)) else: proto_num = proto ace_dict[family]['config']['protocol'] = proto_num # This is the business end of ace explosion. # A dict is a reference type, so deepcopy is atually required. rules.append(copy.deepcopy(ace_dict)) return rules class OpenConfig(aclgenerator.ACLGenerator): """A OpenConfig firewall policy object.""" _PLATFORM = 'openconfig' SUFFIX = '.oacl' _SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed')) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() # Remove unsupported things supported_tokens -= {'platform', 'platform_exclude', 'icmp-type', 'verbatim'} # OpenConfig ACL model only supports these three forwarding actions. supported_sub_tokens['action'] = {'accept', 'deny', 'reject'} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.oc_policies = [] total_rule_count = 0 current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM) filter_name = header.FilterName(self._PLATFORM) # Options are anything after the platform name in the target message of # the policy header, [1:]. # Get the address family if set. 
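    # The loop just below scans the remaining header options for one of the
    # supported address families, removes it from the option list, and falls
    # back to 'inet'. A minimal standalone restatement (illustrative names,
    # not the generator's API):
    #
    #   _SKETCH_SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed'))
    #
    #   def _pick_address_family(filter_options):
    #     """Return the address family named in the options, default inet."""
    #     address_family = 'inet'
    #     for af in _SKETCH_SUPPORTED_AF:
    #       if af in filter_options:
    #         address_family = af
    #         filter_options.remove(af)
    #     return address_family
    #
    #   # _pick_address_family(['my-filter', 'inet6'])  -> 'inet6'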
address_family = 'inet' for i in self._SUPPORTED_AF: if i in filter_options: address_family = i filter_options.remove(i) for term in terms: if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue # TODO(b/196430344): Add support for options such as # established/rst/first-fragment if term.option: raise OcFirewallError( 'OpenConfig firewall does not support term options.') # Handle mixed for each indvidual term as inet and inet6. # inet/inet6 are treated the same. term_address_families = [] if address_family == 'mixed': term_address_families = ['inet', 'inet6'] else: term_address_families = [address_family] for term_af in term_address_families: t = Term(term, term_af) for rule in t.ConvertToDict(): total_rule_count += 1 self.oc_policies.append(rule) logging.info('Total rule count of policy %s is: %d', filter_name, total_rule_count) def __str__(self): out = '%s\n\n' % (json.dumps(self.oc_policies, indent=2, separators=(six.ensure_str(','), six.ensure_str(': ')), sort_keys=True)) return out capirca-2.0.9/capirca/lib/packetfilter.py000066400000000000000000000466211437377527500203600ustar00rootroot00000000000000# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """PacketFilter (PF) generator.""" import collections import copy import datetime from typing import cast from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr class Error(Exception): """Base error class.""" class DuplicateTermError(Error): """Raised when duplication of term names are detected.""" class DuplicateShortenedTableNameError(Error): """Raised when a duplicate shortened table name is found.""" class UnsupportedProtoError(Error): """Raised when a protocol is not supported.""" class Term(aclgenerator.Term): """Generate PacketFilter policy terms.""" # Validate that term does not contain any fields we do not # support. This prevents us from thinking that our output is # correct in cases where we've omitted fields from term. _PLATFORM = 'packetfilter' _ACTION_TABLE = { 'accept': 'pass', 'deny': 'block drop', 'reject': 'block return', 'next': 'pass', } # Moving the log keyword into an member variable allows subclasses to override # it to support logging options outside of the scope of the capirca policy # spec, e.g., on platform-specific options such as packetfilter's # "log (all, to pflogN)" per-direction. 
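  # Because the log keyword lives in a class-level table (below), a
  # platform-specific subclass can swap in different pf logging syntax
  # without touching the rendering logic. A hypothetical standalone sketch
  # of that override pattern (names are illustrative, not capirca classes):
  #
  #   class _SketchBaseTerm:
  #     _LOG_TABLE = {'': 'log', 'in': 'log', 'out': 'log'}
  #
  #     def log_keyword(self, direction):
  #       return self._LOG_TABLE.get(direction, 'log')
  #
  #   class _SketchVerboseLogTerm(_SketchBaseTerm):
  #     # Override only the data; the inherited method picks it up.
  #     _LOG_TABLE = {'': 'log (all)', 'in': 'log (all)', 'out': 'log (all)'}
  #
  #   # _SketchVerboseLogTerm().log_keyword('in')  -> 'log (all)'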
_LOG_TABLE = { '': 'log', 'in': 'log', 'out': 'log', } _QUICK_TABLE = { 'accept': 'quick', 'deny': 'quick', 'reject': 'quick', 'next': '', } _DIRECTION_TABLE = { '': '', 'in': 'in', 'out': 'out', } _TCP_FLAGS_TABLE = { 'syn': 'S', 'ack': 'A', 'fin': 'F', 'rst': 'R', 'urg': 'U', 'psh': 'P', 'all': 'SAFRUP', 'none': 'NONE', } _PROTO_TABLE = { 'icmpv6': 'ipv6-icmp', } _UNSUPPORTED_PROTOS = ['hopopt'] def __init__(self, term, filter_name, stateful=True, af='inet', direction=''): """Setup a new term. Args: term: A policy.Term object to represent in packetfilter. filter_name: The name of the filter chan to attach the term to. stateful: Whether to keep firewall state for the term. af: Which address family ('inet' or 'inet6') to apply the term to. direction: What direction the term applies to ('in', 'out' or both). Raises: aclgenerator.UnsupportedFilterError: Filter is not supported. """ super().__init__(term) self.term = term # term object self.filter = filter_name # actual name of filter self.options = [] self.default_action = 'deny' self.af = af self.stateful = stateful self.direction = direction def __str__(self): """Render config output from this term object.""" # Verify platform specific terms. Skip whole term if platform does not # match. if self.term.platform: if self._PLATFORM not in self.term.platform: return '' if self.term.platform_exclude: if self._PLATFORM in self.term.platform_exclude: return '' ret_str = [] self._SetDefaultAction() # Create a new term ret_str.append('\n# term %s' % self.term.name) comments = aclgenerator.WrapWords(self.term.comment, 80) # append comments to output if comments and comments[0]: for line in comments: ret_str.append('# %s' % str(line)) if str(self.term.action[0]) not in self._ACTION_TABLE: raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % ( '\n', self.term.name, self.term.action[0], 'action not currently supported.')) if self.direction and str(self.direction) not in self._DIRECTION_TABLE: raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % ( '\n', self.term.name, self.term.direction, 'direction not currently supported.')) # protocol if self.term.protocol: protocol = self.term.protocol else: protocol = [] # source address term_saddrs = self._CheckAddressAf(self.term.source_address) if not term_saddrs: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='source', af=self.af)) return '' term_saddr = self._GenerateAddrStatement( term_saddrs, self.term.source_address_exclude) # destination address term_daddrs = self._CheckAddressAf(self.term.destination_address) if not term_daddrs: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='destination', af=self.af)) return '' term_daddr = self._GenerateAddrStatement( term_daddrs, self.term.destination_address_exclude) # ports source_port = [] destination_port = [] if self.term.source_port: source_port = self._GeneratePortStatement(self.term.source_port) if self.term.destination_port: destination_port = self._GeneratePortStatement(self.term.destination_port) # icmp-type icmp_types = [''] if self.term.icmp_type: if protocol == ['icmp']: af = 'inet' elif protocol == ['icmpv6']: af = 'inet6' else: raise aclgenerator.UnsupportedFilterError('%s %s %s' % ( '\n', self.term.name, 'icmp protocol is not defined or not supported.')) icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, protocol, af) # options tcp_flags_set = [] tcp_flags_check = [] for next_opt in [str(x) for x in self.term.option]: for next_flag in self._TCP_FLAGS_TABLE: if 
next_opt.find(next_flag) == 0: if protocol != ['tcp']: raise aclgenerator.UnsupportedFilterError('%s %s %s' % ( '\n', self.term.name, 'tcp flags may only be specified with tcp protocol.')) tcp_flags_set.append(self._TCP_FLAGS_TABLE.get(next_flag)) tcp_flags_check.append(self._TCP_FLAGS_TABLE.get(next_flag)) # If tcp-established is set, override any of the flags above with the # S/SA flags. Issue an error if flags are specified with 'established'. for opt in [str(x) for x in self.term.option]: if opt == 'established' or opt == 'tcp-established': if tcp_flags_set or tcp_flags_check: raise aclgenerator.UnsupportedFilterError('%s %s %s' % ( '\n', self.term.name, 'tcp flags may not be specified with tcp-established.')) # We need to set 'flags A/A' for established regardless of whether or # not we're stateful: # - if we stateful, the default is 'flags S/SA' which prevent writing # rules for reply packets. # - if we're stateless, this is the only way to do it. if not protocol or 'tcp' in protocol: tcp_flags_set.append(self._TCP_FLAGS_TABLE.get('ack')) tcp_flags_check.append(self._TCP_FLAGS_TABLE.get('ack')) # The default behavior of pf is 'keep state flags S/SA'. If we're not # stateless, and if flags have not been specified explicitly via options, # append that here. Note that pf allows appending flags for udp and icmp; # they are just ignored, as long as TCP is in the proto. This lets you # doing things like 'proto { tcp udp icmp } flags S/SA' and have the flags # only applied to the tcp bits that match. However, the policy description # language prohibits setting flags on non-TCP, since it doesn't make sense # on all platforms. if ((not protocol or protocol == ['tcp']) and self.stateful and not tcp_flags_set and not tcp_flags_check): tcp_flags_set.append(self._TCP_FLAGS_TABLE.get('syn')) tcp_flags_check.append(self._TCP_FLAGS_TABLE.get('syn')) tcp_flags_check.append(self._TCP_FLAGS_TABLE.get('ack')) ret_str.extend(self._FormatPart( self.term.action[0], self.direction, self.term.logging, self.term.interface, self.af, protocol, term_saddr, source_port, term_daddr, destination_port, tcp_flags_set, tcp_flags_check, icmp_types, self.options, self.stateful,)) return '\n'.join(str(v) for v in ret_str if v) def _CheckAddressAf(self, addrs): """Verify that the requested address-family matches the address's family.""" if not addrs: return ['any'] if self.af == 'mixed': return addrs af_addrs = [] af = self.NormalizeAddressFamily(self.af) for addr in addrs: if addr.version == af: af_addrs.append(addr) return af_addrs def _FormatPart(self, action, direction, log, interface, af, proto, src_addr, src_port, dst_addr, dst_port, tcp_flags_set, tcp_flags_check, icmp_types, options, stateful): """Format the string which will become a single PF entry.""" line = ['%s' % self._ACTION_TABLE.get(action)] if direction: line.append(direction) quick = self._QUICK_TABLE.get(action) if quick: line.append('%s' % quick) if log: logaction = self._LOG_TABLE.get(direction) if logaction: line.append(logaction) else: line.append('log') if interface: line.append('on %s' % interface) if af != 'mixed': line.append(af) if proto: line.append(self._GenerateProtoStatement(proto)) line.append('from %s' % src_addr) if src_port: line.append('port %s' % src_port) line.append('to %s' % dst_addr) if dst_port: line.append('port %s' % dst_port) if tcp_flags_set and tcp_flags_check: line.append('flags') line.append('%s/%s' % (''.join(tcp_flags_set), ''.join(tcp_flags_check))) if 'icmp' in proto and icmp_types: type_strs = [str(icmp_type) for 
icmp_type in icmp_types] type_strs = ', '.join(type_strs) if type_strs: line.append('icmp-type { %s }' % type_strs) if 'icmpv6' in proto and icmp_types: type_strs = [str(icmp_type) for icmp_type in icmp_types] type_strs = ', '.join(type_strs) if type_strs: line.append('icmp6-type { %s }' % type_strs) if options: line.extend(options) if not stateful: line.append('no state') elif action in ['accept', 'next']: line.append('keep state') return [' '.join(line)] def _GenerateProtoStatement(self, protocols): proto = '' if protocols: protocols = copy.deepcopy(protocols) for i, proto in enumerate(protocols): if proto in self._UNSUPPORTED_PROTOS: raise UnsupportedProtoError try: protocols[i] = self._PROTO_TABLE[proto] except KeyError: pass proto = 'proto { %s }' % ' '.join(protocols) return proto def _GenerateAddrStatement(self, addrs, exclude_addrs): addresses = set() if addrs != ['any']: parent_token_set = set() for addr in addrs: addr = cast(nacaddr.IPType, addr) parent_token_set.add(addr.parent_token) for token in parent_token_set: addresses.add('<%s>' % token[:31]) else: addresses.add('any') if exclude_addrs != ['any']: parent_token_set = set() for addr in exclude_addrs: addr = cast(nacaddr.IPType, addr) parent_token_set.add(addr.parent_token) for token in parent_token_set: addresses.add('!<%s>' % token[:31]) return '{ %s }' % ', '.join(sorted(addresses)) def _GeneratePortStatement(self, ports): port_list = [] for port_tuple in ports: if port_tuple[0] == port_tuple[1]: port_list.append(str(port_tuple[0])) else: port_list.append('%s:%s' % (port_tuple[0], port_tuple[1])) return '{ %s }' % ( ' '.join(list(collections.OrderedDict.fromkeys(port_list)))) def _SetDefaultAction(self): """If term does not specify action, use filter default action.""" if not self.term.action: self.term.action[0].value = self.default_action class PacketFilter(aclgenerator.ACLGenerator): """Generates filters and terms from provided policy object.""" _DEF_MAX_LENGTH = 31 _PLATFORM = 'packetfilter' _DEFAULT_PROTOCOL = 'all' SUFFIX = '.pf' _TERM = Term def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'logging', 'destination_interface', 'source_interface'} supported_sub_tokens.update({ 'action': {'accept', 'deny', 'reject', 'next'}, 'option': { 'established', 'tcp-established', 'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all'}, }) return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.pf_policies = [] self.address_book = {} self.def_short_to_long = {} current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) good_afs = ['inet', 'inet6', 'mixed'] good_options = ['in', 'out', 'nostate'] all_protocols_stateful = True for header, terms in pol.filters: filter_type = None if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM)[1:] filter_name = header.FilterName(self._PLATFORM) direction = '' # ensure all options after the filter name are expected for opt in filter_options: if opt not in good_afs + good_options: raise aclgenerator.UnsupportedTargetOptionError('%s %s %s %s' % ( '\nUnsupported option found in', self._PLATFORM, 'target definition:', opt)) # pf will automatically add 'keep state flags S/SA' to all TCP connections # by default. 
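      # The header option handling that follows folds 'nostate', 'in'/'out'
      # and an optional address family into defaults for the whole filter. A
      # compact standalone restatement of that logic (illustrative only; the
      # real code raises capirca-specific exceptions instead of ValueError):
      #
      #   _SKETCH_GOOD_AFS = ['inet', 'inet6', 'mixed']
      #
      #   def _parse_pf_filter_options(filter_options):
      #     """Return (stateful, direction, address_family) for a filter."""
      #     stateful = 'nostate' not in filter_options
      #     direction = ''
      #     if 'in' in filter_options:
      #       direction = 'in'
      #     elif 'out' in filter_options:
      #       direction = 'out'
      #     afs = [opt for opt in filter_options if opt in _SKETCH_GOOD_AFS]
      #     if len(afs) > 1:
      #       raise ValueError('may only specify one address family: %r' % afs)
      #     return stateful, direction, (afs[0] if afs else 'inet')
      #
      #   # _parse_pf_filter_options(['out', 'nostate', 'inet6'])
      #   #   -> (False, 'out', 'inet6')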
if 'nostate' in filter_options: all_protocols_stateful = False if 'in' in filter_options: direction = 'in' elif 'out' in filter_options: direction = 'out' # Check for matching af for address_family in good_afs: if address_family in filter_options: # should not specify more than one AF in options if filter_type is not None: raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % ( '\nMay only specify one of', good_afs, 'in filter options:', filter_options)) filter_type = address_family if filter_type is None: filter_type = 'inet' # add the terms new_terms = [] term_names = set() for term in terms: term.name = self.FixTermLength(term.name) if term.name in term_names: raise DuplicateTermError( 'You have a duplicate term: %s' % term.name) term_names.add(term.name) for source_addr in term.source_address: src_token = source_addr.parent_token[:self._DEF_MAX_LENGTH] if (src_token in self.def_short_to_long and self.def_short_to_long[src_token] != source_addr.parent_token): raise DuplicateShortenedTableNameError( 'There is a shortened name conflict between names %s and %s ' '(different named objects would conflict when shortened to %s)' % (self.def_short_to_long[src_token], source_addr.parent_token, src_token)) else: self.def_short_to_long[src_token] = source_addr.parent_token if src_token not in self.address_book: self.address_book[src_token] = set([source_addr]) else: self.address_book[src_token].add(source_addr) for dest_addr in term.destination_address: dst_token = dest_addr.parent_token[:self._DEF_MAX_LENGTH] if (dst_token in self.def_short_to_long and self.def_short_to_long[dst_token] != dest_addr.parent_token): raise DuplicateShortenedTableNameError( 'There is a shortened name conflict between names %s and %s ' '(different named objects would conflict when shortened to %s)' %(self.def_short_to_long[dst_token], dest_addr.parent_token, dst_token)) else: self.def_short_to_long[dst_token] = dest_addr.parent_token if dst_token not in self.address_book: self.address_book[dst_token] = set([dest_addr]) else: self.address_book[dst_token].add(dest_addr) if not term: continue if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if term.destination_interface and term.source_interface: raise Error("packetfilter only supports destination interface XOR source interface per term") term_direction = direction if term.destination_interface: term_direction = 'out' term.interface = term.destination_interface elif term.source_interface: term_direction = 'in' term.interface = term.source_interface else: term.interface = None new_terms.append(self._TERM(term, filter_name, all_protocols_stateful, filter_type, term_direction)) self.pf_policies.append((header, filter_name, filter_type, new_terms)) def __str__(self): """Render the output of the PF policy into config.""" target = [] pretty_platform = '%s%s' % (self._PLATFORM[0].upper(), self._PLATFORM[1:]) # Create address table. 
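    # The loop that follows renders one pf "table <name> { ... }" line per
    # named address set, listing IPv4 entries before IPv6. A standalone sketch
    # of the same rendering, using plain ipaddress networks (illustrative
    # only):
    #
    #   import ipaddress
    #
    #   def _render_pf_tables(address_book):
    #     """address_book: dict of table name -> set of ip_network objects."""
    #     lines = []
    #     for name in sorted(address_book):
    #       v4 = sorted(a for a in address_book[name] if a.version == 4)
    #       v6 = sorted(a for a in address_book[name] if a.version == 6)
    #       entries = ',\\\n'.join(str(a) for a in v4 + v6)
    #       lines.append('table <%s> {%s}' % (name, entries))
    #     return lines
    #
    #   # _render_pf_tables({'WEB': {ipaddress.ip_network('10.0.0.0/24'),
    #   #                            ipaddress.ip_network('2001:db8::/64')}})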
for name in sorted(self.address_book): v4 = sorted([x for x in self.address_book[name] if x.version == 4]) v6 = sorted([x for x in self.address_book[name] if x.version == 6]) entries = ',\\\n'.join(str(x) for x in v4 + v6) target.append('table <%s> {%s}' % (name, entries)) # pylint: disable=unused-variable for (header, filter_name, filter_type, terms) in self.pf_policies: # Add comments for this filter target.append('# %s %s Policy' % (pretty_platform, header.FilterName(self._PLATFORM))) # reformat long text comments, if needed comments = aclgenerator.WrapWords(header.comment, 70) if comments and comments[0]: for line in comments: target.append('# %s' % line) target.append('#') # add the p4 tags target.extend(aclgenerator.AddRepositoryTags('# ')) target.append('# ' + filter_type) # add the terms for term in terms: term_str = str(term) if term_str: target.append(term_str) target.append('') return '\n'.join(target) capirca-2.0.9/capirca/lib/paloaltofw.py000066400000000000000000001213351437377527500200470ustar00rootroot00000000000000# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Palo Alto Firewall generator.""" import collections import copy import datetime import logging import re from xml.dom import minidom import xml.etree.ElementTree as etree from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import policy class Error(Exception): """generic error class.""" class UnsupportedFilterError(Error): pass class UnsupportedHeaderError(Error): pass class PaloAltoFWDuplicateTermError(Error): pass class PaloAltoFWUnsupportedProtocolError(Error): pass class PaloAltoFWVerbatimError(Error): pass class PaloAltoFWOptionError(Error): pass class PaloAltoFWDuplicateServiceError(Error): pass class PaloAltoFWNameTooLongError(Error): pass class PaloAltoFWBadIcmpTypeError(Error): pass class ServiceMap: """Manages service names across a single policy instance.""" def __init__(self): self.entries = {} def get_service_name(self, term_name, src_ports, ports, protocol, prefix=None): """Returns service name based on the provided ports and protocol.""" if (src_ports, ports, protocol) in self.entries: return self.entries[(src_ports, ports, protocol)]["name"] if prefix is None: prefix = "service-" service_name = "%s%s-%s" % (prefix, term_name, protocol) if len(service_name) > 63: raise PaloAltoFWNameTooLongError( "Service name must be 63 characters max: %s" % service_name) for _, service in self.entries.items(): if service["name"] == service_name: raise PaloAltoFWDuplicateServiceError( "You have a duplicate service. A service named %s already exists." 
% service_name) self.entries[(src_ports, ports, protocol)] = {"name": service_name} return service_name class Rule: """Extend the Term() class for PaloAlto Firewall Rules.""" def __init__(self, from_zone, to_zone, term, service_map): # Palo Alto Firewall rule keys MAX_ZONE_LENGTH = 31 if not from_zone or not to_zone: raise PaloAltoFWOptionError("Source or destination zone is empty.") if len(from_zone) > MAX_ZONE_LENGTH: x = "Source zone must be %d characters max: %s" % (MAX_ZONE_LENGTH, from_zone) raise PaloAltoFWNameTooLongError(x) if len(to_zone) > MAX_ZONE_LENGTH: x = "Destination zone must be %d characters max: %s" % (MAX_ZONE_LENGTH, to_zone) raise PaloAltoFWNameTooLongError(x) self.options = [] while term is not None: x, term = self.TermToOptions(from_zone, to_zone, term, service_map) self.options.append(x) @staticmethod def TermToOptions(from_zone, to_zone, term, service_map): """Convert term to Palo Alto security rule options.""" options = {} options["from_zone"] = [from_zone] options["to_zone"] = [to_zone] options["description"] = [] options["source"] = [] options["destination"] = [] options["application"] = [] options["service"] = [] options["logging"] = [] ACTIONS = { "accept": "allow", "deny": "deny", "reject": "reset-client", "reject-with-tcp-rst": "reset-client", } new_term = None def pan_ports(ports): x = [] for tup in ports: if len(tup) > 1 and tup[0] != tup[1]: x.append(str(tup[0]) + "-" + str(tup[1])) else: x.append(str(tup[0])) return tuple(x) # COMMENT if term.comment: options["description"] = term.comment # LOGGING if term.logging: for item in term.logging: if item.value in ["disable"]: options["logging"] = ["disable"] break elif item.value in ["log-both"]: options["logging"].append("log-start") options["logging"].append("log-end") elif item.value in ["True", "true", "syslog", "local"]: options["logging"].append("log-end") # SOURCE-ADDRESS if term.source_address: saddr_check = set() for saddr in term.source_address: saddr_check.add(saddr.parent_token) saddr_check = sorted(saddr_check) for addr in saddr_check: options["source"].append(str(addr)) # missing source handled during XML document generation # DESTINATION-ADDRESS if term.destination_address: daddr_check = set() for daddr in term.destination_address: daddr_check.add(daddr.parent_token) daddr_check = sorted(daddr_check) for addr in daddr_check: options["destination"].append(str(addr)) # missing destination handled during XML document generation # ACTION if term.action: options["action"] = ACTIONS[term.action[0]] if term.option: options["option"] = term.option if term.pan_application: for pan_app in term.pan_application: options["application"].append(pan_app) if term.source_port or term.destination_port: src_ports = pan_ports(term.source_port) if term.destination_port: ports = pan_ports(term.destination_port) else: ports = pan_ports([("0", "65535")]) # check to see if this service already exists for p in term.protocol: service_name = service_map.get_service_name(term.name, src_ports, ports, p) if service_name not in options["service"]: options["service"].append(service_name) elif "tcp" in term.protocol or "udp" in term.protocol: services = {"tcp", "udp"} & set(term.protocol) others = set(term.protocol) - services if others: logging.info( "INFO: Term %s in policy %s>%s contains port-less %s " "with non-port protocol(s). 
Moving %s to a new term.", term.name, from_zone, to_zone, ', '.join(list(services)), ', '.join(list(others))) new_term = copy.deepcopy(term) new_term.protocol = list(others) term.protocol = list(services) options["application"] = [] for p in term.protocol: ports = pan_ports([("0", "65535")]) # use prefix "" to avoid service name clash with term named "any" service_name = service_map.get_service_name("any", (), ports, p, "") if service_name not in options["service"]: options["service"].append(service_name) if term.protocol: # Add certain protocol names as application in the application list # if missing. for proto_name in term.protocol: if (proto_name in ["igmp", "sctp", "gre"] and proto_name not in options["application"]): options["application"].append(proto_name) elif proto_name in ("ah", "esp"): ipsec_app_proto = "ipsec-%s" % proto_name if ipsec_app_proto not in options["application"]: options["application"].append(ipsec_app_proto) return options, new_term class PaloAltoFW(aclgenerator.ACLGenerator): """PaloAltoFW rendering class.""" _PLATFORM = "paloalto" SUFFIX = ".xml" _SUPPORTED_AF = set(("inet", "inet6", "mixed")) _AF_MAP = {"inet": (4,), "inet6": (6,), "mixed": (4, 6)} _TERM_MAX_LENGTH = 63 _APPLICATION_NAME_MAX_LENGTH = 31 _TERM_PREFIX_LENGTH = 24 _SUPPORTED_PROTO_NAMES = [ "tcp", "udp", "icmp", "icmpv6", "sctp", "igmp", "gre", "ah", "esp", ] _MAX_RULE_DESCRIPTION_LENGTH = 1024 _MAX_TAG_COMMENTS_LENGTH = 1023 _TAG_NAME_FORMAT = "{from_zone}_{to_zone}_policy-comment-{num}" _MAX_RULE_SRC_DST_MEMBERS = 65535 _ABBREVIATION_TABLE = [ # Service abbreviations first. ("experiment", "EXP"), ("wifi-radius", "W-R"), ("customer", "CUST"), ("server", "SRV"), # Next, common routing terms ("global", "GBL"), ("google", "GOOG"), ("service", "SVC"), ("router", "RTR"), ("internal", "INT"), ("external", "EXT"), ("transit", "TRNS"), ("management", "MGMT"), # State info ("established", "EST"), ("unreachable", "UNR"), ("fragment", "FRAG"), ("accept", "ACC"), ("discard", "DISC"), ("reject", "REJ"), ("replies", "RPLS"), ("reply", "RPL"), ("request", "REQ"), # ICMP types specific ("inverse", "INV"), ("neighbor", "NBR"), ("discovery", "DSCVR"), ("advertisement", "ADV"), ("solicitation", "SOL"), ("multicast", "MCAST"), ("certification", "CERT"), ("listener", "LSNR"), ("address", "ADDR"), ] INDENT = " " def __init__(self, pol, exp_info): self.pafw_policies = [] self.addressbook = collections.OrderedDict() self.applications = [] self.application_refs = {} self.application_groups = [] self.pan_applications = [] self.ports = [] self.from_zone = "" self.to_zone = "" self.policy_name = "" self.config = None self.service_map = ServiceMap() super().__init__(pol, exp_info) def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens = { "action", "comment", "destination_address", "destination_address_exclude", "destination_port", "expiration", "icmp_type", "logging", "name", "option", "owner", "platform", "platform_exclude", "protocol", "source_address", "source_address_exclude", "source_port", "stateless_reply", "timeout", "pan_application", "translated", } supported_sub_tokens.update({ "action": {"accept", "deny", "reject", "reject-with-tcp-rst"}, "option": {"established", "tcp-established"}, }) return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): """Transform a policy object into a PaloAltoFW object. 
Args: pol: policy.Policy object exp_info: print a info message when a term is set to expire in that many weeks Raises: UnsupportedFilterError: An unsupported filter was specified UnsupportedHeaderError: A header option exists that is not understood/usable PaloAltoFWDuplicateTermError: Two terms were found with same name in same filter PaloAltoFWBadIcmpTypeError: The referenced ICMP type is not supported by the policy term. PaloAltoFWUnsupportedProtocolError: The term contains unsupporter protocol name. """ current_date = datetime.date.today() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) first_addr_obj = None for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue # The filter_options is a list of options from header, e.g. # ['from-zone', 'internal', 'to-zone', 'external'] filter_options = header.FilterOptions(self._PLATFORM) if (len(filter_options) < 4 or filter_options[0] != "from-zone" or filter_options[2] != "to-zone"): raise UnsupportedFilterError( "Palo Alto Firewall filter arguments must specify from-zone and " "to-zone.") self.from_zone = filter_options[1] self.to_zone = filter_options[3] # The filter_type values are either inet, inet6, or mixed. Later, the # code analyzes source and destination IP addresses and determines whether # it is an appropriate type for the filter_type value. if len(filter_options) > 4: filter_type = filter_options[4] else: filter_type = "inet" if filter_type not in self._SUPPORTED_AF: raise UnsupportedHeaderError( 'Palo Alto Firewall Generator invalid address family option: "%s"' '; expect {%s}' % (filter_type, '|'.join(self._SUPPORTED_AF))) valid_addr_obj = ["addr-obj", "no-addr-obj"] if len(filter_options) > 5 and filter_options[5] not in valid_addr_obj: raise UnsupportedHeaderError( 'Palo Alto Firewall Generator invalid address objects option: "%s"' '; expect {%s}' % (filter_options[5], '|'.join(valid_addr_obj))) no_addr_obj = True if (len(filter_options) > 5 and filter_options[5] == "no-addr-obj") else False unique_term_prefixes = True if ( len(filter_options) > 6 and filter_options[6] == "unique-term-prefixes") else False if first_addr_obj is None: first_addr_obj = no_addr_obj if first_addr_obj != no_addr_obj: raise UnsupportedHeaderError( "Cannot mix addr-obj and no-addr-obj header option in " "a single policy file") term_dup_check = set() new_terms = [] for term in terms: if term.stateless_reply: logging.warning( "WARNING: Term %s in policy %s>%s is a stateless reply " "term and will not be rendered.", term.name, self.from_zone, self.to_zone) continue if "established" in term.option: logging.warning( "WARNING: Term %s in policy %s>%s is a established " "term and will not be rendered.", term.name, self.from_zone, self.to_zone) continue if "tcp-established" in term.option: logging.warning( "WARNING: Term %s in policy %s>%s is a tcp-established " "term and will not be rendered.", term.name, self.from_zone, self.to_zone) continue # Verify platform specific terms. Skip whole term if platform does not # match. if term.platform: if self._PLATFORM not in term.platform: continue if term.platform_exclude: if self._PLATFORM in term.platform_exclude: continue if unique_term_prefixes: # Prefix hash of from_zone and to_zone to the term name to get mostly # unique names across different policies. # This is not crytopgraphically guaranteed. # This modified term name should not exceed _TERM_MAX_LENGTH. 
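          # The statement that follows prepends a fixed-length hash of the
          # zone pair to the term name so names stay (mostly) unique across
          # policies, then runs the result through FixTermLength(). The sketch
          # below approximates the idea with hashlib; capirca's own HexDigest()
          # helper is the real implementation, so treat this as illustrative
          # only.
          #
          #   import hashlib
          #
          #   def _prefixed_term_name(from_zone, to_zone, term_name,
          #                           prefix_len=24, max_len=63):
          #     digest = hashlib.sha256(
          #         (from_zone + to_zone).encode()).hexdigest()
          #     name = digest[:prefix_len] + '-' + term_name
          #     return name[:max_len]
          #
          #   # _prefixed_term_name('internal', 'external', 'allow-web')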
term.name = self.HexDigest(self.from_zone + self.to_zone, self._TERM_PREFIX_LENGTH) + "-" + term.name term.name = self.FixTermLength(term.name) if term.name in term_dup_check: raise PaloAltoFWDuplicateTermError("You have a duplicate term: %s" % term.name) term_dup_check.add(term.name) services = {"tcp", "udp"} & set(term.protocol) others = set(term.protocol) - services if others and term.pan_application: raise UnsupportedFilterError( "Term %s contains non tcp, udp protocols with pan-application: %s: %s" "\npan-application can only be used with protocols tcp, udp" % (term.name, ', '.join(term.pan_application), ', '.join(term.protocol))) if term.expiration: if term.expiration <= exp_info_date: logging.info( "INFO: Term %s in policy %s>%s expires " "in less than two weeks.", term.name, self.from_zone, self.to_zone) if term.expiration <= current_date: logging.warning( "WARNING: Term %s in policy %s>%s is expired and " "will not be rendered.", term.name, self.from_zone, self.to_zone) continue for i in term.source_address_exclude: term.source_address = nacaddr.RemoveAddressFromList( term.source_address, i) for i in term.destination_address_exclude: term.destination_address = nacaddr.RemoveAddressFromList( term.destination_address, i) # Count the number of occurencies of a particular version of the # address family, i.e. v4/v6 in source and destination IP addresses. afc = { 4: { "src": 0, "dst": 0 }, 6: { "src": 0, "dst": 0 }, } # Determine the address families in the source and destination # addresses references in the term. Next, determine IPv4 and IPv6 # traffic flow patterns. exclude_address_family = [] flows = [] src_any = False dst_any = False if not term.source_address: src_any = True if not term.destination_address: dst_any = True for addr in term.source_address: afc[addr.version]["src"] += 1 for addr in term.destination_address: afc[addr.version]["dst"] += 1 for v in [4, 6]: if src_any and dst_any: flows.append("ip%d-ip%d" % (v, v)) continue if (afc[v]["src"] == 0 and not src_any) and (afc[v]["dst"] == 0 and not dst_any): continue if (afc[v]["src"] > 0 or src_any) and (afc[v]["dst"] > 0 or dst_any): flows.append("ip%d-ip%d" % (v, v)) continue if (afc[v]["src"] > 0 or src_any) and afc[v]["dst"] == 0: flows.append("ip%d-src-only" % v) flows.append("ip%d-only" % v) continue if afc[v]["src"] == 0 and (afc[v]["dst"] > 0 or dst_any): flows.append("ip%d-dst-only" % v) flows.append("ip%d-only" % v) if filter_type == "inet": if "icmpv6" in term.protocol: logging.warning( "WARNING: Term %s in policy %s>%s references ICMPv6 protocol, " "term will not be rendered.", term.name, self.from_zone, self.to_zone) continue if "ip4-ip4" not in flows: logging.warning( "WARNING: Term %s in policy %s>%s has one or more invalid " "src-dest combinations %s, term will not be rendered.", term.name, self.from_zone, self.to_zone, flows) continue # exclude IPv6 addresses exclude_address_family.append(6) elif filter_type == "inet6": if "icmp" in term.protocol: logging.warning( "WARNING: Term %s in policy %s>%s references ICMP protocol, " "term and will not be rendered.", term.name, self.from_zone, self.to_zone) continue if "ip6-ip6" not in flows: logging.warning( "WARNING: Term %s in policy %s>%s has one or more invalid " "src-dest combinations %s, term will not be rendered.", term.name, self.from_zone, self.to_zone, flows) continue exclude_address_family.append(4) elif filter_type == "mixed": if "ip4-ip4" in flows and "ip6-ip6" not in flows: exclude_address_family.append(6) pass elif "ip6-ip6" in flows and "ip4-ip4" 
not in flows: exclude_address_family.append(4) pass elif "ip4-ip4" in flows and "ip6-ip6" in flows: pass elif "ip4-only" in flows and "ip6-only" in flows: logging.warning( "WARNING: Term %s in policy %s>%s has source and destinations " "of different address families %s, term will not be " "rendered.", term.name, self.from_zone, self.to_zone, filter(lambda p: re.search(p, "(src|dst)-only"), flows)) continue else: logging.warning( "WARNING: Term %s in policy %s>%s has invalid src-dest " "combinations %s, the term will be rendered without them.", term.name, self.from_zone, self.to_zone, filter(lambda p: re.search(p, "(src|dst)-only"), flows)) if "ip4-ip4" in flows: exclude_address_family.append(6) else: exclude_address_family.append(4) # Substitute large IPv6 ranges (/1, /2) with equivalent subnets. # Do this separately from address book building, or during policy # translation to account for both address-objects and no-address-objects if term.source_address: saddr_split = [] for saddr in term.source_address: if saddr.version == 6 and 0 < saddr.prefixlen < 3: for saddr2 in saddr.subnets(new_prefix=3): saddr2.parent_token = saddr.parent_token saddr_split.append(saddr2) else: saddr_split.append(saddr) term.source_address = saddr_split if term.destination_address: daddr_split = [] for daddr in term.destination_address: if daddr.version == 6 and 0 < daddr.prefixlen < 3: for daddr2 in daddr.subnets(new_prefix=3): daddr2.parent_token = daddr.parent_token daddr_split.append(daddr2) else: daddr_split.append(daddr) term.destination_address = daddr_split # Build address book for the addresses referenced in the term. for addr in term.source_address: if addr.version in exclude_address_family: continue self._BuildAddressBook(self.from_zone, addr) for addr in term.destination_address: if addr.version in exclude_address_family: continue self._BuildAddressBook(self.to_zone, addr) # Handle ICMP/ICMPv6 terms. if term.icmp_type and ("icmp" not in term.protocol and "icmpv6" not in term.protocol): raise UnsupportedFilterError( "Palo Alto Firewall filter must have ICMP or ICMPv6 protocol " + "specified when using icmp_type keyword") for icmp_version in ["icmp", "icmpv6"]: if ("icmp" not in term.protocol and "icmpv6" not in term.protocol): # the protocol is not ICMP or ICMPv6 break if icmp_version not in term.protocol: # skip if this icmp_version isn't in the term protocol. continue if icmp_version == "icmp" and "ip4-ip4" not in flows: # skip if there is no ip4 to ipv4 communication continue if icmp_version == "icmpv6" and "ip6-ip6" not in flows: # skip if there is no ip4 to ipv4 communication continue if icmp_version == "icmp": if filter_type == "inet6": continue if not term.icmp_type: term.pan_application.append("icmp") continue icmp_type_keyword = "ident-by-icmp-type" # The risk level 4 is the default PANOS' risk level for ICMP. risk_level = 4 else: if filter_type == "inet": continue if not term.icmp_type: term.pan_application.append("ipv6-icmp") continue icmp_type_keyword = "ident-by-icmp6-type" # The risk level 2 is the default PANOS' risk level for ICMPv6. risk_level = 2 # The term contains ICMP types for term_icmp_type_name in term.icmp_type: if icmp_version == "icmp": icmp_app_name = "icmp-%s" % term_icmp_type_name # This is to abbreviate the Application name where possible. # The limit is defined by _APPLICATION_NAME_MAX_LENGTH = 31. 
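              # When an "icmp-<type>" or "icmp6-<type>" application name
              # exceeds 31 characters it is shortened via FixTermLength() with
              # abbreviation enabled, in the spirit of the _ABBREVIATION_TABLE
              # defined earlier in this class. A simplified standalone sketch
              # of that table-driven shortening (the real helper also applies
              # its own truncation rules):
              #
              #   _SKETCH_ABBREVIATIONS = [('advertisement', 'ADV'),
              #                            ('solicitation', 'SOL'),
              #                            ('neighbor', 'NBR'),
              #                            ('multicast', 'MCAST')]
              #
              #   def _abbreviate(name, max_len=31):
              #     for long_form, short_form in _SKETCH_ABBREVIATIONS:
              #       if len(name) <= max_len:
              #         break
              #       name = name.replace(long_form, short_form)
              #     return name[:max_len]
              #
              #   # _abbreviate('icmp6-multicast-router-advertisement')
              #   #   -> 'icmp6-multicast-router-ADV'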
if len(icmp_app_name) > self._APPLICATION_NAME_MAX_LENGTH: icmp_app_name = self.FixTermLength( icmp_app_name, True, True, self._APPLICATION_NAME_MAX_LENGTH) if term_icmp_type_name not in policy.Term.ICMP_TYPE[4]: raise PaloAltoFWBadIcmpTypeError( "term with bad icmp type: %s, icmp_type: %s" % (term.name, term_icmp_type_name)) term_icmp_type = policy.Term.ICMP_TYPE[4][term_icmp_type_name] else: icmp_app_name = "icmp6-%s" % term_icmp_type_name # This is to abbreviate the Application name where possible. # The limit is defined by _APPLICATION_NAME_MAX_LENGTH = 31. if len(icmp_app_name) > self._APPLICATION_NAME_MAX_LENGTH: icmp_app_name = self.FixTermLength( icmp_app_name, True, True, self._APPLICATION_NAME_MAX_LENGTH) if term_icmp_type_name not in policy.Term.ICMP_TYPE[6]: raise PaloAltoFWBadIcmpTypeError( "term with bad icmp type: %s, icmp_type: %s" % (term.name, term_icmp_type_name)) term_icmp_type = policy.Term.ICMP_TYPE[6][term_icmp_type_name] if icmp_app_name not in self.application_refs: # the custom icmp application doesn't already exist app_entry = { "category": "networking", "subcategory": "ip-protocol", "technology": "network-protocol", "description": icmp_app_name, "default": { icmp_type_keyword: "%d" % term_icmp_type, }, "risk": "%d" % risk_level, } self.application_refs[icmp_app_name] = app_entry self.applications.append(icmp_app_name) # always add the ICMP application to the term, it either already # existed due to a previous policy, or it was created in the # previous loop. if icmp_app_name not in term.pan_application: term.pan_application.append(icmp_app_name) # Filter out unsupported protocols for proto_name in term.protocol: if proto_name in self._SUPPORTED_PROTO_NAMES: continue raise PaloAltoFWUnsupportedProtocolError( "protocol %s is not supported" % proto_name) if term.icmp_type: if set(term.protocol) == {'icmp', 'icmpv6'}: raise UnsupportedFilterError('%s %s' % ( 'icmp-type specified for both icmp and icmpv6 protocols' ' in a single term:', term.name)) if term.protocol != ['icmp'] and term.protocol != ['icmpv6']: raise UnsupportedFilterError('%s %s' % ( 'icmp-type specified for non-icmp protocols in term:', term.name)) new_terms.append(term) # Create a ruleset. It contains the rules for the terms defined under # a single header on a particular platform. ruleset = {} for term in new_terms: current_rule = Rule(self.from_zone, self.to_zone, term, self.service_map) if len(current_rule.options) > 1: for i, v in enumerate(current_rule.options): name = "%s-%d" % (term.name, i+1) name = self.FixTermLength(name) ruleset[name] = v else: ruleset[term.name] = current_rule.options[0] self.pafw_policies.append((header, ruleset, filter_options)) def _BuildAddressBook(self, zone, address): """Create the address book configuration entries. Args: zone: the zone these objects will reside in address: a naming library address object """ if zone not in self.addressbook: self.addressbook[zone] = collections.OrderedDict() if address.parent_token not in self.addressbook[zone]: # Store addresses as keys to enable quick lookups. # key: address, value: name self.addressbook[zone][address.parent_token] = collections.OrderedDict() name = address.parent_token if address in self.addressbook[zone][name]: return counter = len(self.addressbook[zone][address.parent_token]) name = "%s_%s" % (name, str(counter)) self.addressbook[zone][address.parent_token][address] = name def _SortAddressBookNumCheck(self, item): """Used to give a natural order to the list of acl entries. 
    Args:
      item: string of the address book entry name

    Returns:
      tuple of the entry's alpha prefix and its numeric suffix, used as a
      sort key
    """
    item_list = item.split("_")
    num = item_list.pop(-1)
    if isinstance(item_list[-1], int):
      set_number = item_list.pop(-1)
      num = int(set_number) * 1000 + int(num)
    alpha = "_".join(item_list)
    if num:
      return (alpha, int(num))
    return (alpha, 0)

  def _BuildPort(self, ports):
    """Transform specified ports into list and ranges.

    Args:
      ports: a policy terms list of ports

    Returns:
      port_list: list of ports and port ranges
    """
    port_list = []
    for i in ports:
      if i[0] == i[1]:
        port_list.append(str(i[0]))
      else:
        port_list.append("%s-%s" % (str(i[0]), str(i[1])))
    return port_list

  def __str__(self):
    """Render the output of the PaloAltoFirewall policy into config."""
    # IPv4 addresses are normalized into the policy as IPv6 addresses
    # using ::. The 0.0.0.0-255.255.255.255 range is equivalent to ::0/96,
    # which will only match IPv4 addresses; when negated it will match only
    # IPv6 addresses.
    # Used for address families inet and inet6 when source and
    # destination address are not specified (any any).
    ANY_IPV4_RANGE = "0.0.0.0-255.255.255.255"
    add_any_ipv4 = False

    address_book_names_dict = {}
    address_book_groups_dict = {}
    for zone in self.addressbook:
      # building individual addresses dictionary
      groups = sorted(self.addressbook[zone])
      for group in groups:
        for address, name in self.addressbook[zone][group].items():
          if name in address_book_names_dict:
            if address_book_names_dict[name].supernet_of(address):
              continue
          address_book_names_dict[name] = address

      # building individual address-group dictionary
      for nested_group in groups:
        group_names = []
        for address, name in self.addressbook[zone][nested_group].items():
          group_names.append(name)
        address_book_groups_dict[nested_group] = group_names

    # sort address books and address sets
    address_book_groups_dict = collections.OrderedDict(
        sorted(address_book_groups_dict.items()))
    address_book_keys = sorted(
        list(address_book_names_dict.keys()),
        key=self._SortAddressBookNumCheck)

    # INITIAL CONFIG
    config = etree.Element("config", {
        "version": "8.1.0",
        "urldb": "paloaltonetworks"
    })
    devices = etree.SubElement(config, "devices")
    device_entry = etree.SubElement(devices, "entry",
                                    {"name": "localhost.localdomain"})
    vsys = etree.SubElement(device_entry, "vsys")
    vsys_entry = etree.SubElement(vsys, "entry", {"name": "vsys1"})

    # APPLICATION
    app_entries = etree.Element("application")
    for app_name in self.applications:
      if app_name not in self.application_refs:
        # this is not a custom application.
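# Example sketch (illustrative, not capirca's implementation): the natural
# ordering produced by a key like _SortAddressBookNumCheck above, where
# address-object names of the form "<token>_<n>" sort by token and then
# numerically. The helper and sample names below are hypothetical.
def example_natural_key(name):
  alpha, _, num = name.rpartition("_")
  return (alpha, int(num)) if num.isdigit() else (name, 0)


# sorted(["NET_10", "NET_2", "NET_1"], key=example_natural_key)
# -> ['NET_1', 'NET_2', 'NET_10']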
continue app = self.application_refs[app_name] app_entry = etree.SubElement(app_entries, "entry", {"name": app_name}) for k in self.application_refs[app_name]: if isinstance(app[k], (str)): etree.SubElement(app_entry, k).text = app[k] elif isinstance(app[k], (dict)): if k == "default": default_props = etree.SubElement(app_entry, "default") else: continue for prop in app[k]: if k == "default" and prop in [ "ident-by-icmp-type", "ident-by-icmp6-type" ]: icmp_type_props = etree.SubElement(default_props, prop) etree.SubElement(icmp_type_props, "type").text = app[k][prop] else: pass vsys_entry.append(app_entries) # APPLICATION GROUPS etree.SubElement(vsys_entry, "application-group") # SERVICES service = etree.SubElement(vsys_entry, "service") for k, v in self.service_map.entries.items(): entry = etree.SubElement(service, "entry", {"name": v["name"]}) proto0 = etree.SubElement(entry, "protocol") proto = etree.SubElement(proto0, k[2]) # destination port port = etree.SubElement(proto, "port") tup = str(k[1])[1:-1] if tup[-1] == ",": tup = tup[:-1] port.text = tup.replace("'", "").replace(", ", ",") # source port if len(k[0]): sport = etree.SubElement(proto, "source-port") tup = str(k[0])[1:-1] if tup[-1] == ",": tup = tup[:-1] sport.text = tup.replace("'", "").replace(", ", ",") # RULES rulebase = etree.SubElement(vsys_entry, "rulebase") security = etree.SubElement(rulebase, "security") rules = etree.SubElement(security, "rules") tag = etree.Element("tag") tag_num = 0 # pylint: disable=unused-variable for (header, pa_rules, filter_options) in self.pafw_policies: tag_name = None if header.comment: comment = " ".join(header.comment).strip() if comment: tag_num += 1 # max tag len 127, max zone len 31 tag_name = self._TAG_NAME_FORMAT.format( from_zone=filter_options[1], to_zone=filter_options[3], num=tag_num) tag_entry = etree.SubElement(tag, "entry", {"name": tag_name}) comments = etree.SubElement(tag_entry, "comments") if len(comment) > self._MAX_TAG_COMMENTS_LENGTH: logging.warning( "WARNING: tag %s comments exceeds maximum " "length %d, truncated.", tag_name, self._MAX_TAG_COMMENTS_LENGTH) comments.text = comment[:self._MAX_TAG_COMMENTS_LENGTH] no_addr_obj = True if (len(filter_options) > 5 and filter_options[5] == "no-addr-obj") else False for name, options in pa_rules.items(): entry = etree.SubElement(rules, "entry", {"name": name}) if options["description"]: descr = etree.SubElement(entry, "description") x = " ".join(options["description"]) if len(x) > self._MAX_RULE_DESCRIPTION_LENGTH: logging.warning( "WARNING: rule %s description exceeds maximum " "length %d, truncated.", name, self._MAX_RULE_DESCRIPTION_LENGTH) descr.text = x[:self._MAX_RULE_DESCRIPTION_LENGTH] to = etree.SubElement(entry, "to") for x in options["to_zone"]: member = etree.SubElement(to, "member") member.text = x from_ = etree.SubElement(entry, "from") for x in options["from_zone"]: member = etree.SubElement(from_, "member") member.text = x af = filter_options[4] if len(filter_options) > 4 else "inet" max_src_dst = 0 source = etree.SubElement(entry, "source") if not options["source"]: member = etree.SubElement(source, "member") if not options["destination"] and af != "mixed": # only inet and inet6 use the any-ipv4 object member.text = "any-ipv4" add_any_ipv4 = True else: member.text = "any" else: for x in options["source"]: if no_addr_obj: for group in address_book_groups_dict[x]: member = etree.SubElement(source, "member") member.text = str(address_book_names_dict[group]) max_src_dst += 1 else: member = 
etree.SubElement(source, "member") member.text = x max_src_dst += 1 if max_src_dst > self._MAX_RULE_SRC_DST_MEMBERS: raise UnsupportedFilterError( "term %s source members exceeds maximum of %d: %d" % (name, self._MAX_RULE_SRC_DST_MEMBERS, max_src_dst)) max_src_dst = 0 dest = etree.SubElement(entry, "destination") if not options["destination"]: member = etree.SubElement(dest, "member") if options["source"]: member.text = "any" else: if af != "mixed": # only inet and inet6 use the any-ipv4 object member.text = "any-ipv4" if af == "inet6": for x in ["negate-source", "negate-destination"]: negate = etree.SubElement(entry, x) negate.text = "yes" else: member.text = "any" else: for x in options["destination"]: if no_addr_obj: for group in address_book_groups_dict[x]: member = etree.SubElement(dest, "member") member.text = str(address_book_names_dict[group]) max_src_dst += 1 else: member = etree.SubElement(dest, "member") member.text = x max_src_dst += 1 if max_src_dst > self._MAX_RULE_SRC_DST_MEMBERS: raise UnsupportedFilterError( "term %s destination members exceeds maximum of %d: %d" % (name, self._MAX_RULE_SRC_DST_MEMBERS, max_src_dst)) # service section of a policy rule. service = etree.SubElement(entry, "service") if not options["service"] and not options["application"]: member = etree.SubElement(service, "member") member.text = "any" elif not options["service"] and options["application"]: # Adds custom applications. member = etree.SubElement(service, "member") member.text = "application-default" else: # Adds services. for x in options["service"]: member = etree.SubElement(service, "member") member.text = x # ACTION action = etree.SubElement(entry, "action") action.text = options["action"] # check whether the rule is interzone if list(set(options["from_zone"]).difference(options["to_zone"])): type_ = etree.SubElement(entry, "rule-type") type_.text = "interzone" elif not options["from_zone"] and not options["to_zone"]: type_ = etree.SubElement(entry, "rule-type") type_.text = "interzone" # APPLICATION app = etree.SubElement(entry, "application") if not options["application"]: member = etree.SubElement(app, "member") member.text = "any" else: for x in options["application"]: member = etree.SubElement(app, "member") member.text = x if tag_name is not None: rules_tag = etree.SubElement(entry, "tag") member = etree.SubElement(rules_tag, "member") member.text = tag_name # LOGGING if options["logging"]: if "disable" in options["logging"]: log = etree.SubElement(entry, "log-start") log.text = "no" log = etree.SubElement(entry, "log-end") log.text = "no" if "log-start" in options["logging"]: log = etree.SubElement(entry, "log-start") log.text = "yes" if "log-end" in options["logging"]: log = etree.SubElement(entry, "log-end") log.text = "yes" if no_addr_obj: address_book_groups_dict = {} address_book_keys = {} # ADDRESS addr_group = etree.SubElement(vsys_entry, "address-group") for group, address_list in address_book_groups_dict.items(): entry = etree.SubElement(addr_group, "entry", {"name": group}) static = etree.SubElement(entry, "static") for name in address_list: member = etree.SubElement(static, "member") member.text = name addr = etree.SubElement(vsys_entry, "address") for name in address_book_keys: entry = etree.SubElement(addr, "entry", {"name": name}) desc = etree.SubElement(entry, "description") desc.text = name ip = etree.SubElement(entry, "ip-netmask") ip.text = str(address_book_names_dict[name]) if add_any_ipv4: entry = etree.SubElement(addr, "entry", {"name": "any-ipv4"}) desc = 
etree.SubElement(entry, "description") desc.text = ("Object to match all IPv4 addresses; " "negate to match all IPv6 addresses.") range = etree.SubElement(entry, "ip-range") range.text = ANY_IPV4_RANGE vsys_entry.append(tag) self.config = config document = etree.tostring(config, encoding="UTF-8") dom = minidom.parseString(document.decode("UTF-8")) return dom.toprettyxml(indent=self.INDENT) capirca-2.0.9/capirca/lib/pcap.py000066400000000000000000000365741437377527500166340ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pcap filter generator. This generate a pcap packet filter expression that either: 1) Matches (i.e., captures), the packets that match the ACCEPT clauses specified in a given policy, or 2) Matches the packets that match opposite of that, i.e., the DENY or REJECT clauses. Support tcp flags matching and icmptypes, including ipv6/icmpv6, but not much else past the standard addres, port, and protocol conditions. Note that this is still alpha and will likely require more testing prior to having more confidence in it. Stolen liberally from packetfilter.py. """ import datetime from absl import logging from capirca.lib import aclgenerator class Error(Exception): """Base error class.""" class UnsupportedActionError(Error): """Raised when we see an unsupported action.""" class UnsupportedTargetOptionError(Error): """Raised when we see an unsupported option.""" class Term(aclgenerator.Term): """Generate pcap filter to match a policy term.""" _PLATFORM = 'pcap' _ACTION_TABLE = { 'accept': '', 'deny': '', 'reject': '', 'next': '', } _TCP_FLAGS_TABLE = { 'syn': 'tcp-syn', 'ack': 'tcp-ack', 'fin': 'tcp-fin', 'rst': 'tcp-rst', 'urg': 'tcp-urg', 'psh': 'tcp-push', 'all': '(tcp-syn|tcp-ack|tcp-fin|tcp-rst|tcp-urg|tcp-push)', 'none': '(tcp-syn&tcp-ack&tcp-fin&tcp-rst&tcp-urg&tcp-push)', } _PROTO_TABLE = { 'ah': 'proto \\ah', 'esp': 'proto \\esp', 'icmp': 'proto \\icmp', 'icmpv6': 'icmp6', 'ip': 'proto \\ip', 'ip6': 'ip6', 'igmp': 'proto \\igmp', 'igrp': 'igrp', 'pim': 'proto \\pim', 'tcp': 'proto \\tcp', 'udp': 'proto \\udp', # bpf supports "\vrrp", but some winpcap version dont' recognize it, # so use the IANA protocol number for it: 'vrrp': 'proto 112', 'hopopt': 'ip6 protochain 0', } def __init__(self, term, filter_name, af='inet', direction=''): """Setup a new term. Args: term: A policy.Term object to represent in packetfilter. filter_name: The name of the filter chan to attach the term to. af: Which address family ('inet' or 'inet6') to apply the term to. direction: Direction of the flow. Raises: aclgenerator.UnsupportedFilterError: Filter is not supported. """ super().__init__(term) self.term = term # term object self.filter = filter_name # actual name of filter self.options = [] self.default_action = 'deny' self.af = af self.direction = direction def __str__(self): """Render config output from this term object.""" # Verify platform specific terms. Skip whole term if platform does not # match. 
if self.term.platform: if self._PLATFORM not in self.term.platform: return '' if self.term.platform_exclude: if self._PLATFORM in self.term.platform_exclude: return '' conditions = [] # if terms does not specify action, use filter default action if not self.term.action: self.term.action[0].value = self.default_action if str(self.term.action[0]) not in self._ACTION_TABLE: raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % ( '\n', self.term.name, self.term.action[0], 'action not currently supported.')) # source address term_saddrs = self._CheckAddressAf(self.term.source_address) if not term_saddrs: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='source', af=self.af)) return '' conditions.append(self._GenerateAddrStatement( term_saddrs, self.term.source_address_exclude)) # destination address term_daddrs = self._CheckAddressAf(self.term.destination_address) if not term_daddrs: logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name, direction='destination', af=self.af)) return '' conditions.append(self._GenerateAddrStatement( term_daddrs, self.term.destination_address_exclude)) # protocol if self.term.protocol_except: raise aclgenerator.UnsupportedFilterError('%s %s %s' % ( '\n', self.term.name, 'protocol_except logic not currently supported.')) conditions.append(self._GenerateProtoStatement(self.term.protocol)) conditions.append(self._GeneratePortStatement( self.term.source_port, 'src')) conditions.append(self._GeneratePortStatement( self.term.destination_port, 'dst')) # icmp-type icmp_types = [''] if self.term.icmp_type: if self.term.protocol == ['icmp']: af = 'inet' elif self.term.protocol == ['icmpv6']: af = 'inet6' else: raise aclgenerator.UnsupportedFilterError( '%s %s %s' % ('\n', self.term.name, 'icmp protocol is not defined or not supported.')) icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, self.term.protocol, af) if 'icmp' in self.term.protocol: conditions.append(self._GenerateIcmpType(icmp_types, self.term.icmp_code)) # tcp options if 'tcp' in self.term.protocol: conditions.append(self._GenerateTcpOptions(self.term.option)) cond = Term.JoinConditionals(conditions, 'and') # Note that directionally-based pcap filter requires post-processing to # replace 'localhost' with whatever the IP(s) of the local machine happen # to be. This bit of logic ensure there's a placeholder with the # appropriate booleans around it. We also have to check that there exists # some form of condition already, else we'll end up with something overly # broad like 'dst net localhost' (e.g., 'default-deny'). if cond and self.direction == 'in': cond = Term.JoinConditionals(['dst net localhost', cond], 'and') elif cond and self.direction == 'out': cond = Term.JoinConditionals(['src net localhost', cond], 'and') return cond + '\n' def _CheckAddressAf(self, addrs): """Verify that the requested address-family matches the address's family.""" if not addrs: return ['any'] if self.af == 'mixed': return addrs af_addrs = [] af = self.NormalizeAddressFamily(self.af) for addr in addrs: if addr.version == af: af_addrs.append(addr) return af_addrs @staticmethod def JoinConditionals(condition_list, operator): """Join conditionals using the specified operator. Filters out empty elements and blank strings. Args: condition_list: a list of str()-able items to join. operator: the join string. Returns: A string consisting of the joined elements. If all elements are False or whitespace-only, the empty string. 
""" condition_list = [_f for _f in condition_list if _f] condition_list = [str(x).strip(' ') for x in condition_list if str(x).strip()] if not condition_list: return '' op = ' %s ' % (operator) res = '(%s)' % (op.join(condition_list)) return res def _GenerateAddrStatement(self, addrs, exclude_addrs): addrlist = [] for d in addrs: if d != 'any' and str(d) != '::/0': addrlist.append('dst net %s' % (d)) excludes = [] if exclude_addrs: for d in exclude_addrs: if d != 'any' and str(d) != '::/0': excludes.append('not dst net %s' % (d)) else: # excluding 'any' doesn't really make sense ... return '' if excludes: return Term.JoinConditionals( [Term.JoinConditionals(addrlist, 'or'), Term.JoinConditionals(excludes, 'or')], 'and not') else: return Term.JoinConditionals(addrlist, 'or') def _GenerateProtoStatement(self, protocols): return Term.JoinConditionals( [self._PROTO_TABLE[p] for p in protocols], 'or') def _GeneratePortStatement(self, ports, direction): conditions = [] # term.destination_port is a list of tuples containing the start and end # ports of the port range. In the event it is a single port, the start # and end ports are the same. for port_tuple in ports: if port_tuple[0] == port_tuple[1]: conditions.append('%s port %s' % (direction, port_tuple[0])) else: conditions.append('%s portrange %s-%s' % ( direction, port_tuple[0], port_tuple[1])) return Term.JoinConditionals(conditions, 'or') def _GenerateTcpOptions(self, options): opts = [str(x) for x in options] tcp_flags_set = [] tcp_flags_check = [] for next_opt in opts: if next_opt == 'tcp-established': tcp_flags_set.append(self._TCP_FLAGS_TABLE['ack']) tcp_flags_check.extend([self._TCP_FLAGS_TABLE['ack']]) else: # Iterate through flags table, and create list of tcp-flags to append for next_flag in self._TCP_FLAGS_TABLE: if next_opt.find(next_flag) == 0: tcp_flags_check.append(self._TCP_FLAGS_TABLE.get(next_flag)) tcp_flags_set.append(self._TCP_FLAGS_TABLE.get(next_flag)) if tcp_flags_check: return '(tcp[tcpflags] & (%s) == (%s))' % ('|'.join(tcp_flags_check), '|'.join(tcp_flags_set)) return '' def _GenerateIcmpType(self, icmp_types, icmp_code): rtr_str = '' if icmp_types: code_strings = [''] if icmp_code: code_strings = [' and icmp[icmpcode] == %d' % code for code in icmp_code] rtr_str = Term.JoinConditionals( ['icmp[icmptype] == %d%s' % (x, y) for y in code_strings for x in icmp_types], 'or') return rtr_str class PcapFilter(aclgenerator.ACLGenerator): """Generates filters and terms from provided policy object. Note that since pcap isn't actually a firewall grammar, this generates a filter that only matches matches that which would be accepted by the specified policy. """ _PLATFORM = 'pcap' _DEFAULT_PROTOCOL = 'all' SUFFIX = '.pcap' _TERM = Term def __init__(self, *args, **kwargs): """Initialize a PcapFilter generator. Takes standard ACLGenerator arguments, as well as an 'invert' kwarg. If this argument is true, the pcap filter will be reversed, such that it matches all those packets that would be denied by the specified policy. Args: *args: Arguments. **kwargs: Keyword arguments. """ self._invert = False if 'invert' in kwargs: self._invert = kwargs['invert'] del kwargs['invert'] super().__init__(*args, **kwargs) def _BuildTokens(self): """Build supported tokens for platform. 
Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'logging', 'icmp_code'} supported_tokens -= {'verbatim'} supported_sub_tokens.update( {'action': {'accept', 'deny', 'reject', 'next'}, 'option': { 'tcp-established', 'established', 'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none'}, }) return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.pcap_policies = [] current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) good_afs = ['inet', 'inet6', 'mixed'] good_options = ['in', 'out'] direction = '' for header, terms in pol.filters: filter_type = None if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM)[1:] filter_name = header.FilterName(self._PLATFORM) # ensure all options after the filter name are expected for opt in filter_options: if opt not in good_afs + good_options: raise UnsupportedTargetOptionError('%s %s %s %s' % ( '\nUnsupported option found in', self._PLATFORM, 'target definition:', opt)) if 'in' in filter_options: direction = 'in' elif 'out' in filter_options: direction = 'out' # Check for matching af for address_family in good_afs: if address_family in filter_options: # should not specify more than one AF in options if filter_type is not None: raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % ( '\nMay only specify one of', good_afs, 'in filter options:', filter_options)) filter_type = address_family if filter_type is None: filter_type = 'mixed' # add the terms accept_terms = [] deny_terms = [] term_names = set() for term in terms: if term.name in term_names: raise aclgenerator.DuplicateTermError( 'You have a duplicate term: %s' % term.name) if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if not term: continue if term.action[0] == 'accept': accept_terms.append(self._TERM(term, filter_name, filter_type, direction)) elif term.action[0] == 'deny' or term.action[0] == 'reject': deny_terms.append(self._TERM(term, filter_name, filter_type, direction)) self.pcap_policies.append((header, filter_name, filter_type, accept_terms, deny_terms)) def __str__(self): """Render the output of the PF policy into config.""" target = [] for (unused_header, unused_filter_name, unused_filter_type, accept_terms, deny_terms) in self.pcap_policies: accept = [] for term in accept_terms: term_str = str(term) if term_str: accept.append(str(term)) accept_clause = Term.JoinConditionals(accept, 'and') deny = [] for term in deny_terms: term_str = str(term) if term_str: deny.append(str(term)) deny_clause = Term.JoinConditionals(deny, 'and') if self._invert: target.append( Term.JoinConditionals([deny_clause, accept_clause], 'and not')) else: target.append( Term.JoinConditionals([accept_clause, deny_clause], 'and not')) return '\nor\n'.join(target) + '\n' capirca-2.0.9/capirca/lib/policy.py000066400000000000000000002573211437377527500172030ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Parses the generic policy files and return a policy object for acl rendering. """ import datetime import os import sys from absl import logging from capirca.lib import nacaddr from capirca.lib import naming from ply import lex from ply import yacc DEFINITIONS = None DEFAULT_DEFINITIONS = './def' ACTIONS = set(('accept', 'count', 'deny', 'reject', 'next', 'reject-with-tcp-rst')) PROTOS_WITH_PORTS = frozenset(('tcp', 'udp', 'udplite', 'sctp')) _FLEXIBLE_MATCH_RANGE_ATTRIBUTES = {'byte-offset', 'bit-offset', 'bit-length', 'match-start', 'range', 'range-except', 'flexible-range-name'} _FLEXIBLE_MATCH_START_OPTIONS = {'layer-3', 'layer-4', 'payload'} _LOGGING = set(('true', 'True', 'syslog', 'local', 'disable', 'log-both')) _OPTIMIZE = True _SHADE_CHECK = False _MAX_TTL = 255 _MIN_TTL = 0 class Error(Exception): """Generic error class.""" class FileNotFoundError(Error): """Policy file unable to be read.""" class FileReadError(Error): """Policy file unable to be read.""" class RecursionTooDeepError(Error): """Included files exceed maximum recursion depth.""" class InvalidIncludeDirectoryError(Error): """Included files are from invalid directories.""" class ParseError(Error): """ParseError in the input.""" class TermAddressExclusionError(Error): """Excluded address block is not contained in the accepted address block.""" class TermObjectTypeError(Error): """Error with an object passed to Term.""" class TermPortProtocolError(Error): """Error when a requested protocol doesn't have any of the requested ports.""" class TermProtocolEtherTypeError(Error): """Error when both ether-type & upper-layer protocol matches are requested.""" class TermNoActionError(Error): """Error when a term hasn't defined an action.""" class TermInvalidIcmpType(Error): """Error when a term has invalid icmp-types specified.""" class InvalidTermActionError(Error): """Error when an action is invalid.""" class InvalidTermLoggingError(Error): """Error when a option is set for logging.""" class UndefinedAddressError(Error): """Error when an undefined address is referenced.""" class NoTermsError(Error): """Error when no terms were found.""" class ShadingError(Error): """Error when a term is shaded by a prior term.""" class FlexibleMatchError(Error): """Error when a term contains an invalid flexible match value.""" class ICMPCodeError(Error): """Error when ICMP Codes are used with multiple or invalid types.""" class InvalidTermTTLValue(Error): """Error when TTL value is invalid.""" class MixedPortandNonPortProtos(Error): """Error when protocols that use ports are mixed with protocols that do not""" def TranslatePorts(ports, protocols, term_name): """Return all ports of all protocols requested. 
Args: ports: list of ports, eg ['SMTP', 'DNS', 'HIGH_PORTS'] protocols: list of protocols, eg ['tcp', 'udp'] term_name: name of current term, used for warning messages Returns: ret_array: list of ports tuples such as [(25,25), (53,53), (1024,65535)] Note: Duplication will be taken care of in Term.CollapsePortList """ ret_array = [] for proto in protocols: for port in ports: service_by_proto = DEFINITIONS.GetServiceByProto(port, proto) if not service_by_proto: logging.warning('Term %s has service %s which is not defined with ' 'protocol %s, but will be permitted. Unless intended' ', you should consider splitting the protocols ' 'into separate terms!', term_name, port, proto) for p in [x.split('-') for x in service_by_proto]: if len(p) == 1: ret_array.append((int(p[0]), int(p[0]))) else: ret_array.append((int(p[0]), int(p[1]))) return ret_array # classes for storing the object types in the policy files. class Policy: """The policy object contains everything found in a given policy file.""" def __init__(self, header, terms): """Initiator for the Policy object. Args: header: __main__.Header object. contains comments which should be passed on to the rendered acls as well as the type of acls this policy file should render to. terms: list __main__.Term. an array of Term objects which must be rendered in each of the rendered acls. Attributes: filters: list of tuples containing (header, terms). """ self.filters = [] self.filename = '' self.AddFilter(header, terms) def AddFilter(self, header, terms): """Add another header & filter.""" self.filters.append((header, terms)) self._TranslateTerms(terms) if _SHADE_CHECK: self._DetectShading(terms) def _TranslateTerms(self, terms): """.""" if not terms: raise NoTermsError('no terms found') for term in terms: # TODO(pmoody): this probably belongs in Term.SanityCheck(), # or at the very least, in some method under class Term() if term.translated: continue if term.port: term.port = TranslatePorts(term.port, term.protocol, term.name) if not term.port: raise TermPortProtocolError( 'no ports of the correct protocol for term %s' % ( term.name)) if term.source_port: term.source_port = TranslatePorts(term.source_port, term.protocol, term.name) if not term.source_port: raise TermPortProtocolError( 'no source ports of the correct protocol for term %s' % ( term.name)) if term.destination_port: term.destination_port = TranslatePorts(term.destination_port, term.protocol, term.name) if not term.destination_port: raise TermPortProtocolError( 'no destination ports of the correct protocol for term %s' % ( term.name)) # If argument is true, we optimize, otherwise just sort addresses term.AddressCleanup(_OPTIMIZE, self._NeedsAddressBook()) term.SanityCheck() term.translated = True def _NeedsAddressBook(self): """Returns True if the policy uses a generator needing an addressbook.""" for header in self.headers: if not header: continue if 'srx' in header.platforms: return True for target in header.target: opts = header.FilterOptions(target.platform) if opts and 'object-group' in opts: return True return False @property def headers(self): """Returns the headers from each of the configured filters. Returns: headers """ return [x[0] for x in self.filters] def _DetectShading(self, terms): """Finds terms which are shaded (impossible to reach). Iterate through each term, looking at each prior term. If a prior term contains every component of the current term then the current term would never be hit and is thus shaded. This can be a mistake. Args: terms: list of Term objects. 
Raises: ShadingError: When a term is impossible to reach. """ shading_errors = [] for index, term in enumerate(terms): for prior_index in range(index): # Check each term that came before for shading. Terms with next as an # action do not terminate evaluation, so cannot shade. if (term in terms[prior_index] and 'next' not in terms[prior_index].action): shading_errors.append( ' %s is shaded by %s.' % ( term.name, terms[prior_index].name)) if shading_errors: raise ShadingError('\n'.join(shading_errors)) def __eq__(self, obj): """Compares for equality against another Policy object. Note that it is picky and requires the list contents to be in the same order. Args: obj: object to be compared to for equality. Returns: True if the list of filters in this policy object is equal to the list in obj and False otherwise. """ if not isinstance(obj, Policy): return False return self.filters == obj.filters def __str__(self): def tuple_str(tup): return '%s:%s' % (tup[0], tup[1]) return 'Policy: {%s}' % ', '.join(map(tuple_str, self.filters)) def __repr__(self): return self.__str__() class Term: """The Term object is used to store each of the terms. Args: obj: an object of type VarType or a list of objects of type VarType members: address/source_address/destination_address/: list of VarType.(S|D)?ADDRESS's address_exclude/source_address_exclude/destination_address_exclude: list of VarType.(S|D)?ADDEXCLUDE's restrict-address-family: VarType.RESTRICT_ADDRESS_FAMILY port/source_port/destination_port: list of VarType.(S|D)?PORT's options: list of VarType.OPTION's. protocol: list of VarType.PROTOCOL's. counter: VarType.COUNTER traffic-class-count: VarType.TRAFFIC_CLASS_COUNT action: list of VarType.ACTION's dscp-set: VarType.DSCP_SET dscp-match: VarType.DSCP_MATCH dscp-except: VarType.DSCP_EXCEPT comments: VarType.COMMENT encapsulate: VarType.ENCAPSULATE decapsulate: VarType.DECAPSULATE filter-term: VarType.FILTER_TERM flexible-match-range: VarType.FLEXIBLE_MATCH_RANGE forwarding-class: VarType.FORWARDING_CLASS forwarding-class-except: VarType.FORWARDING_CLASS_EXCEPT expiration: VarType.EXPIRATION verbatim: VarType.VERBATIM logging: VarType.LOGGING log_name: VarType.LOG_NAME next-ip: VarType.NEXT_IP port-mirror: VarType.PORT_MIRROR qos: VarType.QOS pan-application: VarType.PAN_APPLICATION policer: VarType.POLICER priority: VarType.PRIORITY destination-zone: VarType.DZONE source-service-accounts: VarType.SOURCE_SERVICE_ACCOUNTS target-service-accounts: VarType.TARGET_SERVICE_ACCOUNTS source-zone: VarType.SZONE vpn: VarType.VPN """ ICMP_TYPE = {4: {'echo-reply': 0, 'unreachable': 3, 'source-quench': 4, 'redirect': 5, 'alternate-address': 6, 'echo-request': 8, 'router-advertisement': 9, 'router-solicitation': 10, 'time-exceeded': 11, 'parameter-problem': 12, 'timestamp-request': 13, 'timestamp-reply': 14, 'information-request': 15, 'information-reply': 16, 'mask-request': 17, 'mask-reply': 18, 'conversion-error': 31, 'mobile-redirect': 32, }, 6: {'destination-unreachable': 1, 'packet-too-big': 2, 'time-exceeded': 3, 'parameter-problem': 4, 'echo-request': 128, 'echo-reply': 129, 'multicast-listener-query': 130, 'multicast-listener-report': 131, 'multicast-listener-done': 132, 'router-solicit': 133, 'router-advertisement': 134, 'neighbor-solicit': 135, 'neighbor-advertisement': 136, 'redirect-message': 137, 'router-renumbering': 138, 'icmp-node-information-query': 139, 'icmp-node-information-response': 140, 'inverse-neighbor-discovery-solicitation': 141, 'inverse-neighbor-discovery-advertisement': 142, 
'version-2-multicast-listener-report': 143, 'home-agent-address-discovery-request': 144, 'home-agent-address-discovery-reply': 145, 'mobile-prefix-solicitation': 146, 'mobile-prefix-advertisement': 147, 'certification-path-solicitation': 148, 'certification-path-advertisement': 149, 'multicast-router-advertisement': 151, 'multicast-router-solicitation': 152, 'multicast-router-termination': 153, }, } ICMP_CODE = {'unreachable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 'redirect': [0, 1, 2, 3], 'router-advertisement': [0, 16], 'time-exceeded': [0, 1], 'parameter-problem': [0, 1, 2], 'destination-unreachable': [0, 1, 2, 3, 4, 5, 6, 7], 'parameter-problem': [0, 1, 2, 3], 'router-renumbering': [0, 1, 255], 'icmp-node-information-query': [0, 1, 2], 'icmp-node-information-response': [0, 1, 2], } _IPV4_BYTE_SIZE = 1 _IPV6_BYTE_SIZE = 4 def __init__(self, obj): self.name = None self.action = [] self.address = [] self.address_exclude = [] self.restrict_address_family = None self.comment = [] self.counter = None self.expiration = None self.destination_address = [] self.destination_address_exclude = [] self.destination_port = [] self.destination_prefix = [] self.filter_term = None self.forwarding_class = [] self.forwarding_class_except = [] self.logging = [] self.log_limit = None self.log_name = None self.loss_priority = None self.option = [] self.owner = None self.policer = None self.port = [] self.precedence = [] self.protocol = [] self.protocol_except = [] self.qos = None self.pan_application = [] self.routing_instance = None self.source_address = [] self.source_address_exclude = [] self.source_port = [] self.source_prefix = [] self.ttl = None self.verbatim = [] # juniper specific. self.packet_length = None self.fragment_offset = None self.hop_limit = None self.icmp_type = [] self.icmp_code = [] self.ether_type = [] self.traffic_class_count = None self.traffic_type = [] self.translated = False self.dscp_set = None self.dscp_match = [] self.dscp_except = [] self.next_ip = None self.flexible_match_range = [] self.source_prefix_except = [] self.destination_prefix_except = [] self.inactive = False self.encapsulate = None self.decapsulate = None self.port_mirror = None # srx specific self.destination_zone = [] self.source_zone = [] self.vpn = None # gce specific self.source_tag = [] self.destination_tag = [] self.priority = None self.source_service_accounts = [] self.target_service_accounts = [] # iptables specific self.source_interface = None self.destination_interface = None self.platform = [] self.platform_exclude = [] self.target_resources = [] self.timeout = None self.flattened = False self.flattened_addr = None self.flattened_saddr = None self.flattened_daddr = None self.stateless_reply = False # AddObject touches variables which might not have been initialized # further up so this has to be at the end. self.AddObject(obj) def __contains__(self, other): """Determine if other term is contained in this term.""" if self.verbatim or other.verbatim: # short circuit these if sorted(list(self.verbatim)) != sorted(other.verbatim): return False # check protocols # either protocol or protocol-except may be used, not both at the same time. if self.protocol: if other.protocol: if not self.CheckProtocolIsContained(other.protocol, self.protocol): return False # this term has protocol, other has protocol_except. elif other.protocol_except: return False else: # other does not have protocol or protocol_except. since we do other # cannot be contained in self. 
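# Example sketch (not the library's CheckProtocolIsContained helper): the
# protocol containment test used by __contains__ amounts to a subset check,
# since "other" can only be contained in "self" when every protocol it matches
# is also matched by "self". The protocol lists below are hypothetical.
def example_protocols_contained(outer, inner):
  """Return True if every protocol in inner is also present in outer."""
  return set(inner) <= set(outer)


# example_protocols_contained(['tcp', 'udp'], ['tcp'])  -> True
# example_protocols_contained(['tcp'], ['tcp', 'udp'])  -> False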
return False elif self.protocol_except: if other.protocol_except: if not self.CheckProtocolIsContained( self.protocol_except, other.protocol_except): return False elif other.protocol: for proto in other.protocol: if proto in self.protocol_except: return False else: return False # combine addresses with exclusions for proper contains comparisons. if not self.flattened: self.FlattenAll() if not other.flattened: other.FlattenAll() # flat 'address' is compared against other flat (saddr|daddr). # if NONE of these evaluate to True other is not contained. if not ( self.CheckAddressIsContained( self.flattened_addr, other.flattened_addr) or self.CheckAddressIsContained( self.flattened_addr, other.flattened_saddr) or self.CheckAddressIsContained( self.flattened_addr, other.flattened_daddr)): return False # compare flat address from other to flattened self (saddr|daddr). if not ( # other's flat address needs both self saddr & daddr to contain in order # for the term to be contained. We already compared the flattened_addr # attributes of both above, which was not contained. self.CheckAddressIsContained( other.flattened_addr, self.flattened_saddr) and self.CheckAddressIsContained( other.flattened_addr, self.flattened_daddr)): return False # basic saddr/daddr check. if not ( self.CheckAddressIsContained( self.flattened_saddr, other.flattened_saddr)): return False if not ( self.CheckAddressIsContained( self.flattened_daddr, other.flattened_daddr)): return False # check ports # like the address directive, the port directive is special in that it can # be either source or destination. if self.port: if not (self.CheckPortIsContained(self.port, other.port) or self.CheckPortIsContained(self.port, other.source_port) or self.CheckPortIsContained(self.port, other.destination_port)): return False if not self.CheckPortIsContained(self.source_port, other.source_port): return False if not self.CheckPortIsContained(self.destination_port, other.destination_port): return False # prefix lists if self.source_prefix: if sorted(self.source_prefix) != sorted(other.source_prefix): return False if self.source_prefix_except: if sorted(self.source_prefix_except) != sorted( other.source_prefix_except): return False if self.destination_prefix: if sorted(self.destination_prefix) != sorted( other.destination_prefix): return False if self.destination_prefix_except: if sorted(self.destination_prefix_except) != sorted( other.destination_prefix_except): return False # check source and destination tags if self.source_tag: if sorted(self.source_tag) != sorted(other.source_tag): return False if sorted(self.destination_tag) != sorted(other.destination_tag): return False # check precedence if self.precedence: if not other.precedence: return False for precedence in other.precedence: if precedence not in self.precedence: return False elif other.precedence: return False # check various options if self.option: if not other.option: return False for opt in other.option: if opt not in self.option: return False elif other.option: return False # check forwarding-class if self.forwarding_class: if not other.forwarding_class: return False for fc in other.forwarding_class: if fc not in self.forwarding_class: return False # check forwarding-class-except if self.forwarding_class_except: if not other.forwarding_class_except: return False for fc in other.forwarding_class_except: if fc not in self.forwarding_class_except: return False if self.next_ip: if not other.next_ip: return False if self.encapsulate: if not other.encapsulate: return False if 
self.decapsulate: if not other.decapsulate: return False if self.fragment_offset: # fragment_offset looks like 'integer-integer' or just, 'integer' sfo = sorted([int(x) for x in self.fragment_offset.split('-')]) if other.fragment_offset: ofo = sorted([int(x) for x in other.fragment_offset.split('-')]) if ofo[0] < sfo[0] or sfo[1:] < ofo[1:]: return False else: return False if self.hop_limit: # hop_limit looks like 'integer-integer' or just, 'integer' shl = [int(x) for x in self.hop_limit.split('-')] if other.hop_limit: ohl = [int(x) for x in other.hop_limit.split('-')] if shl[0] < ohl[0]: return False shll, ohll = shl[1:2], ohl[1:2] if shll and ohll: if shl[0] > ohl[0]: return False else: return False if self.packet_length: # packet_length looks like 'integer-integer' or just, 'integer' spl = [int(x) for x in self.packet_length.split('-')] if other.packet_length: opl = [int(x) for x in other.packet_length.split('-')] if spl[0] < opl[0] or sorted(spl[1:]) > sorted(opl[1:]): return False else: return False if self.port_mirror: if not other.port_mirror: return False if self.icmp_type: if sorted(self.icmp_type) is not sorted(other.icmp_type): return False if self.icmp_code: if sorted(self.icmp_code) is not sorted(other.icmp_code): return False # check platform if self.platform: if sorted(self.platform) is not sorted(other.platform): return False if self.platform_exclude: if sorted(self.platform_exclude) is not sorted(other.platform_exclude): return False if self.source_zone: if sorted(self.source_zone) is not sorted(other.source_zone): return False if self.destination_zone: if sorted(self.destination_zone) is not sorted(other.destination_zone): return False # we have containment return True def __str__(self): ret_str = [] ret_str.append(' name: %s' % self.name) if self.address: ret_str.append(' address: %s' % sorted(self.address)) if self.address_exclude: ret_str.append(' address_exclude: %s' % sorted(self.address_exclude)) if self.source_address: ret_str.append(' source_address: %s' % self._SortAddressesByFamily('source_address')) if self.source_address_exclude: ret_str.append(' source_address_exclude: %s' % self._SortAddressesByFamily('source_address_exclude')) if self.source_tag: ret_str.append(' source_tag: %s' % self.source_tag) if self.destination_address: ret_str.append(' destination_address: %s' % self._SortAddressesByFamily('destination_address')) if self.destination_address_exclude: ret_str.append(' destination_address_exclude: %s' % self._SortAddressesByFamily('destination_address_exclude')) if self.destination_tag: ret_str.append(' destination_tag: %s' % self.destination_tag) if self.target_resources: ret_str.append(' target_resources: %s' % self.target_resources) if self.source_service_accounts: ret_str.append(' source_service_accounts: %s' % self.source_service_accounts) if self.target_service_accounts: ret_str.append(' target_service_accounts: %s' % self.target_service_accounts) if self.source_prefix: ret_str.append(' source_prefix: %s' % self.source_prefix) if self.source_prefix_except: ret_str.append(' source_prefix_except: %s' % self.source_prefix_except) if self.destination_prefix: ret_str.append(' destination_prefix: %s' % self.destination_prefix) if self.destination_prefix_except: ret_str.append(' destination_prefix_except: %s' % self.destination_prefix_except) if self.filter_term: ret_str.append(' filter_term: %s' % self.filter_term) if self.forwarding_class: ret_str.append(' forwarding_class: %s' % self.forwarding_class) if self.forwarding_class_except: ret_str.append(' 
forwarding_class_except: %s' % self.forwarding_class_except) if self.icmp_type: ret_str.append(' icmp_type: %s' % sorted(self.icmp_type)) if self.icmp_code: ret_str.append(' icmp_code: %s' % sorted(self.icmp_code)) if self.next_ip: ret_str.append(' next_ip: %s' % self.next_ip) if self.encapsulate: ret_str.append(' encapsulate: %s' % self.encapsulate) if self.decapsulate: ret_str.append(' decapsulate: %s' % self.decapsulate) if self.protocol: ret_str.append(' protocol: %s' % sorted(self.protocol)) if self.protocol_except: ret_str.append(' protocol-except: %s' % self.protocol_except) if self.owner: ret_str.append(' owner: %s' % self.owner) if self.port: ret_str.append(' port: %s' % sorted(self.port)) if self.port_mirror: ret_str.append(' port_mirror: %s' % self.port_mirror) if self.source_port: ret_str.append(' source_port: %s' % sorted(self.source_port)) if self.destination_port: ret_str.append(' destination_port: %s' % sorted(self.destination_port)) if self.action: ret_str.append(' action: %s' % self.action) if self.option: ret_str.append(' option: %s' % self.option) if self.flexible_match_range: ret_str.append(' flexible_match_range: %s' % self.flexible_match_range) if self.qos: ret_str.append(' qos: %s' % self.qos) if self.pan_application: ret_str.append(' pan_application: %s' % self.pan_application) if self.logging: ret_str.append(' logging: %s' % self.logging) if self.log_limit: ret_str.append(' log_limit: %s/%s' % (self.log_limit[0], self.log_limit[1])) if self.log_name: ret_str.append(' log_name: %s' % self.log_name) if self.priority: ret_str.append(' priority: %s' % self.priority) if self.counter: ret_str.append(' counter: %s' % self.counter) if self.traffic_class_count: ret_str.append(' traffic_class_count: %s' % self.traffic_class_count) if self.source_interface: ret_str.append(' source_interface: %s' % self.source_interface) if self.destination_interface: ret_str.append(' destination_interface: %s' % self.destination_interface) if self.expiration: ret_str.append(' expiration: %s' % self.expiration) if self.platform: ret_str.append(' platform: %s' % self.platform) if self.platform_exclude: ret_str.append(' platform_exclude: %s' % self.platform_exclude) if self.ttl: ret_str.append(' ttl: %s' % self.ttl) if self.timeout: ret_str.append(' timeout: %s' % self.timeout) if self.vpn: vpn_name, pair_policy = self.vpn if pair_policy: ret_str.append(' vpn: name = %s, pair_policy = %s' % (vpn_name, pair_policy)) else: ret_str.append(' vpn: name = %s' % vpn_name) if self.source_zone: ret_str.append(' source_zone: %s' % sorted(self.source_zone)) if self.destination_zone: ret_str.append(' destination_zone: %s' % sorted(self.destination_zone)) return '\n'.join(ret_str) def __repr__(self): return self.__str__() def __eq__(self, other): # action if sorted(self.action) != sorted(other.action): return False # addresses. 
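# Example sketch: the equality checks below treat list-valued term fields as
# unordered collections by sorting both sides before comparing. The sample
# field values are hypothetical.
def example_unordered_equal(a, b):
  """Compare two list-valued term fields while ignoring element order."""
  return sorted(a) == sorted(b)


# example_unordered_equal(['tcp', 'udp'], ['udp', 'tcp'])  -> True
# example_unordered_equal(['tcp'], ['tcp', 'udp'])         -> False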
if not (sorted(self.address) == sorted(other.address) and sorted(self.source_address) == sorted(other.source_address) and sorted(self.source_address_exclude) == sorted(other.source_address_exclude) and sorted(self.destination_address) == sorted(other.destination_address) and sorted(self.destination_address_exclude) == sorted(other.destination_address_exclude)): return False # prefix lists if not (sorted(self.source_prefix) == sorted(other.source_prefix) and sorted(self.source_prefix_except) == sorted(other.source_prefix_except) and sorted(self.destination_prefix) == sorted(other.destination_prefix) and sorted(self.destination_prefix_except) == sorted(other.destination_prefix_except)): return False # ports if not (sorted(self.port) == sorted(other.port) and sorted(self.source_port) == sorted(other.source_port) and sorted(self.destination_port) == sorted(other.destination_port)): return False # protocol if not (sorted(self.protocol) == sorted(other.protocol) and sorted(self.protocol_except) == sorted(other.protocol_except)): return False # option if sorted(self.option) != sorted(other.option): return False # qos if self.qos != other.qos: return False # pan-application if sorted(self.pan_application) != sorted(other.pan_application): return False # verbatim if self.verbatim != other.verbatim: return False # policer if self.policer != other.policer: return False # interface if self.source_interface != other.source_interface: return False if self.destination_interface != other.destination_interface: return False # tags if not (sorted(self.source_tag) == sorted(other.source_tag) and sorted(self.destination_tag) == sorted(other.destination_tag)): return False if self.ttl != other.ttl: return False if sorted(self.logging) != sorted(other.logging): return False if self.log_limit != other.log_limit: return False if self.qos != other.qos: return False if sorted(self.pan_application) != sorted(other.pan_application): return False if self.packet_length != other.packet_length: return False if self.fragment_offset != other.fragment_offset: return False if self.hop_limit != other.hop_limit: return False if sorted(self.icmp_type) != sorted(other.icmp_type): return False if sorted(self.icmp_code) != sorted(other.icmp_code): return False if sorted(self.ether_type) != sorted(other.ether_type): return False if sorted(self.traffic_type) != sorted(other.traffic_type): return False # vpn if self.vpn != other.vpn: return False # platform if not (sorted(self.platform) == sorted(other.platform) and sorted(self.platform_exclude) == sorted(other.platform_exclude)): return False # timeout if self.timeout != other.timeout: return False # precedence if self.precedence != other.precedence: return False # filter if self.filter_term != other.filter_term: return False # forwarding-class if sorted(self.forwarding_class) != sorted(other.forwarding_class): return False # forwarding-class-except if sorted(self.forwarding_class_except) != sorted( other.forwarding_class_except): return False # next_ip if self.next_ip != other.next_ip: return False # encapsulate if self.encapsulate != other.encapsulate: return False # decapsulate if self.decapsulate != other.decapsulate: return False # flexible_match if self.flexible_match_range != other.flexible_match_range: return False # port_mirror if self.port_mirror != other.port_mirror: return False # source_zone if sorted(self.source_zone) != sorted(other.source_zone): return False # destination_zone if sorted(self.destination_zone) != sorted(other.destination_zone): return False return 
True def __ne__(self, other): return not self.__eq__(other) def _SortAddressesByFamily(self, addr_type): """Provide the term address field to sort. Method will sort v4 and then concatenate sorted v6 addresses. This will support Term.__str__ function which outputs a string of sorted IP addresses. Args: addr_type: string, this will be either 'source_address', 'source_address_exclude', 'destination_address' or 'destination_address_exclude' Returns: List of IP addresses sourted v4 then v6 """ # Sort v4 and v6 sort_v4 = sorted(self.GetAddressOfVersion(addr_type, 4)) sort_v6 = sorted(self.GetAddressOfVersion(addr_type, 6)) # Concatenate return sort_v4 + sort_v6 def AddressesByteLength(self, address_family=(4, 6)): """Returns the byte length of all IP addresses in the term. This is used in the srx generator due to a address size limitation. Args: address_family: Address families to include for determining byte length. Returns: counter: Byte length of the sum of both source and destination IPs. """ counter = 0 for i in self.source_address: if i.version == 6 and i.version in address_family: counter += self._IPV6_BYTE_SIZE elif i.version == 4 and i.version in address_family: counter += self._IPV4_BYTE_SIZE for i in self.destination_address: if i.version == 6 and i.version in address_family: counter += self._IPV6_BYTE_SIZE elif i.version == 4 and i.version in address_family: counter += self._IPV4_BYTE_SIZE return counter def FlattenAll(self, mutate=True): """Reduce source, dest, and address fields to their post-exclude state. Populates the self.flattened_addr, self.flattened_saddr, self.flattened_daddr by removing excludes from includes. Args: mutate: Boolean value indicating if this method should mutate the original address (address, destination_address, source_address) """ # No excludes, set flattened attributes and move along. self.flattened = True if not (self.source_address_exclude or self.destination_address_exclude or self.address_exclude): self.flattened_saddr = self.source_address self.flattened_daddr = self.destination_address self.flattened_addr = self.address return if self.source_address_exclude: self.flattened_saddr = nacaddr.AddressListExclude( self.source_address, self.source_address_exclude, collapse_addrs=False) if mutate: self.source_address = self.flattened_saddr if self.destination_address_exclude: self.flattened_daddr = nacaddr.AddressListExclude( self.destination_address, self.destination_address_exclude, collapse_addrs=False) if mutate: self.destination_address = self.flattened_daddr if self.address_exclude: self.flattened_addr = nacaddr.AddressListExclude( self.address, self.address_exclude, collapse_addrs=False) if mutate: self.address = self.flattened_addr def GetAddressOfVersion(self, addr_type, af=None): """Returns addresses of the appropriate Address Family. Args: addr_type: string, this will be either 'source_address', 'source_address_exclude', 'destination_address' or 'destination_address_exclude' af: int or None, either 4 for IPv4 or 6 for IPv6 Returns: list of addresses of the correct family. """ if not af: return getattr(self, addr_type) return [x for x in getattr(self, addr_type) if x.version == af] def AddObject(self, obj): """Add an object of unknown type to this term. Args: obj: single or list of either [Address, Port, Option, Protocol, Counter, Action, Comment, Expiration] Raises: InvalidTermActionError: if the action defined isn't an accepted action. eg, action:: godofoobar TermObjectTypeError: if AddObject is called with an object it doesn't understand. 
InvalidTermLoggingError: when a option is set for logging not known. """ if type(obj) is list: for x in obj: # do we have a list of addresses? # expanded address fields consolidate naked address fields with # saddr/daddr. if x.var_type is VarType.SADDRESS: saddr = DEFINITIONS.GetNetAddr(x.value) self.source_address.extend(saddr) elif x.var_type is VarType.DADDRESS: daddr = DEFINITIONS.GetNetAddr(x.value) self.destination_address.extend(daddr) elif x.var_type is VarType.ADDRESS: addr = DEFINITIONS.GetNetAddr(x.value) self.address.extend(addr) # do we have address excludes? elif x.var_type is VarType.SADDREXCLUDE: saddr_exclude = DEFINITIONS.GetNetAddr(x.value) self.source_address_exclude.extend(saddr_exclude) elif x.var_type is VarType.DADDREXCLUDE: daddr_exclude = DEFINITIONS.GetNetAddr(x.value) self.destination_address_exclude.extend(daddr_exclude) elif x.var_type is VarType.ADDREXCLUDE: addr_exclude = DEFINITIONS.GetNetAddr(x.value) self.address_exclude.extend(addr_exclude) # do we have a list of ports? elif x.var_type is VarType.PORT: self.port.append(x.value) elif x.var_type is VarType.SPORT: self.source_port.append(x.value) elif x.var_type is VarType.DPORT: self.destination_port.append(x.value) # do we have a list of protocols? elif x.var_type is VarType.PROTOCOL: self.protocol.append(x.value) # do we have a list of protocol-exceptions? elif x.var_type is VarType.PROTOCOL_EXCEPT: self.protocol_except.append(x.value) # do we have a list of options? elif x.var_type is VarType.OPTION: self.option.append(x.value) elif x.var_type is VarType.SPFX: self.source_prefix.append(x.value) elif x.var_type is VarType.ESPFX: self.source_prefix_except.append(x.value) elif x.var_type is VarType.DPFX: self.destination_prefix.append(x.value) elif x.var_type is VarType.EDPFX: self.destination_prefix_except.append(x.value) elif x.var_type is VarType.ETHER_TYPE: self.ether_type.append(x.value) elif x.var_type is VarType.TRAFFIC_TYPE: self.traffic_type.append(x.value) elif x.var_type is VarType.PRECEDENCE: self.precedence.append(x.value) elif x.var_type is VarType.FORWARDING_CLASS: self.forwarding_class.append(x.value) elif x.var_type is VarType.FORWARDING_CLASS_EXCEPT: self.forwarding_class_except.append(x.value) elif x.var_type is VarType.PAN_APPLICATION: self.pan_application.append(x.value) elif x.var_type is VarType.NEXT_IP: self.next_ip = DEFINITIONS.GetNetAddr(x.value) elif x.var_type is VarType.PLATFORM: self.platform.append(x.value) elif x.var_type is VarType.PLATFORMEXCLUDE: self.platform_exclude.append(x.value) elif x.var_type is VarType.DSCP_MATCH: self.dscp_match.append(x.value) elif x.var_type is VarType.DSCP_EXCEPT: self.dscp_except.append(x.value) elif x.var_type is VarType.STAG: self.source_tag.append(x.value) elif x.var_type is VarType.DTAG: self.destination_tag.append(x.value) elif x.var_type is VarType.FLEXIBLE_MATCH_RANGE: self.flexible_match_range.append(x.value) elif x.var_type is VarType.TARGET_RESOURCES: self.target_resources.append(x.value) elif x.var_type is VarType.SOURCE_SERVICE_ACCOUNTS: self.source_service_accounts.append(x.value) elif x.var_type is VarType.TARGET_SERVICE_ACCOUNTS: self.target_service_accounts.append(x.value) elif x.var_type is VarType.SZONE: self.source_zone.append(x.value) elif x.var_type is VarType.DZONE: self.destination_zone.append(x.value) else: raise TermObjectTypeError( '%s isn\'t a type I know how to deal with (contains \'%s\')' % ( type(x), x.value)) else: # stupid no switch statement in python if obj.var_type is VarType.RESTRICT_ADDRESS_FAMILY: 
self.restrict_address_family = obj.value elif obj.var_type is VarType.COMMENT: self.comment.append(str(obj)) elif obj.var_type is VarType.OWNER: self.owner = obj.value elif obj.var_type is VarType.EXPIRATION: self.expiration = obj.value elif obj.var_type is VarType.LOSS_PRIORITY: self.loss_priority = obj.value elif obj.var_type is VarType.ROUTING_INSTANCE: self.routing_instance = obj.value elif obj.var_type is VarType.PRECEDENCE: self.precedence = obj.value elif obj.var_type is VarType.FORWARDING_CLASS: self.forwarding_class.append(obj.value) elif obj.var_type is VarType.FORWARDING_CLASS_EXCEPT: self.forwarding_class_except.append(obj.value) elif obj.var_type is VarType.PAN_APPLICATION: self.pan_application.append(obj.value) elif obj.var_type is VarType.NEXT_IP: self.next_ip = DEFINITIONS.GetNetAddr(obj.value) elif obj.var_type is VarType.VERBATIM: self.verbatim.append(obj.value) elif obj.var_type is VarType.ACTION: if str(obj) not in ACTIONS: raise InvalidTermActionError('%s is not a valid action' % obj) self.action.append(obj.value) elif obj.var_type is VarType.COUNTER: self.counter = obj elif obj.var_type is VarType.ENCAPSULATE: self.encapsulate = obj.value elif obj.var_type is VarType.DECAPSULATE: self.decapsulate = obj.value elif obj.var_type is VarType.PORT_MIRROR: self.port_mirror = obj.value elif obj.var_type is VarType.TRAFFIC_CLASS_COUNT: self.traffic_class_count = obj elif obj.var_type is VarType.ICMP_TYPE: self.icmp_type.extend(obj.value) elif obj.var_type is VarType.ICMP_CODE: self.icmp_code.extend(obj.value) elif obj.var_type is VarType.LOGGING: if str(obj) not in _LOGGING: raise InvalidTermLoggingError('%s is not a valid logging option' % obj) self.logging.append(obj) elif obj.var_type is VarType.LOG_LIMIT: self.log_limit = obj.value elif obj.var_type is VarType.LOG_NAME: self.log_name = obj.value # police man, tryin'a take you jail elif obj.var_type is VarType.POLICER: self.policer = obj.value elif obj.var_type is VarType.PRIORITY: self.priority = obj.value # qos? elif obj.var_type is VarType.QOS: self.qos = obj.value elif obj.var_type is VarType.PACKET_LEN: self.packet_length = obj.value elif obj.var_type is VarType.FRAGMENT_OFFSET: self.fragment_offset = obj.value elif obj.var_type is VarType.HOP_LIMIT: self.hop_limit = obj.value elif obj.var_type is VarType.SINTERFACE: self.source_interface = obj.value elif obj.var_type is VarType.DINTERFACE: self.destination_interface = obj.value elif obj.var_type is VarType.TIMEOUT: self.timeout = obj.value elif obj.var_type is VarType.DSCP_SET: self.dscp_set = obj.value elif obj.var_type is VarType.VPN: self.vpn = (obj.value[0], obj.value[1]) elif obj.var_type is VarType.TTL: self.ttl = int(obj.value) elif obj.var_type is VarType.TARGET_RESOURCES: self.target_resources.append(obj.value) elif obj.var_type is VarType.SOURCE_SERVICE_ACCOUNTS: self.source_service_accounts.append(obj.value) elif obj.var_type is VarType.TARGET_SERVICE_ACCOUNTS: self.target_service_accounts.append(obj.value) elif obj.var_type is VarType.FILTER_TERM: self.filter_term = obj.value else: raise TermObjectTypeError( '%s isn\'t a type I know how to deal with' % (type(obj))) def SanityCheck(self): """Sanity check the definition of the term. Raises: ParseError: if term has both verbatim and non-verbatim tokens TermInvalidIcmpType: if term has invalid icmp-types specified TermNoActionError: if the term doesn't have an action defined. TermPortProtocolError: if the term has a service/protocol definition pair which don't match up, eg. 
SNMP and tcp TermAddressExclusionError: if one of the *-exclude directives is defined, but that address isn't contained in the non *-exclude directive. eg: source-address::CORP_INTERNAL source-exclude:: LOCALHOST TermProtocolEtherTypeError: if the term has both ether-type and upper-layer protocol restrictions InvalidTermActionError: action and routing-instance both defined InvalidTermTTLValue: TTL value is invalid. MixedPortandNonPortProtos: Ports specified with protocol that doesn't support ports. This should be called when the term is fully formed, and all of the options are set. """ if self.verbatim: if (self.action or self.source_port or self.destination_port or self.port or self.protocol or self.option): raise ParseError( 'term "%s" has both verbatim and non-verbatim tokens.' % self.name) else: if (not self.action and not self.routing_instance and not self.next_ip and not self.encapsulate and not self.decapsulate and not self.filter_term and not self.port_mirror): raise TermNoActionError('no action specified for term %s' % self.name) if self.filter_term and self.action: raise InvalidTermActionError('term "%s" has both filter and action tokens.' % self.name) # have we specified a port with a protocol that doesn't support ports? protos_no_ports = {p for p in self.protocol if p not in PROTOS_WITH_PORTS} if protos_no_ports != set() and (self.source_port or self.destination_port or self.port): if set(self.protocol) - protos_no_ports != set(): # This is a more specific error - some protocols support, but not all raise MixedPortandNonPortProtos( 'Term %s contains mixed uses of protocols with and without port ' 'numbers\nProtocols: %s' % (self.name, self.protocol)) else: raise TermPortProtocolError( 'ports specified with protocol(s) that don\'t support ports. ' 'Term: %s Protocols: %s ' % (self.name, protos_no_ports)) # TODO(pmoody): do we have mutually exclusive options? # eg. tcp-established + tcp-initial? if self.ether_type and ( self.protocol or self.address or self.destination_address or self.destination_address_exclude or self.destination_port or self.destination_prefix or self.destination_prefix_except or self.source_address or self.source_address_exclude or self.source_port or self.source_prefix or self.source_prefix_except): raise TermProtocolEtherTypeError( 'ether-type not supported when used with upper-layer protocol ' 'restrictions. Term: %s' % self.name) # validate icmp-types if specified, but addr_family will have to be checked # in the generators as policy module doesn't know about that at this point. if self.icmp_code: if len(self.icmp_type) != 1: raise ICMPCodeError('ICMP Code used with invalid number of types.' 'Use only one ICMP type.\n Term: %s' % self.name) type_name = self.icmp_type[0] bad_codes = [] for code in self.icmp_code: if code not in self.ICMP_CODE[type_name]: bad_codes.append(code) if bad_codes: raise ICMPCodeError('ICMP Codes %s are invalid for ICMP Type %s.' '\nTerm: %s' % (bad_codes, type_name, self.name)) if self.icmp_type: for icmptype in self.icmp_type: if (icmptype not in self.ICMP_TYPE[4] and icmptype not in self.ICMP_TYPE[6]): raise TermInvalidIcmpType('Term %s contains an invalid icmp-type:' '%s' % (self.name, icmptype)) if self.ttl: if not _MIN_TTL <= self.ttl <= _MAX_TTL: raise InvalidTermTTLValue('Term %s contains invalid TTL: %s' % (self.name, self.ttl)) def AddressCleanup(self, optimize=True, addressbook=False): """Do Address and Port collapsing. 
Notes: Collapses both the address definitions and the port definitions to their smallest possible length. Args: optimize: boolean value indicating whether to optimize addresses addressbook: Boolean indicating if addressbook is used. """ def cleanup(addresses, complement_addresses): if not optimize: return nacaddr.SortAddrList(addresses) if addressbook: return nacaddr.CollapseAddrListPreserveTokens(addresses) else: return nacaddr.CollapseAddrList(addresses, complement_addresses) # address collapsing. if self.address: self.address = cleanup(self.address, None) if self.source_address: self.source_address = cleanup(self.source_address, self.source_address_exclude) if self.source_address_exclude: self.source_address_exclude = cleanup(self.source_address_exclude, self.source_address) if self.destination_address: self.destination_address = cleanup(self.destination_address, self.destination_address_exclude) if self.destination_address_exclude: self.destination_address_exclude = cleanup( self.destination_address_exclude, self.destination_address) # port collapsing. if self.port: self.port = self.CollapsePortList(self.port) if self.source_port: self.source_port = self.CollapsePortList(self.source_port) if self.destination_port: self.destination_port = self.CollapsePortList(self.destination_port) def CollapsePortList(self, ports): """Given a list of ports, Collapse to the smallest required. Args: ports: a list of port strings eg: [(80,80), (53,53) (2000, 2009), (1024,65535)] Returns: ret_array: the collapsed sorted list of ports, eg: [(53,53), (80,80), (1024,65535)] """ ret_ports = [] for port in sorted(ports): if not ret_ports: ret_ports.append(port) elif ret_ports[-1][1] >= port[1]: # (10, 20) and (12, 13) -> (10, 20) pass elif port[0] < ret_ports[-1][1] < port[1]: # (10, 20) and (15, 30) -> (10, 30) ret_ports[-1] = (ret_ports[-1][0], port[1]) elif ret_ports[-1][1] + 1 == port[0]: # (10, 20) and (21, 30) -> (10, 30) ret_ports[-1] = (ret_ports[-1][0], port[1]) else: # (10, 20) and (22, 30) -> (10, 20), (22, 30) ret_ports.append(port) return ret_ports def CheckProtocolIsContained(self, superset, subset): """Check if the given list of protocols is wholly contained. Args: superset: list of protocols subset: list of protocols Returns: bool: True if subset is contained in superset. false otherwise. """ if not superset: return True if not subset: return False # Convert these lists to sets to use set comparison. sup = set(superset) sub = set(subset) return sub.issubset(sup) def CheckPortIsContained(self, superset, subset): """Check if the given list of ports is wholly contained. Args: superset: list of port tuples subset: list of port tuples Returns: bool: True if subset is contained in superset, false otherwise """ if not superset: return True if not subset: return False for sub_port in subset: not_contains = True for sup_port in superset: if (int(sub_port[0]) >= int(sup_port[0]) and int(sub_port[1]) <= int(sup_port[1])): not_contains = False break if not_contains: return False return True def CheckAddressIsContained(self, superset, subset): """Check if subset is wholey contained by superset. Args: superset: list of the superset addresses subset: list of the subset addresses Returns: True or False. """ if not superset: return True if not subset: return False for sub_addr in subset: sub_contained = False for sup_addr in superset: # ipaddr ensures that version numbers match for inclusion. 
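        # subnet_of() is True only when sub_addr lies entirely within
        # sup_addr, e.g. 10.0.0.0/24 is a subnet of 10.0.0.0/8 but not of
        # 10.1.0.0/16.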
if sub_addr.subnet_of(sup_addr): sub_contained = True break if not sub_contained: return False return True class VarType: """Generic object meant to store lots of basic policy types.""" COMMENT = 0 COUNTER = 1 ACTION = 2 SADDRESS = 3 DADDRESS = 4 ADDRESS = 5 SPORT = 6 DPORT = 7 PROTOCOL_EXCEPT = 8 OPTION = 9 PROTOCOL = 10 SADDREXCLUDE = 11 DADDREXCLUDE = 12 LOGGING = 13 QOS = 14 POLICER = 15 PACKET_LEN = 16 FRAGMENT_OFFSET = 17 ICMP_TYPE = 18 SPFX = 19 DPFX = 20 ETHER_TYPE = 21 TRAFFIC_TYPE = 22 VERBATIM = 23 LOSS_PRIORITY = 24 ROUTING_INSTANCE = 25 PRECEDENCE = 26 SINTERFACE = 27 EXPIRATION = 28 DINTERFACE = 29 PLATFORM = 30 PLATFORMEXCLUDE = 31 PORT = 32 TIMEOUT = 33 OWNER = 34 ADDREXCLUDE = 36 VPN = 37 APPLY_GROUPS = 38 APPLY_GROUPS_EXCEPT = 39 DSCP_SET = 40 DSCP_MATCH = 41 DSCP_EXCEPT = 42 FORWARDING_CLASS = 43 STAG = 44 DTAG = 45 NEXT_IP = 46 HOP_LIMIT = 47 LOG_NAME = 48 FLEXIBLE_MATCH_RANGE = 49 ESPFX = 50 EDPFX = 51 FORWARDING_CLASS_EXCEPT = 52 TRAFFIC_CLASS_COUNT = 53 PAN_APPLICATION = 54 ICMP_CODE = 55 PRIORITY = 56 TTL = 57 LOG_LIMIT = 58 TARGET_RESOURCES = 59 TARGET_SERVICE_ACCOUNTS = 60 ENCAPSULATE = 61 FILTER_TERM = 62 RESTRICT_ADDRESS_FAMILY = 63 PORT_MIRROR = 64 SZONE = 65 DZONE = 66 DECAPSULATE = 67 SOURCE_SERVICE_ACCOUNTS = 68 def __init__(self, var_type, value): self.var_type = var_type if self.var_type == self.COMMENT or self.var_type == self.LOG_NAME: # remove the double quotes val = str(value).strip('"') # make all of the lines start w/o leading whitespace. self.value = '\n'.join([x.lstrip() for x in val.splitlines()]) else: self.value = value def __str__(self): return str(self.value) def __repr__(self): return self.__str__() def __eq__(self, other): return self.var_type == other.var_type and self.value == other.value def __hash__(self): return id(self) class Header: """The header of the policy file contains the targets and a global comment.""" def __init__(self): self.target = [] self.comment = [] self.apply_groups = [] self.apply_groups_except = [] def AddObject(self, obj): """Add and object to the Header. Args: obj: of type VarType.COMMENT, VarType.APPLY_GROUPS, VarType.APPLY_GROUPS_EXCEPT, or Target Raises: RuntimeError: if object type cannot be determined """ if type(obj) == Target: self.target.append(obj) elif isinstance(obj, list) and all(isinstance(x, VarType) for x in obj): for x in obj: if x.var_type == VarType.APPLY_GROUPS: self.apply_groups.append(str(x)) elif x.var_type == VarType.APPLY_GROUPS_EXCEPT: self.apply_groups_except.append(str(x)) elif obj.var_type == VarType.COMMENT: self.comment.append(str(obj)) else: raise RuntimeError('Unable to add object from header.') @property def platforms(self): """The platform targets of this particular header.""" return [x.platform for x in self.target] def FilterOptions(self, platform): """Given a platform return the options. Args: platform: string Returns: list or None """ for target in self.target: if target.platform == platform: return target.options return [] def FilterName(self, platform): """Given a filter_type, return the filter name. Args: platform: string Returns: filter_name: string or None Notes: !! Deprecated in favor of Header.FilterOptions(platform) !! 
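      For example (illustrative target values): given a header line
      'target:: juniper edge-filter inet', FilterName('juniper') returns
      'edge-filter', whereas FilterOptions('juniper') returns
      ['edge-filter', 'inet'].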
""" for target in self.target: if target.platform == platform: if target.options: return target.options[0] return None def __str__(self): return 'Target[%s], Comments [%s], Apply groups: [%s], except: [%s]' % ( ', '.join(map(str, self.target)), ', '.join(self.comment), ', '.join(self.apply_groups), ', '.join(self.apply_groups_except)) def __repr__(self): return self.__str__() def __eq__(self, obj): """Compares for equality against another Header object. Note that it is picky and requires the list contents to be in the same order. Args: obj: object to be compared to for equality. Returns: True if all the list member variables of this object are equal to the list member variables of obj and False otherwise. """ if not isinstance(obj, Header): return False if self.target != obj.target: return False if self.comment != obj.comment: return False if self.apply_groups != obj.apply_groups: return False if self.apply_groups_except != obj.apply_groups_except: return False return True # This could be a VarType object, but I'm keeping it as it's class # b/c we're almost certainly going to have to do something more exotic with # it shortly to account for various rendering options like default iptables # policies or output file names, etc. etc. class Target: """The type of acl to be rendered from this policy file.""" def __init__(self, target): self.platform = target[0] self.options = target[1:] def __str__(self): return self.platform def __repr__(self): return self.__str__() def __eq__(self, other): return self.platform == other.platform and self.options == other.options def __ne__(self, other): return not self.__eq__(other) # Lexing/Parsing starts here tokens = ( 'ACTION', 'ADDR', 'ADDREXCLUDE', 'RESTRICT_ADDRESS_FAMILY', 'COMMENT', 'COUNTER', 'DADDR', 'DADDREXCLUDE', 'DECAPSULATE', 'DINTERFACE', 'DPFX', 'EDPFX', 'DPORT', 'DQUOTEDSTRING', 'DSCP', 'DSCP_EXCEPT', 'DSCP_MATCH', 'DSCP_RANGE', 'DSCP_SET', 'DTAG', 'DZONE', 'ENCAPSULATE', 'ESCAPEDSTRING', 'ETHER_TYPE', 'EXPIRATION', 'FILTER_TERM', 'FLEXIBLE_MATCH_RANGE', 'FORWARDING_CLASS', 'FORWARDING_CLASS_EXCEPT', 'FRAGMENT_OFFSET', 'HOP_LIMIT', 'APPLY_GROUPS', 'APPLY_GROUPS_EXCEPT', 'HEADER', 'HEX', 'ICMP_TYPE', 'ICMP_CODE', 'INTEGER', 'LOGGING', 'LOG_LIMIT', 'LOG_NAME', 'LOSS_PRIORITY', 'LPAREN', 'LSQUARE', 'NEXT_IP', 'OPTION', 'OWNER', 'PACKET_LEN', 'PLATFORM', 'PLATFORMEXCLUDE', 'POLICER', 'PORT', 'PORT_MIRROR', 'PRECEDENCE', 'PRIORITY', 'PROTOCOL', 'PROTOCOL_EXCEPT', 'QOS', 'RPAREN', 'RSQUARE', 'PAN_APPLICATION', 'ROUTING_INSTANCE', 'SADDR', 'SADDREXCLUDE', 'SINTERFACE', 'SOURCE_SERVICE_ACCOUNTS', 'SPFX', 'ESPFX', 'SPORT', 'SZONE', 'STAG', 'STRING', 'TARGET', 'TARGET_RESOURCES', 'TARGET_SERVICE_ACCOUNTS', 'TERM', 'TIMEOUT', 'TRAFFIC_CLASS_COUNT', 'TRAFFIC_TYPE', 'TTL', 'VERBATIM', 'VPN', ) literals = r':{},-/' t_ignore = ' \t' t_LSQUARE = r'\[' t_RSQUARE = r'\]' t_LPAREN = r'\(' t_RPAREN = r'\)' reserved = { 'action': 'ACTION', 'address': 'ADDR', 'address-exclude': 'ADDREXCLUDE', 'restrict-address-family': 'RESTRICT_ADDRESS_FAMILY', 'comment': 'COMMENT', 'counter': 'COUNTER', 'decapsulate': 'DECAPSULATE', 'destination-address': 'DADDR', 'destination-exclude': 'DADDREXCLUDE', 'destination-interface': 'DINTERFACE', 'destination-prefix': 'DPFX', 'destination-prefix-except': 'EDPFX', 'destination-port': 'DPORT', 'destination-tag': 'DTAG', 'destination-zone': 'DZONE', 'dscp-except': 'DSCP_EXCEPT', 'dscp-match': 'DSCP_MATCH', 'dscp-set': 'DSCP_SET', 'encapsulate': 'ENCAPSULATE', 'ether-type': 'ETHER_TYPE', 'expiration': 'EXPIRATION', 'filter-term': 
'FILTER_TERM', 'flexible-match-range': 'FLEXIBLE_MATCH_RANGE', 'forwarding-class': 'FORWARDING_CLASS', 'forwarding-class-except': 'FORWARDING_CLASS_EXCEPT', 'fragment-offset': 'FRAGMENT_OFFSET', 'hex': 'HEX', 'hop-limit': 'HOP_LIMIT', 'apply-groups': 'APPLY_GROUPS', 'apply-groups-except': 'APPLY_GROUPS_EXCEPT', 'header': 'HEADER', 'icmp-type': 'ICMP_TYPE', 'icmp-code': 'ICMP_CODE', 'logging': 'LOGGING', 'log-limit': 'LOG_LIMIT', 'log_name': 'LOG_NAME', 'loss-priority': 'LOSS_PRIORITY', 'next-ip': 'NEXT_IP', 'option': 'OPTION', 'owner': 'OWNER', 'packet-length': 'PACKET_LEN', 'platform': 'PLATFORM', 'platform-exclude': 'PLATFORMEXCLUDE', 'policer': 'POLICER', 'port': 'PORT', 'port-mirror': 'PORT_MIRROR', 'precedence': 'PRECEDENCE', 'priority': 'PRIORITY', 'protocol': 'PROTOCOL', 'protocol-except': 'PROTOCOL_EXCEPT', 'qos': 'QOS', 'pan-application': 'PAN_APPLICATION', 'routing-instance': 'ROUTING_INSTANCE', 'source-address': 'SADDR', 'source-exclude': 'SADDREXCLUDE', 'source-interface': 'SINTERFACE', 'source-service-accounts': 'SOURCE_SERVICE_ACCOUNTS', 'source-prefix': 'SPFX', 'source-prefix-except': 'ESPFX', 'source-port': 'SPORT', 'source-tag': 'STAG', 'source-zone': 'SZONE', 'target': 'TARGET', 'target-resources': 'TARGET_RESOURCES', 'target-service-accounts': 'TARGET_SERVICE_ACCOUNTS', 'term': 'TERM', 'timeout': 'TIMEOUT', 'traffic-class-count': 'TRAFFIC_CLASS_COUNT', 'traffic-type': 'TRAFFIC_TYPE', 'ttl': 'TTL', 'verbatim': 'VERBATIM', 'vpn': 'VPN', } # disable linting warnings for lexx/yacc code # pylint: disable=unused-argument,invalid-name,g-short-docstring-punctuation # pylint: disable=g-docstring-quotes,g-short-docstring-space # pylint: disable=g-space-before-docstring-summary,g-doc-args # pylint: disable=g-no-space-after-docstring-summary # pylint: disable=g-docstring-missing-newline def t_IGNORE_COMMENT(t): r'\#.*' pass def t_ESCAPEDSTRING(t): r'"([^"\\]*(?:\\"[^"\\]*)+)"' t.lexer.lineno += str(t.value).count('\n') return t def t_DQUOTEDSTRING(t): r'"[^"]*?"' t.lexer.lineno += str(t.value).count('\n') return t def t_newline(t): r'\n+' t.lexer.lineno += len(t.value) def t_error(t): print("Illegal character '%s' on line %s" % (t.value[0], t.lineno)) t.lexer.skip(1) def t_DSCP_RANGE(t): # pylint: disable=line-too-long r'\b((b[0-1]{6})|(af[1-4]{1}[1-3]{1})|(be)|(ef)|(cs[0-7]{1}))([-]{1})((b[0-1]{6})|(af[1-4]{1}[1-3]{1})|(be)|(ef)|(cs[0-7]{1}))\b' t.type = reserved.get(t.value, 'DSCP_RANGE') return t def t_DSCP(t): # we need to handle the '-' as part of the word, not as a boundary r'\b((b[0-1]{6})|(af[1-4]{1}[1-3]{1})|(be)|(ef)|(cs[0-7]{1}))(?![\w-])\b' t.type = reserved.get(t.value, 'DSCP') return t def t_HEX(t): r'0x[a-fA-F0-9]+' return t def t_INTEGER(t): r'\d+' return t def t_STRING(t): r'\w+([-_+.@/]\w*)*' # we have an identifier; let's check if it's a keyword or just a string. 
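  # e.g. 'source-address' resolves to the SADDR token via the reserved table,
  # while an unreserved word (such as a naming definition token) stays STRING.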
t.type = reserved.get(t.value, 'STRING') return t ### ## parser starts here ### def p_target(p): """ target : target header terms | """ if len(p) > 1: if type(p[1]) is Policy: p[1].AddFilter(p[2], p[3]) p[0] = p[1] else: p[0] = Policy(p[2], p[3]) def p_header(p): """ header : HEADER '{' header_spec '}' """ p[0] = p[3] def p_header_spec(p): """ header_spec : header_spec target_spec | header_spec comment_spec | header_spec apply_groups_spec | header_spec apply_groups_except_spec | """ if len(p) > 1: if type(p[1]) == Header: p[1].AddObject(p[2]) p[0] = p[1] else: p[0] = Header() p[0].AddObject(p[2]) # we may want to change this at some point if we want to be clever with things # like being able to set a default input/output policy for iptables policies. def p_target_spec(p): """ target_spec : TARGET ':' ':' strings_or_ints """ p[0] = Target(p[4]) def p_terms(p): """ terms : terms TERM STRING '{' term_spec '}' | """ if len(p) > 1: p[5].name = p[3] if type(p[1]) == list: p[1].append(p[5]) p[0] = p[1] else: p[0] = [p[5]] def p_term_spec(p): """ term_spec : term_spec action_spec | term_spec addr_spec | term_spec restrict_address_family_spec | term_spec comment_spec | term_spec counter_spec | term_spec traffic_class_count_spec | term_spec dscp_set_spec | term_spec dscp_match_spec | term_spec dscp_except_spec | term_spec decapsulate_spec | term_spec encapsulate_spec | term_spec ether_type_spec | term_spec exclude_spec | term_spec expiration_spec | term_spec filter_term_spec | term_spec flexible_match_range_spec | term_spec forwarding_class_spec | term_spec forwarding_class_except_spec | term_spec fragment_offset_spec | term_spec hop_limit_spec | term_spec icmp_type_spec | term_spec icmp_code_spec | term_spec interface_spec | term_spec logging_spec | term_spec log_limit_spec | term_spec log_name_spec | term_spec losspriority_spec | term_spec next_ip_spec | term_spec option_spec | term_spec owner_spec | term_spec packet_length_spec | term_spec platform_spec | term_spec policer_spec | term_spec port_spec | term_spec port_mirror_spec | term_spec precedence_spec | term_spec priority_spec | term_spec prefix_list_spec | term_spec protocol_spec | term_spec qos_spec | term_spec pan_application_spec | term_spec routinginstance_spec | term_spec source_service_accounts_spec | term_spec term_zone_spec | term_spec tag_list_spec | term_spec target_resources_spec | term_spec target_service_accounts_spec | term_spec timeout_spec | term_spec ttl_spec | term_spec traffic_type_spec | term_spec verbatim_spec | term_spec vpn_spec | """ if len(p) > 1: if type(p[1]) == Term: p[1].AddObject(p[2]) p[0] = p[1] else: p[0] = Term(p[2]) def p_restrict_address_family_spec(p): """ restrict_address_family_spec : RESTRICT_ADDRESS_FAMILY ':' ':' STRING """ p[0] = VarType(VarType.RESTRICT_ADDRESS_FAMILY, p[4]) def p_routinginstance_spec(p): """ routinginstance_spec : ROUTING_INSTANCE ':' ':' STRING """ p[0] = VarType(VarType.ROUTING_INSTANCE, p[4]) def p_losspriority_spec(p): """ losspriority_spec : LOSS_PRIORITY ':' ':' STRING """ p[0] = VarType(VarType.LOSS_PRIORITY, p[4]) def p_precedence_spec(p): """ precedence_spec : PRECEDENCE ':' ':' one_or_more_ints """ p[0] = VarType(VarType.PRECEDENCE, p[4]) def p_flexible_match_range_spec(p): """ flexible_match_range_spec : FLEXIBLE_MATCH_RANGE ':' ':' flex_match_key_values """ p[0] = [] for kv in p[4]: p[0].append(VarType(VarType.FLEXIBLE_MATCH_RANGE, kv)) def p_flex_match_key_values(p): """ flex_match_key_values : flex_match_key_values STRING HEX | flex_match_key_values STRING INTEGER | 
flex_match_key_values STRING STRING | STRING HEX | STRING INTEGER | STRING STRING | """ if len(p) < 1: return if p[1] not in _FLEXIBLE_MATCH_RANGE_ATTRIBUTES: raise FlexibleMatchError('%s is not a valid attribute' % p[1]) if p[1] == 'match-start': if p[2] not in _FLEXIBLE_MATCH_START_OPTIONS: raise FlexibleMatchError('%s value is not valid' % p[1]) # per Juniper, max bit length is 32 elif p[1] == 'bit-length': if int(p[2]) not in list(range(33)): raise FlexibleMatchError('%s value is not valid' % p[1]) # per Juniper, max bit offset is 7 elif p[1] == 'bit-offset': if int(p[2]) not in list(range(8)): raise FlexibleMatchError('%s value is not valid' % p[1]) # per Juniper, offset can be up to 256 bytes elif p[1] == 'byte-offset': if int(p[2]) not in list(range(256)): raise FlexibleMatchError('%s value is not valid' % p[1]) if type(p[0]) == type([]): p[0].append([p.slice[1:]]) else: p[0] = [[i.value for i in p.slice[1:]]] def p_forwarding_class_spec(p): """ forwarding_class_spec : FORWARDING_CLASS ':' ':' one_or_more_strings """ p[0] = [] for fclass in p[4]: p[0].append(VarType(VarType.FORWARDING_CLASS, fclass)) def p_forwarding_class_except_spec(p): """ forwarding_class_except_spec : FORWARDING_CLASS_EXCEPT ':' ':' one_or_more_strings """ p[0] = [] for fclass in p[4]: p[0].append(VarType(VarType.FORWARDING_CLASS_EXCEPT, fclass)) def p_next_ip_spec(p): """ next_ip_spec : NEXT_IP ':' ':' STRING """ p[0] = VarType(VarType.NEXT_IP, p[4]) def p_encapsulate_spec(p): """ encapsulate_spec : ENCAPSULATE ':' ':' STRING """ p[0] = VarType(VarType.ENCAPSULATE, p[4]) def p_decapsulate_spec(p): """ decapsulate_spec : DECAPSULATE ':' ':' STRING """ p[0] = VarType(VarType.DECAPSULATE, p[4]) def p_port_mirror_spec(p): """ port_mirror_spec : PORT_MIRROR ':' ':' STRING """ p[0] = VarType(VarType.PORT_MIRROR, p[4]) def p_icmp_type_spec(p): """ icmp_type_spec : ICMP_TYPE ':' ':' one_or_more_strings """ p[0] = VarType(VarType.ICMP_TYPE, p[4]) def p_icmp_code_spec(p): """ icmp_code_spec : ICMP_CODE ':' ':' one_or_more_ints """ p[0] = VarType(VarType.ICMP_CODE, p[4]) def p_priority_spec(p): """ priority_spec : PRIORITY ':' ':' INTEGER """ p[0] = VarType(VarType.PRIORITY, p[4]) def p_packet_length_spec(p): """ packet_length_spec : PACKET_LEN ':' ':' INTEGER | PACKET_LEN ':' ':' INTEGER '-' INTEGER """ if len(p) == 5: p[0] = VarType(VarType.PACKET_LEN, str(p[4])) else: p[0] = VarType(VarType.PACKET_LEN, str(p[4]) + '-' + str(p[6])) def p_fragment_offset_spec(p): """ fragment_offset_spec : FRAGMENT_OFFSET ':' ':' INTEGER | FRAGMENT_OFFSET ':' ':' INTEGER '-' INTEGER """ if len(p) == 5: p[0] = VarType(VarType.FRAGMENT_OFFSET, str(p[4])) else: p[0] = VarType(VarType.FRAGMENT_OFFSET, str(p[4]) + '-' + str(p[6])) def p_hop_limit_spec(p): """ hop_limit_spec : HOP_LIMIT ':' ':' INTEGER | HOP_LIMIT ':' ':' INTEGER '-' INTEGER """ if len(p) == 5: p[0] = VarType(VarType.HOP_LIMIT, str(p[4])) else: p[0] = VarType(VarType.HOP_LIMIT, str(p[4]) + '-' + str(p[6])) def p_one_or_more_dscps(p): """ one_or_more_dscps : one_or_more_dscps DSCP_RANGE | one_or_more_dscps DSCP | one_or_more_dscps INTEGER | DSCP_RANGE | DSCP | INTEGER """ if len(p) > 1: if type(p[1]) is list: p[1].append(p[2]) p[0] = p[1] else: p[0] = [p[1]] def p_dscp_set_spec(p): """ dscp_set_spec : DSCP_SET ':' ':' DSCP | DSCP_SET ':' ':' INTEGER """ p[0] = VarType(VarType.DSCP_SET, p[4]) def p_dscp_match_spec(p): """ dscp_match_spec : DSCP_MATCH ':' ':' one_or_more_dscps """ p[0] = [] for dscp in p[4]: p[0].append(VarType(VarType.DSCP_MATCH, dscp)) def 
p_dscp_except_spec(p): """ dscp_except_spec : DSCP_EXCEPT ':' ':' one_or_more_dscps """ p[0] = [] for dscp in p[4]: p[0].append(VarType(VarType.DSCP_EXCEPT, dscp)) def p_exclude_spec(p): """ exclude_spec : SADDREXCLUDE ':' ':' one_or_more_strings | DADDREXCLUDE ':' ':' one_or_more_strings | ADDREXCLUDE ':' ':' one_or_more_strings | PROTOCOL_EXCEPT ':' ':' one_or_more_strings """ p[0] = [] for ex in p[4]: if p[1].find('source-exclude') >= 0: p[0].append(VarType(VarType.SADDREXCLUDE, ex)) elif p[1].find('destination-exclude') >= 0: p[0].append(VarType(VarType.DADDREXCLUDE, ex)) elif p[1].find('address-exclude') >= 0: p[0].append(VarType(VarType.ADDREXCLUDE, ex)) elif p[1].find('protocol-except') >= 0: p[0].append(VarType(VarType.PROTOCOL_EXCEPT, ex)) def p_prefix_list_spec(p): """ prefix_list_spec : DPFX ':' ':' one_or_more_strings | EDPFX ':' ':' one_or_more_strings | SPFX ':' ':' one_or_more_strings | ESPFX ':' ':' one_or_more_strings """ p[0] = [] for pfx in p[4]: if p[1].find('source-prefix-except') >= 0: p[0].append(VarType(VarType.ESPFX, pfx)) elif p[1].find('source-prefix') >= 0: p[0].append(VarType(VarType.SPFX, pfx)) elif p[1].find('destination-prefix-except') >= 0: p[0].append(VarType(VarType.EDPFX, pfx)) elif p[1].find('destination-prefix') >= 0: p[0].append(VarType(VarType.DPFX, pfx)) def p_addr_spec(p): """ addr_spec : SADDR ':' ':' one_or_more_strings | DADDR ':' ':' one_or_more_strings | ADDR ':' ':' one_or_more_strings """ p[0] = [] for addr in p[4]: if p[1].find('source-address') >= 0: p[0].append(VarType(VarType.SADDRESS, addr)) elif p[1].find('destination-address') >= 0: p[0].append(VarType(VarType.DADDRESS, addr)) else: p[0].append(VarType(VarType.ADDRESS, addr)) def p_port_spec(p): """ port_spec : SPORT ':' ':' one_or_more_strings | DPORT ':' ':' one_or_more_strings | PORT ':' ':' one_or_more_strings """ p[0] = [] for port in p[4]: if p[1].find('source-port') >= 0: p[0].append(VarType(VarType.SPORT, port)) elif p[1].find('destination-port') >= 0: p[0].append(VarType(VarType.DPORT, port)) else: p[0].append(VarType(VarType.PORT, port)) def p_protocol_spec(p): """ protocol_spec : PROTOCOL ':' ':' strings_or_ints """ p[0] = [] for proto in p[4]: p[0].append(VarType(VarType.PROTOCOL, proto)) def p_source_service_accounts_spec(p): """ source_service_accounts_spec : SOURCE_SERVICE_ACCOUNTS ':' ':' one_or_more_strings """ p[0] = [] for service_account in p[4]: p[0].append(VarType(VarType.SOURCE_SERVICE_ACCOUNTS, service_account)) def p_tag_list_spec(p): """ tag_list_spec : DTAG ':' ':' one_or_more_strings | STAG ':' ':' one_or_more_strings """ p[0] = [] for tag in p[4]: if p[1].find('source-tag') >= 0: p[0].append(VarType(VarType.STAG, tag)) elif p[1].find('destination-tag') >= 0: p[0].append(VarType(VarType.DTAG, tag)) def p_target_resources_spec(p): """ target_resources_spec : TARGET_RESOURCES ':' ':' one_or_more_tuples """ p[0] = [] for target_resource in p[4]: p[0].append(VarType(VarType.TARGET_RESOURCES, target_resource)) def p_target_service_accounts_spec(p): """ target_service_accounts_spec : TARGET_SERVICE_ACCOUNTS ':' ':' one_or_more_strings """ p[0] = [] for service_account in p[4]: p[0].append(VarType(VarType.TARGET_SERVICE_ACCOUNTS, service_account)) def p_ether_type_spec(p): """ ether_type_spec : ETHER_TYPE ':' ':' one_or_more_strings """ p[0] = [] for proto in p[4]: p[0].append(VarType(VarType.ETHER_TYPE, proto)) def p_traffic_type_spec(p): """ traffic_type_spec : TRAFFIC_TYPE ':' ':' one_or_more_strings """ p[0] = [] for proto in p[4]: 
p[0].append(VarType(VarType.TRAFFIC_TYPE, proto)) def p_policer_spec(p): """ policer_spec : POLICER ':' ':' STRING """ p[0] = VarType(VarType.POLICER, p[4]) def p_logging_spec(p): """ logging_spec : LOGGING ':' ':' STRING """ p[0] = VarType(VarType.LOGGING, p[4]) def p_log_limit_spec(p): """ log_limit_spec : LOG_LIMIT ':' ':' INTEGER '/' STRING""" p[0] = VarType(VarType.LOG_LIMIT, (p[4], p[6])) def p_log_name_spec(p): """ log_name_spec : LOG_NAME ':' ':' DQUOTEDSTRING """ p[0] = VarType(VarType.LOG_NAME, p[4]) def p_option_spec(p): """ option_spec : OPTION ':' ':' one_or_more_strings """ p[0] = [] for opt in p[4]: p[0].append(VarType(VarType.OPTION, opt)) def p_action_spec(p): """ action_spec : ACTION ':' ':' STRING """ p[0] = VarType(VarType.ACTION, p[4]) def p_counter_spec(p): """ counter_spec : COUNTER ':' ':' STRING """ p[0] = VarType(VarType.COUNTER, p[4]) def p_traffic_class_count_spec(p): """ traffic_class_count_spec : TRAFFIC_CLASS_COUNT ':' ':' STRING """ p[0] = VarType(VarType.TRAFFIC_CLASS_COUNT, p[4]) def p_expiration_spec(p): """ expiration_spec : EXPIRATION ':' ':' INTEGER '-' INTEGER '-' INTEGER """ p[0] = VarType(VarType.EXPIRATION, datetime.date(int(p[4]), int(p[6]), int(p[8]))) def p_comment_spec(p): """ comment_spec : COMMENT ':' ':' DQUOTEDSTRING """ p[0] = VarType(VarType.COMMENT, p[4]) def p_owner_spec(p): """ owner_spec : OWNER ':' ':' STRING """ p[0] = VarType(VarType.OWNER, p[4]) def p_verbatim_spec(p): """ verbatim_spec : VERBATIM ':' ':' STRING DQUOTEDSTRING | VERBATIM ':' ':' STRING ESCAPEDSTRING """ p[0] = VarType(VarType.VERBATIM, [p[4], p[5].strip('"').replace('\\"', '"')]) def p_term_zone_spec(p): """ term_zone_spec : SZONE ':' ':' one_or_more_strings | DZONE ':' ':' one_or_more_strings """ p[0] = [] for zone in p[4]: if p[1].find('source-zone') >= 0: p[0].append(VarType(VarType.SZONE, zone)) elif p[1].find('destination-zone') >= 0: p[0].append(VarType(VarType.DZONE, zone)) def p_vpn_spec(p): """ vpn_spec : VPN ':' ':' STRING STRING | VPN ':' ':' STRING """ if len(p) == 6: p[0] = VarType(VarType.VPN, [p[4], p[5]]) else: p[0] = VarType(VarType.VPN, [p[4], '']) def p_qos_spec(p): """ qos_spec : QOS ':' ':' STRING """ p[0] = VarType(VarType.QOS, p[4]) def p_pan_application_spec(p): """ pan_application_spec : PAN_APPLICATION ':' ':' one_or_more_strings """ p[0] = [] for apps in p[4]: p[0].append(VarType(VarType.PAN_APPLICATION, apps)) def p_interface_spec(p): """ interface_spec : SINTERFACE ':' ':' STRING | DINTERFACE ':' ':' STRING """ if p[1].find('source-interface') >= 0: p[0] = VarType(VarType.SINTERFACE, p[4]) elif p[1].find('destination-interface') >= 0: p[0] = VarType(VarType.DINTERFACE, p[4]) def p_platform_spec(p): """ platform_spec : PLATFORM ':' ':' one_or_more_strings | PLATFORMEXCLUDE ':' ':' one_or_more_strings """ p[0] = [] for platform in p[4]: if p[1].find('platform-exclude') >= 0: p[0].append(VarType(VarType.PLATFORMEXCLUDE, platform)) elif p[1].find('platform') >= 0: p[0].append(VarType(VarType.PLATFORM, platform)) def p_apply_groups_spec(p): """ apply_groups_spec : APPLY_GROUPS ':' ':' one_or_more_strings """ p[0] = [] for group in p[4]: p[0].append(VarType(VarType.APPLY_GROUPS, group)) def p_apply_groups_except_spec(p): """ apply_groups_except_spec : APPLY_GROUPS_EXCEPT ':' ':' one_or_more_strings """ p[0] = [] for group_except in p[4]: p[0].append(VarType(VarType.APPLY_GROUPS_EXCEPT, group_except)) def p_timeout_spec(p): """ timeout_spec : TIMEOUT ':' ':' INTEGER """ p[0] = VarType(VarType.TIMEOUT, p[4]) def p_ttl_spec(p): """ ttl_spec : 
TTL ':' ':' INTEGER """ p[0] = VarType(VarType.TTL, p[4]) def p_filter_term_spec(p): """ filter_term_spec : FILTER_TERM ':' ':' STRING """ p[0] = VarType(VarType.FILTER_TERM, p[4]) def p_one_or_more_strings(p): """ one_or_more_strings : one_or_more_strings STRING | STRING | """ if len(p) > 1: if type(p[1]) == type([]): p[1].append(p[2]) p[0] = p[1] else: p[0] = [p[1]] def p_one_or_more_tuples(p): """ one_or_more_tuples : LSQUARE one_or_more_tuples RSQUARE | one_or_more_tuples ',' one_tuple | one_or_more_tuples one_tuple | one_tuple | """ if len(p) > 1: if p[1] == '[': p[0] = p[2] elif type(p[1]) == type([]): if p[2] == ',': p[1].append(p[3]) else: p[1].append(p[2]) p[0] = p[1] else: p[0] = [p[1]] def p_one_tuple(p): """ one_tuple : LPAREN STRING ',' STRING RPAREN | """ p[0] = (p[2], p[4]) def p_one_or_more_ints(p): """ one_or_more_ints : one_or_more_ints INTEGER | INTEGER | """ if len(p) > 1: if type(p[1]) == type([]): p[1].append(int(p[2])) p[0] = p[1] else: p[0] = [int(p[1])] def p_strings_or_ints(p): """ strings_or_ints : strings_or_ints STRING | strings_or_ints INTEGER | STRING | INTEGER | """ if len(p) > 1: if type(p[1]) is list: p[1].append(p[2]) p[0] = p[1] else: p[0] = [p[1]] def p_error(p): """.""" global parser next_token = parser.token() if next_token is None: use_token = 'EOF' else: use_token = repr(next_token.value) if p: raise ParseError(' ERROR on "%s" (type %s, line %d, Next %s)' % (p.value, p.type, p.lineno, use_token)) else: raise ParseError(' ERROR you likely have unablanaced "{"\'s') parser = yacc.yacc(write_tables=False, debug=0, errorlog=yacc.NullLogger()) # pylint: enable=unused-argument,invalid-name,g-short-docstring-punctuation # pylint: enable=g-docstring-quotes,g-short-docstring-space # pylint: enable=g-space-before-docstring-summary,g-doc-args # pylint: enable=g-no-space-after-docstring-summary # pylint: enable=g-docstring-missing-newline def _ReadFile(filename): """Read data from a file if it exists. Args: filename: str - Filename Returns: data: str contents of file. Raises: FileNotFoundError: if requested file does not exist. FileReadError: Any error resulting from trying to open/read file. """ logging.debug('ReadFile(%s)', filename) if os.path.exists(filename): try: data = open(filename, 'r').read() return data except IOError: raise FileReadError('Unable to open or read file %s' % filename) else: raise FileNotFoundError('Unable to open policy file %s' % filename) def _SubDirectory(child, parent): """Returns if the child is a subdirectory of the parent. Resolves relative paths, but does not resolve symbolic links. Args: child: A presumed child file path string. parent: Base parent path string. Returns: A boolean, true if the child is a subdirectory of the parent. """ child_path = os.path.abspath(child) parent_path = os.path.abspath(parent) return os.path.commonpath([parent_path, child_path]) == os.path.commonpath([ parent_path]) def _Preprocess(data, max_depth=5, base_dir=''): """Search input for include statements and import specified include file. Search input for include statements and if found, import specified file and recursively search included data for includes as well up to max_depth. Args: data: A string of Policy file data. 
max_depth: Maximum depth of included files base_dir: Base path string where to look for policy or include files Returns: A string containing result of the processed input data Raises: RecursionTooDeepError: nested include files exceed maximum InvalidIncludeDirectoryError: nested include files from invalid directories """ if not max_depth: raise RecursionTooDeepError('%s' % ( 'Included files exceed maximum recursion depth of %s.' % max_depth)) rval = [] for line in [x.rstrip() for x in data.splitlines()]: words = line.split() if len(words) > 1 and words[0] == '#include': # remove any quotes around included filename include_file = words[1].strip('\'"') include_file_path = os.path.join(base_dir, include_file) if not _SubDirectory(include_file_path, base_dir): raise InvalidIncludeDirectoryError( '%s' % ('Included file is from invalid directory: %s.' % include_file) ) data = _ReadFile(include_file_path) # recursively handle includes in included data inc_data = _Preprocess(data, max_depth - 1, base_dir=base_dir) rval.extend(inc_data) else: rval.append(line) return rval def ParseFile(filename, definitions=None, optimize=True, base_dir='', shade_check=False): """Parse the policy contained in file, optionally provide a naming object. Read specified policy file and parse into a policy object. Args: filename: Name of policy file to parse. definitions: optional naming library definitions object. optimize: bool - whether to summarize networks and services. base_dir: base path string to look for acls or include files. shade_check: bool - whether to raise an exception when a term is shaded. Returns: policy object or False (if parse error). """ data = _ReadFile(filename) p = ParsePolicy(data, definitions, optimize, base_dir=base_dir, shade_check=shade_check, filename=filename) return p def ParsePolicy(data, definitions=None, optimize=True, base_dir='', shade_check=False, filename=''): """Parse the policy in 'data', optionally provide a naming object. Parse a blob of policy text into a policy object. Args: data: a string blob of policy data to parse. definitions: optional naming library definitions object. optimize: bool - whether to summarize networks and services. base_dir: base path string to look for acls or include files. shade_check: bool - whether to raise an exception when a term is shaded. filename: string - filename used by the policy. Returns: policy object or False (if parse error). """ try: if definitions: globals()['DEFINITIONS'] = definitions else: globals()['DEFINITIONS'] = naming.Naming(DEFAULT_DEFINITIONS) globals()['_OPTIMIZE'] = optimize globals()['_SHADE_CHECK'] = shade_check lexer = lex.lex() preprocessed_data = '\n'.join(_Preprocess(data, base_dir=base_dir)) global parser policy = parser.parse(preprocessed_data, lexer=lexer) policy.filename = filename return policy except IndexError: return False # if you call this from the command line, you can specify a pol file for it to # read. if __name__ == '__main__': ret = 0 if len(sys.argv) > 1: try: ret = ParsePolicy(open(sys.argv[1], 'r').read(), filename=sys.argv[1]) except IOError: print('ERROR: \'%s\' either does not exist or is not readable' % (sys.argv[1])) ret = 1 else: # default to reading stdin ret = ParsePolicy(sys.stdin.read()) sys.exit(ret) capirca-2.0.9/capirca/lib/policy_simple.py000066400000000000000000000452521437377527500205520ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. 
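# Illustrative usage sketch for the policy module above (paths are
# placeholders, not part of the library):
#
#   from capirca.lib import naming, policy
#   defs = naming.Naming('./def')
#   pol = policy.ParseFile('./policies/sample.pol', defs, optimize=True)
#   for header, terms in pol.filters:
#     ...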
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple, line-oriented parser for Caprica policies. This parser differs from the default parser in that it preserves the original structure and defers resolving referents to the user. This is useful for analyzing policy structures and their use of naming data. It happens to discard inline comments but preservers line-level comments. Fields expected to have "naming" values are stored as a set without order or line breaks retained. """ from absl import logging import six class Field: """A name-value assignment within a block.""" def __init__(self, value): self.value = value def __str__(self): t = type(self) f = 'UNKNOWN' for k, v in field_map.items(): if t == v: f = k break indent = len(f) + 5 return '%s::%s' % (f, self.ValueStr().replace('\n', '\n' + ' ' * indent)) def __eq__(self, o): if not isinstance(o, self.__class__): return False return self.value == o.value def __ne__(self, o): return not self == o def Append(self, value): self.value += value def ValueStr(self): return self.value class IntegerField(Field): def __init__(self, value): super().__init__(value) try: _ = int(value) except ValueError: raise ValueError('Invalid integer field: "%s"' % str(self)) class NamingField(Field): """A naming field is one that refers to names in used in naming.py.""" def __init__(self, value): super().__init__(value) self.value = self.ParseString(value) def ParseString(self, value): """Split and validate a string value into individual names.""" parts = set(value.split()) for p in parts: self.ValidatePart(p) return parts def ValidatePart(self, part): """Validate that a string smells like a naming.py name.""" for c in part: if c not in '-_.' and not c.isdigit() and not c.isupper(): raise ValueError('Invalid name reference: "%s"' % part) def Append(self, value): """Split, validate, and add name contained within a string.""" parts = self.ParseString(value) self.value.update(parts) def ValueStr(self): """Return the value as a series of lines no longer than 60 chars each.""" values = sorted(self.value) line_wrap = 60 length = 0 line_buf = [] value_buf = [] for v in values: if length + len(v) > line_wrap: value_buf.append(' '.join(line_buf)) length = 0 line_buf = [] else: if line_buf: length += 1 line_buf.append(v) length += len(v) if line_buf: value_buf.append(' '.join(line_buf)) return ' ' + '\n'.join(value_buf) class Action(Field): """An action field.""" class Address(NamingField): """An address field.""" class Port(NamingField): """A port field.""" class Comment(Field): """A comment field.""" def ValueStr(self): # Comments should align with the string contents, after the leading # quotation mark. 
return self.value.replace('\n', '\n ') class Counter(Field): """A counter field.""" class Encapsulate(Field): """An encapsulate field.""" class Decapsulate(Field): """An decapsulate field.""" class DestinationAddress(Address): """A destination-address field.""" class DestinationExclude(Address): """A destination-exclude field.""" class DestinationInterface(Field): """A destination-interface field.""" class DestinationPort(Port): """A destination-port field.""" class DestinationPrefix(Field): """A destination-prefix field.""" class DestinationPrefixExcept(Field): """A destination-prefix-except field.""" class DestinationTag(Field): """A destination tag field.""" class DestinationZone(Field): """A destination-zone field.""" class DscpMatch(Field): """A dscp-match field.""" class DscpSet(Field): """A dscp-set field.""" class EtherType(Field): """An ether-type field.""" class Expiration(Field): """An expiration field.""" class FragmentOffset(Field): """A fragment-offset field.""" class ForwardingClass(Field): """A forwarding-class field.""" class ForwardingClassExcept(Field): """A forwarding-class-except field.""" class IcmpCode(Field): """A icmp-code field.""" class IcmpType(Field): """A icmp-type field.""" class Logging(Field): """A logging field.""" class LossPriority(Field): """A loss-priority field.""" class Option(Field): """An Option field.""" class Owner(Field): """An owner field.""" class NextIP(Field): """An owner field.""" class PacketLength(Field): """A packet-length field.""" class Platform(Field): """A platform field.""" class PlatformExclude(Field): """A platform-exclude field.""" class Policer(Field): """A rate-limit-icmp field.""" class PortMirror(Field): """A port-mirror field.""" class Precedence(Field): """A precedence field.""" class Protocol(Field): """A Protocol field.""" class ProtocolExcept(Field): """A protocol-except field.""" class Qos(Field): """A rate-limit-icmp field.""" class PANApplication(Field): """A rate-limit-icmp field.""" class RoutingInstance(Field): """A routing-instance field.""" class SourceAddress(Address): """A source-address field.""" class SourceExclude(Address): """A source-exclude field.""" class SourceInterface(Field): """A source-interface field.""" class SourcePort(Port): """A source-port field.""" class SourcePrefix(Field): """A source-prefix field.""" class SourcePrefixExcept(Field): """A source-prefix-except field.""" class SourceTag(Field): """A source tag field.""" class SourceZone(Field): """A source-zone field.""" class Target(Field): """A target field.""" class Timeout(IntegerField): """A timeout field.""" class TrafficType(Field): """A traffic-type field.""" class TrafficClassCount(Field): """A traffic-class-count field.""" class Verbatim(Field): """A verbatim field.""" class Vpn(Field): """A vpn field.""" destination_address_fields = (DestinationAddress, DestinationExclude, DestinationPrefix) field_map = { 'action': Action, 'address': Address, 'comment': Comment, 'counter': Counter, 'destination-address': DestinationAddress, 'destination-exclude': DestinationExclude, 'destination-interface': DestinationInterface, 'destination-port': DestinationPort, 'destination-prefix': DestinationPrefix, 'destination-prefix-except': DestinationPrefixExcept, 'destination-tag': DestinationTag, 'destination-zone': DestinationZone, 'dscp-match': DscpMatch, 'dscp-set': DscpSet, 'ether-type': EtherType, 'expiration': Expiration, 'fragment-offset': FragmentOffset, 'forwarding-class': ForwardingClass, 'forwarding-class-except': ForwardingClassExcept, 
'icmp-code': IcmpCode, 'icmp-type': IcmpType, 'logging': Logging, 'loss-priority': LossPriority, 'option': Option, 'owner': Owner, 'next-ip': NextIP, 'packet-length': PacketLength, 'platform': Platform, 'platform-exclude': PlatformExclude, 'policer': Policer, 'port': Port, 'port-mirror': PortMirror, 'precedence': Precedence, 'protocol': Protocol, 'protocol-except': ProtocolExcept, 'qos': Qos, 'pan-application': PANApplication, 'routing-instance': RoutingInstance, 'source-address': SourceAddress, 'source-exclude': SourceExclude, 'source-interface': SourceInterface, 'source-port': SourcePort, 'source-prefix': SourcePrefix, 'source-prefix-except': SourcePrefixExcept, 'source-tag': SourceTag, 'source-zone': SourceZone, 'target': Target, 'timeout': Timeout, 'traffic-class-count': TrafficClassCount, 'traffic-type': TrafficType, 'verbatim': Verbatim, 'vpn': Vpn, 'encapsulate': Encapsulate, 'decapsulate': Decapsulate, } class Block: """A section containing fields.""" def __init__(self): self.fields = [] def __iter__(self): return iter(self.fields) def __getitem__(self, i): return self.fields[i] def __str__(self): buf = [] buf.append(type(self).__name__.lower()) buf.append(' ') if self.Name(): buf.append(self.Name()) buf.append(' ') buf.append('{') # } buf.append('\n') for field in self.fields: buf.append(' ') buf.append(str(field)) buf.append('\n') buf.append('}') buf.append('\n') return ''.join(buf) def AddField(self, field): if not issubclass(type(field), Field): raise TypeError('%s not subclass of Field.' % field) self.fields.append(field) def FieldsWithType(self, f_type): if not issubclass(f_type, Field): raise TypeError('%s not subclass of Field.' % f_type) return [x for x in self.fields if isinstance(x, f_type)] def Match(self, match_fn): """Yield the fields and their indices for which match_fn is True.""" for i, f in enumerate(self.fields): if match_fn(f): yield i, f def Name(self): return '' def __eq__(self, o): if not isinstance(o, self.__class__): return False if len(self.fields) != len(o.fields): return False for mine, theirs in zip(self.fields, o.fields): logging.debug('testing "%s" vs "%s"', mine, theirs) if mine != theirs: return False return True def __ne__(self, o): return not self == o class Header(Block): """A header block.""" class Term(Block): """A policy term.""" def __init__(self, name): super().__init__() self.name = name def Name(self): return self.name def __eq__(self, o): if not super().__eq__(o): return False return self.name == o.name def Describe(self): """Return a human-readable description of the term.""" verbatims = self.FieldsWithType(Verbatim) if verbatims: return 'Verbatim: %s' % verbatims handled = set() handled.update(self.FieldsWithType(Comment)) pieces = [] actions = self.FieldsWithType(Action) if len(actions) != 1: raise ValueError('No action or multiple actions.') handled.update(actions) pieces.append(actions[0].value.title() + ' traffic') protocols = self.FieldsWithType(Protocol) all_protocols = set() if protocols: handled.update(protocols) for protocol in protocols: all_protocols.update(protocol.value.split()) pieces.append('using ' + ' or '.join(sorted(all_protocols))) icmp_code = self.FieldsWithType(IcmpCode) all_icmp_code = set() if icmp_code: handled.update(icmp_code) for code in icmp_code: all_icmp_code.update(code.value.split()) pieces.append('(ICMP code %s)' % ', '.join(sorted(all_icmp_code))) icmp_types = self.FieldsWithType(IcmpType) all_icmp_types = set() if icmp_types: handled.update(icmp_types) for icmp_type in icmp_types: 
all_icmp_types.update(icmp_type.value.split()) pieces.append('(ICMP types %s)' % ', '.join(sorted(all_icmp_types))) sources = self.FieldsWithType(SourceAddress) if sources: handled.update(sources) pieces.append('originating from') all_sources = set() for source in sources: all_sources.update(source.value) pieces.append(', '.join(sorted(all_sources))) source_ports = self.FieldsWithType(SourcePort) if source_ports: handled.update(source_ports) if sources: pieces.append('using port') else: pieces.append('originating port') all_sources = set() for source in source_ports: all_sources.update(source.value) pieces.append(', '.join(sorted(all_sources))) destinations = self.FieldsWithType(DestinationAddress) if destinations: handled.update(destinations) pieces.append('destined for') all_destinations = set() for destination in destinations: all_destinations.update(destination.value) pieces.append(', '.join(sorted(all_destinations))) destination_ports = self.FieldsWithType(DestinationPort) if destination_ports: handled.update(destination_ports) if destinations: pieces.append('on port') else: pieces.append('destined for port') all_destinations = set() for destination in destination_ports: all_destinations.update(destination.value) pieces.append(', '.join(sorted(all_destinations))) vpns = self.FieldsWithType(Vpn) if vpns: handled.update(vpns) pieces.append('via VPNs') pieces.append(','.join(x.value for x in vpns)) # Ignore some fields for ignored_type in (Expiration, Owner): ignored_fields = self.FieldsWithType(ignored_type) if ignored_fields: handled.update(ignored_fields) for field in self: if field not in handled: raise ValueError('Uncovered field: ' + str(field)) return ' '.join(pieces) class BlankLine: """A blank line.""" def __str__(self): return '\n' def __eq__(self, o): return isinstance(o, self.__class__) def __ne__(self, o): return not self == o class CommentLine: """A comment in the file.""" def __init__(self, data): self.data = data def __str__(self): return str(self.data) + '\n' def __eq__(self, o): if not isinstance(o, self.__class__): return False return self.data == o.data def __ne__(self, o): return not self == o class Include: """A reference to another policy definition.""" def __init__(self, identifier): self.identifier = identifier def __str__(self): return '#include %s' % self.identifier def __eq__(self, o): if not isinstance(o, self.__class__): return False return self.identifier == o.identifier def __ne__(self, o): return not self == o class Policy: """An ordered list of headers, terms, comments, blank lines and includes.""" def __init__(self, identifier): self.identifier = identifier self.members = [] def AddMember(self, member): m_type = type(member) if (m_type not in (Include, CommentLine, BlankLine) and not issubclass(m_type, Block)): raise TypeError('%s must be a Block, CommentLine, BlankLine,' ' or Include' % m_type) self.members.append(member) def __str__(self): return ''.join(str(x) for x in self.members) def __iter__(self): return iter(self.members) def __getitem__(self, i): return self.members[i] def Match(self, match_fn): """Yield the members and their indices for which match_fn is True.""" for i, m in enumerate(self.members): if match_fn(m): yield i, m def MatchFields(self, block_match_fn, field_match_fn): for match_idx, m in self.Match(block_match_fn): if not isinstance(m, Block): continue for field_idx, f in m.Match(field_match_fn): yield match_idx, field_idx, f class PolicyParser: """Parse a policy object from a data buffer.""" def __init__(self, data, identifier): 
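    # data: raw policy text, parsed line by line in Parse();
    # identifier: name attached to the resulting Policy object.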
self.data = data self.identifier = identifier self.block_in_progress = None self.policy = None def Parse(self): """Do the needful.""" self.policy = Policy(self.identifier) for line in self.data.split('\n'): line = line.strip() logging.debug('Processing line: "%s"', line) if self.block_in_progress: self.ParseInBlock(line) else: self.ParseTopLevel(line) if self.block_in_progress: raise ValueError('Unexpected EOF reading "%s"' % self.block_in_progress) return self.policy def ParseTopLevel(self, line): """Parse a line not nested within a block.""" if line == '': # pylint: disable=g-explicit-bool-comparison self.policy.AddMember(BlankLine()) return if line.startswith('#'): if line.startswith('#include '): self.ParseIncludeLine(line) return self.ParseCommentLine(line) return if line.startswith('header {') or line.startswith('header{'): # } self.ParseHeaderLine(line) return if line.startswith('term '): self.ParseTermLine(line) return raise ValueError('Unhandled top-level line %s' % line) def ParseCommentLine(self, line): """Parse a line with a line level comment.""" if self.block_in_progress: raise ValueError('Found comment line in block: %s' % line) self.policy.AddMember(CommentLine(line)) def ParseIncludeLine(self, line): """Parse an #include line refering to another file.""" if self.block_in_progress: raise ValueError('Found include line in block: %s' % line) line_parts = line.split() if len(line_parts) < 2: raise ValueError('Invalid include: %s' % line) inc_ref = line_parts[1] if '#' in inc_ref: inc_ref, _ = inc_ref.split('#', 1) self.policy.AddMember(Include(inc_ref)) def ParseHeaderLine(self, line): """Parse a line beginning a header block.""" if self.block_in_progress: raise ValueError('Nested blocks not allowed: %s' % line) self.block_in_progress = Header() def ParseTermLine(self, line): """Parse a line beginning a term block.""" if self.block_in_progress: raise ValueError('Nested blocks not allowed: %s' % line) line_parts = line.split() # Some terms don't have a space after the name if '{' in line_parts[1]: # } brace_idx = line_parts[1].index('{') # } line_parts[1] = line_parts[1][:brace_idx] else: if not line_parts[2].startswith('{'): # } raise ValueError('Invalid term line: %s' % line) term_name = line_parts[1] self.block_in_progress = Term(term_name) def ParseInBlock(self, line): """Parse a line when inside a block definition.""" if line == '' or line.startswith('#'): # pylint: disable=g-explicit-bool-comparison return if '::' in line: self.ParseField(line) return if line.startswith('}'): self.policy.AddMember(self.block_in_progress) self.block_in_progress = None return if self.block_in_progress is not None: self.block_in_progress.fields[-1].Append('\n' + line) def ParseField(self, line): """Parse a line containing a block field.""" name, value = line.split('::', 1) name = name.strip().lower() f_type = field_map.get(name) if not f_type: raise ValueError('Invalid field line: %s' % line) self.block_in_progress.AddField(f_type(value)) capirca-2.0.9/capirca/lib/policyreader.py000066400000000000000000000173261437377527500203650ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Utility to provide exploration of policy definition files. Allows read only access of policy definition files. The library creates a Policy object, which has filters containing terms. This library does no expansion on the tokens directly, such as in policy.py. TODO: This library is currently incomplete, and does not allow access to every argument of a policy term. """ from capirca.lib import naming class Error(Exception): """Generic error class.""" class FileOpenError(Error): """Trouble opening a file.""" class InvalidFilterError(Error): """Filter is invalid.""" class Filter: """Simple filter with a name a list of terms.""" def __init__(self, filtername=''): self.name = filtername self.term = [] def __str__(self): rval = [] title = 'Filter: %s' % str(self.name) rval.append('\n%s' % title) rval.append('-' * len(title)) for term in self.term: rval.append(str(term)) return '\n\n'.join(rval) class Term: """Simple term with a name a list of attributes.""" def __init__(self, termname=''): self.name = termname self.source = [] self.destination = [] self.sport = [] self.dport = [] self.action = [] self.option = [] self.protocol = [] def __str__(self): rval = [] rval.append(' Term: %s' % self.name) rval.append(' Source-address:: %s' % ' '.join(self.source)) rval.append(' Destination-address:: %s' % ' '.join(self.destination)) rval.append(' Source-port:: %s' % ' '.join(self.sport)) rval.append(' Destination-port:: %s' % ' '.join(self.dport)) rval.append(' Protocol:: %s' % ' '.join(self.protocol)) rval.append(' Option:: %s' % ' '.join(self.option)) rval.append(' Action:: %s' % ' '.join(self.action)) return '\n'.join(rval) class Policy: """Holds basic attributes of an unexpanded policy definition file.""" def __init__(self, filename, defs_data=None): """Build policy object and naming definitions from provided filenames. 
Args: filename: location of a .pol file defs_data: location of naming definitions directory, if any """ self.defs = naming.Naming(defs_data) self.filter = [] try: self.data = open(filename, 'r').readlines() except IOError as error_info: info = str(filename) + ' cannot be opened' raise FileOpenError('%s\n%s' % (info, error_info)) indent = 0 in_header = False in_term = False filt = Filter() term = Term() in_string = False for line in self.data: words = line.strip().split() quotes = len(line.split('"')) + 1 if quotes % 2: # are we in or out of double quotes in_string = not in_string # flip status of quote status if not in_string: if '{' in words: indent += 1 if words: if words[0] == 'header': in_header = True if words[0] == 'term': in_term = True term = Term(words[1]) if in_header and words[0] == 'target::': if filt.name != words[2]: # avoid empty dupe filters due to filt = Filter(words[2]) # multiple target header lines if in_term: if words[0] == 'source-address::': term.source.extend(words[1:]) if words[0] == 'destination-address::': term.destination.extend(words[1:]) if words[0] == 'source-port::': term.sport.extend(words[1:]) if words[0] == 'destination-port::': term.dport.extend(words[1:]) if words[0] == 'action::': term.action.extend(words[1:]) if words[0] == 'protocol::': term.protocol.extend(words[1:]) if words[0] == 'option::': term.option.extend(words[1:]) if '}' in words: indent -= 1 if in_header: self.filter.append(filt) in_header = False if in_term: filt.term.append(term) in_term = False def __str__(self): return '\n'.join(str(i) for i in self.filter) def Matches(self, src=None, dst=None, dport=None, sport=None, filtername=None): """Return list of term names that match specific attributes. Args: src: source ip address '12.1.1.1' dst: destination ip address '10.1.1.1' dport: any port/protocol combo, such as '80/tcp' or '53/udp' sport: any port/protocol combo, such as '80/tcp' or '53/udp' filtername: a filter name or None to search all filters Returns: results: list of lists, each list is index to filter & term in the policy Raises: InvalidFilterError: Error if filter is invalid. 
Example: p=policyreader.Policy('policy_path', 'definitions_path') p.Matches(dst='209.85.216.5', dport='25/tcp') [[0, 26]] print p.filter[0].term[26].name for match in p.Matches(dst='209.85.216.5'): print p.filter[match[0]].term[match[1]].name """ rval = [] results = [] filter_list = [] dport_parents = None sport_parents = None destination_parents = None source_parents = None if dport: dport_parents = self.defs.GetServiceParents(dport) if sport: sport_parents = self.defs.GetServiceParents(sport) if dst: destination_parents = self.defs.GetIpParents(dst) try: destination_parents.remove('ANY') destination_parents.remove('RESERVED') except ValueError: pass # ignore and continue if src: source_parents = self.defs.GetIpParents(src) try: source_parents.remove('ANY') source_parents.remove('RESERVED') except ValueError: pass # ignore and continue if not filtername: filter_list = self.filter else: for idx, fil in enumerate(self.filter): if filtername == fil.name: filter_list = [self.filter[idx]] if not filter_list: raise InvalidFilterError('invalid filter name: %s' % filtername) for findex, xfilter in enumerate(filter_list): mterms = [] mterms.append(set()) # dport mterms.append(set()) # sport mterms.append(set()) # dst mterms.append(set()) # src for tindex, term in enumerate(xfilter.term): if dport_parents: for token in dport_parents: if token in term.dport: mterms[0].add(tindex) else: mterms[0].add(tindex) if sport_parents: for token in sport_parents: if token in term.sport: mterms[1].add(tindex) else: mterms[1].add(tindex) if destination_parents: for token in destination_parents: if token in term.destination: mterms[2].add(tindex) else: mterms[2].add(tindex) if source_parents: for token in source_parents: if token in term.source: mterms[3].add(tindex) else: mterms[3].add(tindex) rval.append(list(mterms[0] & mterms[1] & mterms[2] & mterms[3])) for findex, fresult in enumerate(rval): for i in list(fresult): results.append([findex, i]) return results capirca-2.0.9/capirca/lib/port.py000066400000000000000000000104461437377527500166630ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Common library for network ports and protocol handling.""" class Error(Exception): """Base error class.""" class BadPortValue(Error): """Invalid port format.""" class BadPortRange(Error): """Out of bounds port range.""" class InvalidRange(Error): """Range is not valid (eg, single port).""" class NotSinglePort(Error): """Port range defined instead of a single port.""" class PPP: """PPP: [P]ort [P]rotocol [P]airs. Make port/protocol pairs an object for easy comparisons """ def __init__(self, service): """Init for PPP object. 
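    Illustrative sketch of the resulting object (values follow from the
    parsing below): PPP('80/tcp') has port '80' and protocol 'tcp';
    PPP('1024-65535/tcp') is a range (is_range is True, start 1024,
    end 65535); PPP('SSH') is treated as a nested service (nested is True).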
Args: service: A port/protocol pair as str (eg: '80/tcp', '22-23/tcp') or a nested service name (eg: 'SSH') """ # remove comments (if any) self.service = service.split('#')[0].strip() if '/' in self.service: self.port = self.service.split('/')[0] self.protocol = self.service.split('/')[1] self.nested = False else: # for nested services self.nested = True self.port = None self.protocol = None @property def is_range(self): if self.port: return '-' in self.port else: return False @property def is_single_port(self): if self.port: return '-' not in self.port else: return False @property def start(self): # return the first port in the range as int if '-' in self.port: self._start = int(self.port.split('-')[0]) else: raise InvalidRange('%s is not a valid port range' % self.port) return self._start @property def end(self): # return the last port in the range as int if '-' in self.port: self._end = int(self.port.split('-')[1]) else: raise InvalidRange('%s is not a valid port range' % self.port) return self._end def __contains__(self, other): # determine if a single-port object is within another objects' range try: return ((int(self.start) <= int(other.port) <= int(self.end)) and self.protocol == other.protocol) except: raise InvalidRange('%s must be a range' % self.port) def __lt__(self, other): if self.is_single_port: try: return int(self.port) < int(other.port) except: return False else: raise NotSinglePort('Comparisons cannot be performed on port ranges') def __gt__(self, other): if self.is_single_port: try: return int(self.port) > int(other.port) except: return False else: raise NotSinglePort('Comparisons cannot be performed on port ranges') def __le__(self, other): if self.is_single_port: try: return int(self.port) <= int(other.port) except: return False else: raise NotSinglePort('Comparisons cannot be performed on port ranges') def __ge__(self, other): if self.is_single_port: try: return int(self.port) >= int(other.port) except: return False else: raise NotSinglePort('Comparisons cannot be performed on port ranges') def __eq__(self, other): if self.is_single_port: try: return (int(self.port) == int(other.port) and self.protocol == other.protocol) except: return False else: raise NotSinglePort('Comparisons cannot be performed on port ranges') def Port(port): """Sanitize a port value. Args: port: a port value Returns: port: a port value Raises: BadPortValue: port is not valid integer or string BadPortRange: port is outside valid range """ pval = -1 try: pval = int(port) except ValueError: raise BadPortValue('port %s is not valid.' % port) if pval < 0 or pval > 65535: raise BadPortRange('port %s is out of range 0-65535.' % port) return pval capirca-2.0.9/capirca/lib/sonic.py000066400000000000000000000214711437377527500170120ustar00rootroot00000000000000# Copyright 2022 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """SONiC CONFIG_DB ACL generator.""" import copy import datetime import json import logging from capirca.lib import aclgenerator class Error(Exception): """Generic error class.""" class Term(aclgenerator.Term): """Creates the term for the SONiC CONFIG_DB ACL.""" # Capirca to SONiC config_db policy action map. ACTION_MAP = {'accept': 'FORWARD', 'deny': 'DROP'} def __init__(self, term, inet_version='inet', platform='sonic'): super().__init__(term) self.term = term self.inet_version = inet_version self.af = self.AF_MAP.get(self.inet_version) self.platform = platform # Combine (flatten) addresses with their exclusions into a resulting # flattened_saddr, flattened_daddr, flattened_addr. self.term.FlattenAll() def ConvertToDict(self): if not self.term.action: logging.info('Skipping term with empty action %s', self.term) return [] if self.term.platform: if self.platform not in self.term.platform: return [] a = self.term.action[0] action = self.ACTION_MAP[a] if self.term.protocol == ['icmp'] and self.af == 6: # proto and ip version mismatch return [] if self.term.protocol == ['icmpv6'] and self.af == 4: # proto and ip version mismatch return [] protos = self.term.protocol if not protos: protos = [None] tcp_flags = [] if self.term.option: for opt in [str(x) for x in self.term.option]: if opt == 'tcp-established': tcp_flags = [ '0x10/0x10', # ACK '0x4/0x4', # RST ] icmp_types = [] if self.term.icmp_type: icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, self.term.protocol, self.af) if not icmp_types: icmp_types = [None] sports = self.term.source_port if not sports: sports = [(0, 0)] dports = self.term.destination_port if not dports: dports = [(0, 0)] src_ip_key = 'SRC_IP' dst_ip_key = 'DST_IP' icmp_type_key = 'ICMP_TYPE' if self.af == 6: src_ip_key = 'SRC_IPV6' dst_ip_key = 'DST_IPV6' icmp_type_key = 'ICMPV6_TYPE' if not self._HasBothAddressFamiliesIPs('flattened_saddr'): return [] saddrs = self.term.GetAddressOfVersion('flattened_saddr', self.af) if not saddrs: saddrs = [None] if not self._HasBothAddressFamiliesIPs('flattened_daddr'): return [] daddrs = self.term.GetAddressOfVersion('flattened_daddr', self.af) if not daddrs: daddrs = [None] rules = [] rule_dict = { 'PACKET_ACTION': action, } for proto in protos: if proto is not None: # TODO: do we need to handle a case when the proto is a number? rule_dict['IP_PROTOCOL'] = str(self.PROTO_MAP[proto]) if proto == 'tcp' and tcp_flags: rule_dict['TCP_FLAGS'] = tcp_flags for icmp_type in icmp_types: if icmp_type is not None: rule_dict[icmp_type_key] = str(icmp_type) for saddr in saddrs: if saddr: rule_dict[src_ip_key] = str(saddr) for daddr in daddrs: if daddr: rule_dict[dst_ip_key] = str(daddr) for start, end in sports: if not start == end == 0: if start == end: rule_dict['L4_SRC_PORT'] = str(start) else: rule_dict['L4_SRC_PORT_RANGE'] = f'{start}-{end}' for start, end in dports: if not start == end == 0: if start == end: rule_dict['L4_DST_PORT'] = str(start) else: rule_dict['L4_DST_PORT_RANGE'] = f'{start}-{end}' rules.append(copy.deepcopy(rule_dict)) return rules def _HasBothAddressFamiliesIPs(self, address_type): """Checks if requested src/dst IPs of of matching term af exist. Args: address_type: Could be either flattened_saddr or flattened_daddr. Str. Returns: True if address_type of matching term af exists or False otherwise. Raises: Error: if unsupported address_type is passed. 
""" if address_type not in ['flattened_saddr', 'flattened_daddr']: raise Error(f'_HasBothAddressFamiliesIPs does not support {address_type}') addrs_af4 = self.term.GetAddressOfVersion(address_type, 4) addrs_af6 = self.term.GetAddressOfVersion(address_type, 6) if self.af == 4: if not addrs_af4 and addrs_af6: # We have IPv6 addresses, but no IPv4 - don't render this term for IPv4. return False else: if not addrs_af6 and addrs_af4: # We have IPv4 addresses, but no IPv6 - don't render this term for IPv6. return False return True def __str__(self): """Convert term to a string.""" rules = self.ConvertToDict() return json.dumps(rules, indent=2) class Sonic(aclgenerator.ACLGenerator): """A SONiC config_db ACL policy object.""" _PLATFORM = 'sonic' SUFFIX = '.sonicacl' _SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed')) _rule_counter = 0 _rule_increment = 10 _rule_priority = 65536 def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() # Remove unsupported things. unsupported_tokens = { 'verbatim', 'stateless_reply', 'platform_exclude', 'platform', 'source_address_exclude', 'destination_address_exclude' } supported_tokens -= unsupported_tokens # SONiC ACL model only supports these three forwarding actions. supported_sub_tokens['action'] = {'accept', 'deny'} # Simplify SONiC ACL model down to these options. supported_sub_tokens['option'] = {'tcp-established'} return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): self.sonic_policy = {} current_date = datetime.datetime.now(datetime.timezone.utc).date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) for header, terms in pol.filters: if self._PLATFORM not in header.platforms: continue # Options are anything after the platform name in the target message of # the policy header, [1:]. filter_options = header.FilterOptions(self._PLATFORM) # TODO: assume first item as a policy name. if filter_options: filter_name = filter_options[0] else: raise Error('Unable to find policy name') term_address_families = set() for i in self._SUPPORTED_AF: if i in filter_options: if i == 'mixed': term_address_families.update( self._SUPPORTED_AF.difference(['mixed'])) else: term_address_families.add(i) if not term_address_families: # No supported families. logging.info('Skipping policy %s as it does not apply to any of %s', filter_name, self._SUPPORTED_AF) continue for term in terms: if term.expiration: if term.expiration <= exp_info_date: logging.info( 'INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue for term_af in term_address_families: t = Term(term=term, inet_version=term_af) for rule in t.ConvertToDict(): if not rule: continue self._rule_counter += self._rule_increment self._rule_priority -= self._rule_increment if self._rule_priority < 0: raise Error('Rule priority can not be less than zero') rule_name = f'{filter_name}|RULE_{self._rule_counter}' rule['PRIORITY'] = str(self._rule_priority) self.sonic_policy[rule_name] = rule self.sonic_policy = {'ACL_RULE': self.sonic_policy} # This is what actually "renders" the policy into vendor-specific # representation! 
def __str__(self): return json.dumps(self.sonic_policy, indent=2, separators=(',', ': ')) capirca-2.0.9/capirca/lib/speedway.py000066400000000000000000000023041437377527500175120ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Speedway iptables generator. This is a subclass of Iptables lib.""" from string import Template from capirca.lib import iptables class Error(Exception): pass class Term(iptables.Term): """Generate Iptables policy terms.""" _PLATFORM = 'speedway' _PREJUMP_FORMAT = None _POSTJUMP_FORMAT = Template('-A $filter -j $term') class Speedway(iptables.Iptables): """Generates filters and terms from provided policy object.""" _PLATFORM = 'speedway' _DEFAULT_PROTOCOL = 'all' SUFFIX = '.ipt' _RENDER_PREFIX = '*filter' _RENDER_SUFFIX = 'COMMIT' _DEFAULTACTION_FORMAT = ':%s %s' _TERM = Term capirca-2.0.9/capirca/lib/srxlo.py000066400000000000000000000042611437377527500170440ustar00rootroot00000000000000# Copyright 2014 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Juniper SRX generator for loopback ACLs. This is a subclass of Juniper generator. Juniper SRX loopback filter uses the same syntax as regular Juniper stateless ACLs, with minor differences. This subclass effects those differences. """ from capirca.lib import juniper class Term(juniper.Term): """Single SRXlo term representation.""" _PLATFORM = 'srxlo' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.term.protocol = ['icmp6' if x == 'icmpv6' else x for x in self.term.protocol] self.term.protocol_except = [ 'icmp6' if x == 'icmpv6' else x for x in self.term.protocol_except ] def NormalizeIcmpTypes(self, icmp_types, protocols, af): protocols = ['icmpv6' if x == 'icmp6' else x for x in protocols] return super().NormalizeIcmpTypes(icmp_types, protocols, af) class SRXlo(juniper.Juniper): """SRXlo generator.""" _PLATFORM = 'srxlo' SUFFIX = '.jsl' _TERM = Term def _BuildTokens(self): """Build supported tokens for platform. 
Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() # flexible match is MX/Trio only supported_tokens.remove('flexible_match_range') # currently only support 'encapsulate' in juniper supported_tokens.remove('encapsulate') # currently only support 'decapsulate' in juniper supported_tokens.remove('decapsulate') # currently only support 'port-mirror' in juniper supported_tokens.remove('port_mirror') return supported_tokens, supported_sub_tokens capirca-2.0.9/capirca/lib/summarizer.py000066400000000000000000000170741437377527500201010ustar00rootroot00000000000000# Copyright 2014 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Discontinuous subnet mask summarizer.""" import collections from capirca.lib import nacaddr class DSMNet: """Hold IP address information for the purposes of DSM summarization. ipaddr maintainers explicitly declared that they will not support discontinuous subnet masks, hence this is required. """ def __init__(self, address, netmask, text=''): """Creates DSMNet. Args: address: network address as int. netmask: subnet mask as int. text: text comment. """ self.address = address self.netmask = netmask self.text = text def __eq__(self, other): try: return (self.address == other.address and self.netmask == other.netmask) except AttributeError: return NotImplemented def __ne__(self, other): eq = self.__eq__(other) if eq is NotImplemented: return NotImplemented return not eq def __le__(self, other): gt = self.__gt__(other) if gt is NotImplemented: return NotImplemented return not gt def __ge__(self, other): lt = self.__lt__(other) if lt is NotImplemented: return NotImplemented return not lt def __lt__(self, other): try: if self.address != other.address: return self.address < other.address except AttributeError: return NotImplemented return False def __gt__(self, other): try: if self.address != other.address: return self.address > other.address except AttributeError: return NotImplemented return False def __str__(self): return ' '.join([self.address, self.netmask]) def MergeText(self, text=''): """Returns self.text joined with optional text. Don't join the text if it's already contained in self.text. Args: text: text to be combined with self.text. Returns: Combined text. """ if self.text: if text and text not in self.text: return ', '.join([self.text, text]) return self.text else: return text def ToDottedQuad(net, negate=False, nondsm=False): """Turns a DSMNet object into decimal dotted quad tuple. Args: net: DSMNet object. negate: if subnet mask should be negated (and become wildcard). nondsm: if mask should be generated in prefixlen when non-DSM. Returns: tuple (decimal dotted address, decimal dotted mask). Raises: ValueError: if address is larger than 32 bits or mask is not exactly 0 or 32 bits. 
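  Example (illustrative values, traced from the conversion below):
    ToDottedQuad(DSMNet(0x0A000000, 0xFFFFFF7F))
      returns ('10.0.0.0', '255.255.255.127');
    ToDottedQuad(DSMNet(0x0A000000, 0xFFFFFFFE), nondsm=True)
      returns ('10.0.0.0', '31').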
""" if net.address.bit_length() > 32: raise ValueError('Addresses larger than 32 bits ' 'are currently not supported.') if net.netmask.bit_length() not in (0, 32): raise ValueError('Subnet masks other than 0 or 32 ' 'are currently not supported.') if negate: netmask = ~net.netmask else: netmask = net.netmask return (_Int32ToDottedQuad(net.address), _PrefixlenForNonDSM(netmask)) if nondsm else ( _Int32ToDottedQuad(net.address), _Int32ToDottedQuad(netmask)) def _PrefixlenForNonDSM(intmask): """Turns 32 bit integer into dotted decimal with JunOS friendly. Args: intmask: 32 bit integer. Returns: A string in dotted decimal or prefixlen format. """ dotmask = _Int32ToDottedQuad(intmask) if dotmask == '255.255.255.255': return '32' bitmask = '{:032b}'.format(intmask) prefixlen = 0 while bitmask[prefixlen] == '1': prefixlen += 1 return dotmask if int(bitmask[prefixlen:], 2) else str(prefixlen) def _Int32ToDottedQuad(num): """Turns 32 bit integer into dotted decimal notation. Args: num: 32 bit integer. Returns: Integer as a string in dotted decimal notation. """ octets = [] for _ in range(4): octet = num & 0xFF octets.insert(0, str(octet)) num >>= 8 return '.'.join(octets) def _NacaddrNetToDSMNet(net): """Converts nacaddr.IPv4 or nacaddr.IPv6 object into DSMNet object. Args: net: nacaddr.IPv4 or nacaddr.IPv6 object. Returns: DSMNet object. """ # left shift number of subnet mask bits, then leftshift until # full length of address reached address_as_int = int(net.network_address) netmask_as_int = (((1 << net.prefixlen) - 1) << (net.max_prefixlen - net.prefixlen)) return DSMNet(address_as_int, netmask_as_int, net.text) def _ToPrettyBinaryFormat(num): """Prettily formatted string of binary representation of suplied number. Useful for debugging. Args: num: number to be prettily formatted Returns: prettily formatted string """ # like ipaddr make assumption that this is ipv4 byte_strings = [] while num > 0 or len(byte_strings) < 4: byte_strings.append('{0:08b}'.format(num & 0xff)) num >>= 8 return ' '.join(reversed(byte_strings)) def Summarize(nets): """Summarizes networks while allowing for discontinuous subnet mask. Args: nets: list of nacaddr.IPv4 or nacaddr.IPv6 objects. Address family can be mixed, however there is no support for rendering anything other than IPv4. Returns: sorted list of DSMIPNet objects. """ result = [] optimized_nets = nacaddr.CollapseAddrList(nets) nets_by_netmask = collections.defaultdict(list) # group nets by subnet mask for net in optimized_nets: nets_by_netmask[net.prefixlen].append(_NacaddrNetToDSMNet(net)) for nets in nets_by_netmask.values(): result.extend(_SummarizeSameMask(nets)) return sorted(result) def _SummarizeSameMask(nets): """Summarizes networks while allowing for discontinuous subnet mask. Args: nets: list of unique, summarized DSMNet objects with the same netmask. Returns: sorted list of DSMNet objects that are discontinuously summarized. """ # singletons can not possible be paired and are our result singletons = [] # combinetons can potentially be paired combinetons = nets while combinetons: current_nets = combinetons combinetons = [] while current_nets: current_net = current_nets.pop(0) # look for pair net, but keep index handy for pair_net_index, pair_net in enumerate(current_nets): xored_address = current_net.address ^ pair_net.address # For networks with the same network mask: # check if they have exactly one bit difference # or they are "a pair". 
if (current_net.netmask == pair_net.netmask and (xored_address & (xored_address - 1) == 0) and xored_address > 0): # if pair was found, remove both, add paired up network # to combinetons for next run and move along # otherwise this network can never be paired current_nets.pop(pair_net_index) new_netmask = current_net.netmask ^ xored_address # summarize supplied networks into one using discontinuous # subnet mask. combinetons.append(DSMNet(min(current_net.address, pair_net.address), new_netmask, current_net.MergeText(pair_net.text))) break else: singletons.append(current_net) return singletons capirca-2.0.9/capirca/lib/windows.py000066400000000000000000000304121437377527500173640ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Generic Windows security policy generator; requires subclassing.""" import datetime import string from absl import logging from capirca.lib import aclgenerator from capirca.lib import nacaddr CMD_PREFIX = 'netsh ipsec static add ' class Term(aclgenerator.Term): """Generate generic windows policy terms.""" _PLATFORM = 'windows' _COMMENT_FORMAT = string.Template(': $comment') # filter rules _ACTION_TABLE = {} def __init__(self, term, filter_name, filter_action, af='inet'): """Setup a new term. Args: term: A policy.Term object to represent in windows_ipsec. filter_name: The name of the filter chan to attach the term to. filter_action: The default action of the filter. af: Which address family ('inet' or 'inet6') to apply the term to. Raises: UnsupportedFilterError: Filter is not supported. """ super().__init__(term) self.term = term # term object self.filter = filter_name # actual name of filter self.default_action = filter_action self.options = [] self.af = af if af == 'inet6': self._all_ips = nacaddr.IPv6('::/0') else: self._all_ips = nacaddr.IPv4('0.0.0.0/0') self.term_name = '%s_%s' % (self.filter[:1], self.term.name) def __str__(self): # Verify platform specific terms. Skip whole term if platform does not # match. 
if self.term.platform: if self._PLATFORM not in self.term.platform: return '' if self.term.platform_exclude: if self._PLATFORM in self.term.platform_exclude: return '' ret_str = [] # Don't render icmpv6 protocol terms under inet, or icmp under inet6 if ((self.af == 'inet6' and 'icmp' in self.term.protocol) or (self.af == 'inet' and 'icmpv6' in self.term.protocol)): logging.debug(self.NO_AF_LOG_PROTO.substitute(term=self.term.name, proto=self.term.protocol, af=self.af)) return '' # append comments to output ret_str.append(self._COMMENT_FORMAT.substitute(filter=self.filter, term=self.term_name, comment=self.term.comment)) # if terms does not specify action, use filter default action if not self.term.action: self.term.action[0].value = self.default_action if self.term.action[0] == 'next': return '' if len(self.term.action) > 1: raise aclgenerator.UnsupportedFilterError('\n%s %s %s %s' % ( 'Multiple actions unsupported by', self._PLATFORM, '\nError in term:', self.term.name)) # protocol if self.term.protocol: protocols = self.term.protocol else: protocols = ['any'] # addresses src_addr = self.term.source_address if not src_addr: src_addr = [self._all_ips] dst_addr = self.term.destination_address if not dst_addr: dst_addr = [self._all_ips] if (self.term.source_address_exclude or self.term.destination_address_exclude): raise aclgenerator.UnsupportedFilterError('\n%s %s %s %s' % ( 'address exclusions unsupported by', self._PLATFORM, '\nError in term:', self.term.name)) # ports = Map the ports in a straight list since multiports aren't supported (src_ports, dst_ports) = self._HandlePorts(self.term.source_port, self.term.destination_port) # The windows ipsec driver requires either 'tcp' or 'udp' to be specified # if a srcport or dstport is specified. Fail if src or dst ports are # specified and of the protocols are not exactly one or both of 'tcp' # or 'udp'. if ((not set(protocols).issubset(set(['tcp', 'udp']))) and (len(src_ports) > 1 or len(dst_ports) > 1)): raise aclgenerator.UnsupportedFilterError('%s %s %s' % ( '\n', self.term.name, 'src or dst ports may only be specified with "tcp" and/or "udp".')) # icmp-types (icmp_types, protocols) = self._HandleIcmpTypes(self.term.icmp_type, protocols) ret_str = [] self._HandlePreRule(ret_str) self._CartesianProduct(src_addr, dst_addr, protocols, icmp_types, src_ports, dst_ports, ret_str) self._HandlePreRule(ret_str) return '\n'.join(str(v) for v in ret_str if v) def _HandleIcmpTypes(self, icmp_types, protocols): """Perform implementation-specific icmp_type and protocol transforms. Note that icmp_types or protocols are passed as parameters in case they are to be munged prior to this function call, and may not be identical to self.term.* parameters. Args: icmp_types: a list of icmp types, e.g., self.term.icmp_types protocols: a list of protocols, e.g., self.term.protocols Returns: A pair of lists of (icmp_types, protocols) """ return None, None def _HandlePorts(self, src_ports, dst_ports): """Perform implementation-specific port transforms. Note that icmp_types or protocols are passed as parameters in case they are to be munged prior to this function call, and may not be identical to self.term.* parameters. Args: src_ports: list of source port range tuples, e.g., self.term.source_port dst_ports: list of destination port range tuples Returns: A pair of lists of (icmp_types, protocols) """ return None, None def _HandlePreRule(self, ret_str): """Perform any pre-cartesian product transforms on the ret_str array. 
Args: ret_str: an array of strings that will eventually be joined to form the string output for the term. """ pass def _CartesianProduct(self, src_addr, dst_addr, protocol, icmp_types, src_ports, dst_ports, ret_str): """Perform any the appropriate cartesian product of the input parameters. Args: src_addr: a type(IP) list of the source addresses dst_addr: a type(IP) list of the destination addresses protocol: a string list of the protocols icmp_types: a numeric list of the icmp_types src_ports: a (start, end) list of the source ports dst_ports: a (start,end) list of the destination ports ret_str: an array of strings that will eventually be joined to form the string output for the term. """ pass def _HandlePostRule(self, ret_str): """Perform any port-cartesian product transforms on the ret_str array. Args: ret_str: an array of strings that will eventually be joined to form the string output for the term. """ pass class WindowsGenerator(aclgenerator.ACLGenerator): """Generates filters and terms from provided policy object.""" _PLATFORM = 'windows' _DEFAULT_PROTOCOL = 'all' SUFFIX = '.bat' _RENDER_PREFIX = None _DEFAULT_ACTION = 'block' _TERM = Term _GOOD_AFS = ['inet', 'inet6'] def _BuildTokens(self): """Build supported tokens for platform. Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens |= {'option'} supported_tokens -= {'verbatim'} supported_sub_tokens.update({'action': {'accept', 'deny'}}) del supported_sub_tokens['option'] return supported_tokens, supported_sub_tokens def _TranslatePolicy(self, pol, exp_info): """Translate a policy from objects into strings.""" self.windows_policies = [] current_date = datetime.datetime.utcnow().date() exp_info_date = current_date + datetime.timedelta(weeks=exp_info) default_action = None good_default_actions = ['permit', 'block'] good_options = [] for header, terms in pol.filters: filter_type = None if self._PLATFORM not in header.platforms: continue filter_options = header.FilterOptions(self._PLATFORM)[1:] filter_name = header.FilterName(self._PLATFORM) # ensure all options after the filter name are expected for opt in filter_options: if opt not in good_default_actions + self._GOOD_AFS + good_options: raise aclgenerator.UnsupportedTargetOptionError('%s %s %s %s' % ( '\nUnsupported option found in', self._PLATFORM, 'target definition:', opt)) # Check for matching af for address_family in self._GOOD_AFS: if address_family in filter_options: # should not specify more than one AF in options if filter_type is not None: raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % ( '\nMay only specify one of', self._GOOD_AFS, 'in filter options:', filter_options)) filter_type = address_family if filter_type is None: filter_type = 'inet' # does this policy override the default filter actions? 
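      # (For example, a header line such as
      # "target:: windows_advfirewall out inet block", with the illustrative
      # filter name 'out', would make 'block' the default action picked up
      # by the loop below.)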
for next_target in header.target: if next_target.platform == self._PLATFORM: if len(next_target.options) > 1: for arg in next_target.options: if arg in good_default_actions: default_action = arg if default_action and default_action not in good_default_actions: raise aclgenerator.UnsupportedTargetOptionError('%s %s %s %s %s' % ( '\nOnly', ', '.join(good_default_actions), 'default filter action allowed;', default_action, 'used.')) # add the terms new_terms = [] term_names = set() for term in terms: if term.name in term_names: raise aclgenerator.DuplicateTermError( 'You have a duplicate term: %s' % term.name) term_names.add(term.name) if term.expiration: if term.expiration <= exp_info_date: logging.info('INFO: Term %s in policy %s expires ' 'in less than two weeks.', term.name, filter_name) if term.expiration <= current_date: logging.warning('WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', term.name, filter_name) continue if 'established' in term.option or 'tcp-established' in term.option: continue new_terms.append(self._TERM(term, filter_name, default_action, filter_type)) self.windows_policies.append((header, filter_name, filter_type, default_action, new_terms)) def __str__(self): target = [] pretty_platform = '%s%s' % (self._PLATFORM[0].upper(), self._PLATFORM[1:]) if self._RENDER_PREFIX: target.append(self._RENDER_PREFIX) for header, _, filter_type, default_action, terms in self.windows_policies: # Add comments for this filter target.append(': %s %s Policy' % (pretty_platform, header.FilterName(self._PLATFORM))) self._HandlePolicyHeader(header, target) # reformat long text comments, if needed comments = aclgenerator.WrapWords(header.comment, 70) if comments and comments[0]: for line in comments: target.append(': %s' % line) target.append(':') # add the p4 tags target.extend(aclgenerator.AddRepositoryTags(': ')) target.append(': ' + filter_type) if default_action: raise aclgenerator.UnsupportedTargetOptionError( 'Windows generator does not support default actions') # add the terms for term in terms: term_str = str(term) if term_str: target.append(term_str) self._HandleTermFooter(header, term, target) target.append('') return '\n'.join(target) def _HandlePolicyHeader(self, header, target): pass def _HandleTermFooter(self, header, term, target): pass capirca-2.0.9/capirca/lib/windows_advfirewall.py000066400000000000000000000120031437377527500217400ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """Windows advfirewall policy generator.""" import string from capirca.lib import windows class Term(windows.Term): """Generate windows advfirewall policy terms.""" _PLATFORM = 'windows_advfirewall' CMD_PREFIX = 'netsh advfirewall firewall ' # Logging: # netsh advfirewall>set allprofiles logging allowedconnections enable" # netsh advfirewall>set allprofiles logging droppedconnections enable" # 'in' or 'out' _DIR_ATOM = string.Template('dir=${dir}') # 'local' or 'remote' _ADDR_ATOM = string.Template('${dir}ip=${addr}') _PORT_ATOM = string.Template('${dir}port=${port}') # any | Integer | icmpv4 | icmpv6 | icmpv4:type,code | icmpv6:type,code # | tcp | udp _PROTO_ATOM = string.Template('protocol=${protocol}') # 'allow' or 'block' _ACTION_ATOM = string.Template('action=${action}') _RULE_FORMAT = string.Template('add rule name=${name} enable=yes ' 'interfacetype=any ${atoms}') _ACTION_TABLE = { 'accept': 'allow', 'deny': 'block', 'reject': 'block', } def _HandleIcmpTypes(self, icmp_types, protocols): # advfirewall actually puts this in the protocol spec, eg.: # icmpv4 | icmpv6 | icmpv4:type,code | icmpv6:type,code types = [''] if icmp_types: types = self.NormalizeIcmpTypes(self.term.icmp_type, protocols, self.af) # NormalizeIcmpTypes enforces this the af/ip version match: icmp_prefix = 'icmpv4' if self.af == 'inet6': icmp_prefix = 'icmpv6' if types: protocols = [] for typ in types: protocols.append('%s:%s,any' % (icmp_prefix, typ)) types = [''] # fixup for icmp v4 for i in range(len(protocols)): if protocols[i] == 'icmp': protocols[i] = 'icmpv4' return (types, protocols) def _HandlePorts(self, src_ports, dst_ports): return ([self._ComposePortString(src_ports)], [self._ComposePortString(dst_ports)]) def _CartesianProduct(self, src_addr, dst_addr, protocol, unused_icmp_types, src_port, dst_port, ret_str): # At least advfirewall supports port ranges, unlike windows ipsec, # so the src and dst port lists will always be one element long. for saddr in src_addr: for daddr in dst_addr: for proto in protocol: ret_str.append(self._ComposeRule( saddr, daddr, proto, src_port[0], dst_port[0], self.term.action[0])) def _ComposeRule(self, srcaddr, dstaddr, proto, srcport, dstport, action): """Convert the given parameters into a netsh add rule string.""" atoms = [] src_label = 'local' dst_label = 'remote' # We assume a default direction of OUT, but if it's IN, the Windows # advfirewall changes around the remote and local labels. 
if 'in' == self.filter.lower(): src_label = 'remote' dst_label = 'local' atoms.append(self._DIR_ATOM.substitute(dir=self.filter)) if srcaddr.prefixlen == 0: atoms.append(self._ADDR_ATOM.substitute(dir=src_label, addr='any')) else: atoms.append(self._ADDR_ATOM.substitute(dir=src_label, addr=str(srcaddr))) if dstaddr.prefixlen == 0: atoms.append(self._ADDR_ATOM.substitute(dir=dst_label, addr='any')) else: atoms.append(self._ADDR_ATOM.substitute(dir=dst_label, addr=str(dstaddr))) if srcport: atoms.append(self._PORT_ATOM.substitute(dir=src_label, port=srcport)) if dstport: atoms.append(self._PORT_ATOM.substitute(dir=dst_label, port=dstport)) if proto: if proto == 'vrrp': proto = '112' elif proto == 'ah': proto = '51' elif proto == 'hopopt': proto = '0' atoms.append(self._PROTO_ATOM.substitute(protocol=proto)) atoms.append(self._ACTION_ATOM.substitute( action=self._ACTION_TABLE[action])) return (self.CMD_PREFIX + self._RULE_FORMAT.substitute( name=self.term_name, atoms=' '.join(atoms))) def _ComposePortString(self, ports): """Convert the list of ports tuples into a multiport range string.""" if not ports: return '' multiports = [] for (start, end) in ports: if start == end: multiports.append(str(start)) else: multiports.append('-'.join([str(start), str(end)])) return ','.join(multiports) class WindowsAdvFirewall(windows.WindowsGenerator): """Generates filters and terms from provided policy object.""" _PLATFORM = 'windows_advfirewall' _TERM = Term capirca-2.0.9/capirca/lib/windows_ipsec.py000066400000000000000000000173751437377527500205640ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Windows IP security policy generator.""" # pylint: disable=g-importing-member from string import Template from absl import logging from capirca.lib import aclgenerator from capirca.lib import windows class Term(windows.Term): """Generate windows IP security policy terms.""" _PLATFORM = 'windows_ipsec' CMD_PREFIX = 'netsh ipsec static add ' # Windows IPSec Policy (which actually isn't limit to IPSec proper, as you # might expect) is structured such that you create: # * One policy (more can be defined, but only one active) # * One or more filter lists, which is similar to an IP chain (but does not # support action:: next) # * A filter, which is the matcher portion of a term # * A filteraction, which is the action to perform when a a filter is # matched. Not that the filter action does not support logging, logging # is associated with a auditpol filterlist. # * a rule, which links a policy, filter list, and filter action. 
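  #
  # Roughly, an accepted term named 'allow-dns' in a filter named
  # 'packetfilter' expands into commands like (illustrative values):
  #   netsh ipsec static add filterlist name=p_allow-dns-list
  #   netsh ipsec static add filteraction name=p_allow-dns-action action=permit
  #   netsh ipsec static add filter filterlist=p_allow-dns-list mirrored=yes
  #     srcaddr=any dstaddr=8.8.8.8 dstmask=32 dstport=53 protocol=udp
  #   netsh ipsec static add rule name=p_allow-dns-rule policy=packetfilter
  #     filterlist=p_allow-dns-list filteraction=p_allow-dns-action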
# Logging: # auditpol /set /subcategory:"Filtering Platform Packet Drop" /success:enable # /failure:enable # auditpol /set /subcategory:"IPsec Driver" /success:enable /failure:enable # auditpol /set /subcategory:"IPsec Main Mode" /success:enable /failure:enable # auditpol /set /subcategory:"IPsec Quick Mode" /success:enable # /failure:enable # auditpol /set /subcategory:"IPsec Extended Mode" /success:enable # /failure:enable _MASK_ATOM = Template('${dir}mask=${mask}') _ADDR_ATOM = Template('${dir}addr=${addr} ${mask}') _PROTO_ATOM = Template('protocol=${protocol}') _PORT_ATOM = Template('${dir}port=${port}') # ipsec is not stateful, we need mirrored=yes to get responses: _FILTER_FORMAT = Template('filter filterlist=${name} mirrored=yes ' '${atoms}') _FILTERACTION_FORMAT = Template('filteraction name=${name} ' 'action=${action}') _FILTERLIST_FORMAT = Template('filterlist name=$name ') _RULE_FORMAT = Template('rule name=$name policy=$policy ' 'filterlist=$filterlist ' 'filteraction=$filteraction ') _COMMENT_FORMAT = Template(': $comment') _LIST_SUFFIX = '-list' _ACTION_SUFFIX = '-action' # filter rules _ACTION_TABLE = { 'accept': 'permit', 'deny': 'block', 'reject': 'block', } def _HandleIcmpTypes(self, icmp_types, protocols): if icmp_types: raise aclgenerator.UnsupportedFilterError('\n%s %s %s %s' % ( 'icmp types unsupported by', self._PLATFORM, '\nError in term:', self.term.name)) return ([''], protocols) def _HandlePorts(self, src_ports, dst_ports): # ports = Map the ports in a straight list since multiports aren't supported return (self._CollapsePortTuples(src_ports), self._CollapsePortTuples(dst_ports)) def _HandlePreRule(self, ret_str): ret_str.append(self._ComposeFilterList()) ret_str.append(self._ComposeFilterAction( self._ACTION_TABLE[self.term.action[0]])) def _CartesianProduct(self, src_addr, dst_addr, protocol, unused_icmp_types, src_port, dst_port, ret_str): # yup, the full cartesian product... this makes me cry on the inside. 
for saddr in src_addr: if saddr.version != 4: logging.warning('WARNING: term contains a non IPv4 address %s, ' 'ignoring element of term %s.', saddr, self.term_name) continue for daddr in dst_addr: if daddr.version != 4: logging.warning('WARNING: term contains a non IPv4 address %s, ' 'ignoring element of term %s.', daddr, self.term_name) continue for proto in protocol: for sport in src_port: for dport in dst_port: ret_str.append(self._ComposeFilter(saddr.network_address, daddr.network_address, proto, saddr.prefixlen, daddr.prefixlen, sport, dport)) def _CollapsePortTuples(self, port_tuples): ports = [''] for tpl in port_tuples: if tpl: (port_start, port_end) = tpl ports = list(range(port_start, port_end+1)) return ports def _ComposeFilterList(self): return (self.CMD_PREFIX + self._FILTERLIST_FORMAT.substitute( name=self.term_name + self._LIST_SUFFIX)) def _ComposeFilterAction(self, action): return (self.CMD_PREFIX + self._FILTERACTION_FORMAT.substitute( name=self.term_name + self._ACTION_SUFFIX, action=action)) def _ComposeFilter(self, srcaddr, dstaddr, proto, srcmask, dstmask, srcport, dstport): """Convert the given parameters to a netsh filter rule string.""" atoms = [] if srcmask == 0: atoms.append(self._ADDR_ATOM.substitute(dir='src', addr='any', mask='')) else: mask_atom = self._MASK_ATOM.substitute(dir='src', mask=srcmask) atoms.append(self._ADDR_ATOM.substitute( dir='src', addr=srcaddr, mask=mask_atom)) if dstmask == 0: atoms.append(self._ADDR_ATOM.substitute(dir='dst', addr='any', mask='')) else: mask_atom = self._MASK_ATOM.substitute(dir='dst', mask=dstmask) atoms.append(self._ADDR_ATOM.substitute( dir='dst', addr=dstaddr, mask=mask_atom)) if srcport: atoms.append(self._PORT_ATOM.substitute(dir='src', port=srcport)) if dstport: atoms.append(self._PORT_ATOM.substitute(dir='dst', port=dstport)) if proto: atoms.append(self._PROTO_ATOM.substitute(protocol=proto)) return (self.CMD_PREFIX + self._FILTER_FORMAT.substitute( name=self.term_name + self._LIST_SUFFIX, atoms=' '.join(atoms))) def ComposeRule(self, policy): return (self.CMD_PREFIX + self._RULE_FORMAT.substitute( name=self.term_name + '-rule', policy=policy, filterlist=self.term_name + self._LIST_SUFFIX, filteraction=self.term_name + self._ACTION_SUFFIX)) class WindowsIPSec(windows.WindowsGenerator): """Generates filters and terms from provided policy object.""" _PLATFORM = 'windows_ipsec' _TERM = Term _POLICY_FORMAT = Template('policy name=$name assign=yes') _POLICY_SUFFIX = '-policy' _GOOD_AFS = ['inet'] def _BuildTokens(self): """Build supported tokens for platform. 
Returns: tuple containing both supported tokens and sub tokens """ supported_tokens, supported_sub_tokens = super()._BuildTokens() supported_tokens -= {'icmp_type'} del supported_sub_tokens['icmp_type'] return supported_tokens, supported_sub_tokens def _HandlePolicyHeader(self, header, target): policy_name = header.FilterName(self._PLATFORM) + self._POLICY_SUFFIX target.append(Term.CMD_PREFIX + self._POLICY_FORMAT.substitute(name=policy_name) + '\n') def _HandleTermFooter(self, header, term, target): target.append( term.ComposeRule(header.FilterName(self._PLATFORM)) + '\n') capirca-2.0.9/capirca/utils/000077500000000000000000000000001437377527500157125ustar00rootroot00000000000000capirca-2.0.9/capirca/utils/__init__.py000066400000000000000000000000001437377527500200110ustar00rootroot00000000000000capirca-2.0.9/capirca/utils/config.py000066400000000000000000000034631437377527500175370ustar00rootroot00000000000000"""A module to handle merging file configurations with CLI configs for Capirca.""" import yaml defaults = { 'base_directory': './policies', 'definitions_directory': './def', 'policy_file': None, 'output_directory': './', 'optimize': False, 'recursive': True, 'debug': False, 'verbose': False, 'ignore_directories': ['DEPRECATED', 'def'], 'max_renderers': 10, 'shade_check': False, 'exp_info': 2 } def yaml_loader(filename): with open(filename, 'r') as f: try: data = yaml.safe_load(f) except AttributeError: data = yaml.safe_load(f) return data def flags_to_dict(absl_flags): base = { 'base_directory': absl_flags.base_directory, 'definitions_directory': absl_flags.definitions_directory, 'policy_file': absl_flags.policy_file, 'output_directory': absl_flags.output_directory, 'optimize': absl_flags.optimize, 'recursive': absl_flags.recursive, 'debug': absl_flags.debug, 'verbose': absl_flags.verbose, 'ignore_directories': absl_flags.ignore_directories, 'max_renderers': absl_flags.max_renderers, 'shade_check': absl_flags.shade_check, 'exp_info': absl_flags.exp_info, } return { flag: base[flag] for flag in filter(lambda f: base[f] is not None, base) } def merge_files(*files): result = {} for item in files: data = yaml_loader(item) result.update(data) return { flag: result[flag] for flag in filter(lambda f: result[f] is not None, result) } def generate_configs(absl_flags): cli_configs = flags_to_dict(absl_flags) if absl_flags.config_file: file_configs = merge_files(*absl_flags.config_file) else: file_configs = {} result = defaults.copy() result.update(cli_configs) result.update(file_configs) return result capirca-2.0.9/capirca/utils/iputils.py000066400000000000000000000061441437377527500177620ustar00rootroot00000000000000"""A module of utilities to work with IP addresses in a faster way.""" import ipaddress def exclude_address( base_net: ipaddress._BaseNetwork, # pylint disable=protected-access exclude_net: ipaddress._BaseNetwork # pylint disable=protected-access ): """ Function to exclude a subnetwork from another, returning a generator that yields all values that correspond to the base network without the exclude network. This is functionally equivalent to the _BaseNetwork "address_exclude" from the `ipaddress` standard library, but is a faster implementation since the standard library function is a O(n) operation on the length of the netmask of the excluding network, whereas this function is O(1) for all cases. 
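  For example, excluding 10.0.0.64/26 from 10.0.0.0/24 yields 10.0.0.0/26
  and 10.0.0.128/25, while excluding 10.0.0.0/25 from the same base network
  yields just 10.0.0.128/25.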
Args: base_net: an object of type _BaseNetwork, the network that contains the exclude network exclude_net: an object of type _BaseNetwork, the network that is being removed from the base_net Raises: ValueError if exclude_net is not completely contained in base_net Yields: A sequence of IP networks that do not encompass the exclude_net """ if not isinstance(base_net, ipaddress._BaseNetwork): # pylint disable=protected-access raise TypeError('%s is not a network object' % base_net) if not isinstance(exclude_net, ipaddress._BaseNetwork): # pylint disable=protected-access raise TypeError('%s is not a network object' % exclude_net) if not base_net._version == exclude_net._version: # pylint disable=protected-access # pytype: disable=attribute-error raise TypeError( '%s and %s are not of the same version' % (base_net, exclude_net) ) if not exclude_net.subnet_of(base_net): # pytype: disable=attribute-error raise ValueError() if exclude_net == base_net: return include_range = base_net.network_address._ip, base_net.broadcast_address._ip # pylint disable=protected-access # pytype: disable=attribute-error exclude_range = exclude_net.network_address._ip, exclude_net.broadcast_address._ip # pylint disable=protected-access # pytype: disable=attribute-error address_class = base_net.network_address.__class__ # pylint disable=protected-access if include_range[0] == exclude_range[0]: result_start = address_class(exclude_range[1] + 1) result_end = address_class(include_range[1]) for address in ipaddress.summarize_address_range(result_start, result_end): yield address elif include_range[1] == exclude_range[1]: result_start = address_class(include_range[0]) result_end = address_class(exclude_range[0] - 1) for address in ipaddress.summarize_address_range(result_start, result_end): yield address else: first_section_start = address_class(include_range[0]) first_section_end = address_class(exclude_range[0] - 1) second_section_start = address_class(exclude_range[1] + 1) second_section_end = address_class(include_range[1]) for address in ipaddress.summarize_address_range(first_section_start, first_section_end): yield address for address in ipaddress.summarize_address_range(second_section_start, second_section_end): yield address capirca-2.0.9/def/000077500000000000000000000000001437377527500137065ustar00rootroot00000000000000capirca-2.0.9/def/LARGE_NETWORK.net000066400000000000000000003414661437377527500164770ustar00rootroot00000000000000# # Sample naming defintions for large network objects # # 2050 IPv4 IPs MANY_IPV4 = 0.0.0.0/32 1.1.1.1/32 2.2.2.2/32 3.3.3.3/32 4.4.4.4/32 5.5.5.5/32 6.6.6.6/32 7.7.7.7/32 8.8.8.8/32 9.9.9.9/32 10.10.10.10/32 11.11.11.11/32 12.12.12.12/32 13.13.13.13/32 14.14.14.14/32 15.15.15.15/32 16.16.16.16/32 17.17.17.17/32 18.18.18.18/32 19.19.19.19/32 20.20.20.20/32 21.21.21.21/32 22.22.22.22/32 23.23.23.23/32 24.24.24.24/32 25.25.25.25/32 26.26.26.26/32 27.27.27.27/32 28.28.28.28/32 29.29.29.29/32 30.30.30.30/32 31.31.31.31/32 32.32.32.32/32 33.33.33.33/32 34.34.34.34/32 35.35.35.35/32 36.36.36.36/32 37.37.37.37/32 38.38.38.38/32 39.39.39.39/32 40.40.40.40/32 41.41.41.41/32 42.42.42.42/32 43.43.43.43/32 44.44.44.44/32 45.45.45.45/32 46.46.46.46/32 47.47.47.47/32 48.48.48.48/32 49.49.49.49/32 50.50.50.50/32 51.51.51.51/32 52.52.52.52/32 53.53.53.53/32 54.54.54.54/32 55.55.55.55/32 56.56.56.56/32 57.57.57.57/32 58.58.58.58/32 59.59.59.59/32 60.60.60.60/32 61.61.61.61/32 62.62.62.62/32 63.63.63.63/32 64.64.64.64/32 65.65.65.65/32 66.66.66.66/32 67.67.67.67/32 68.68.68.68/32 69.69.69.69/32 
70.70.70.70/32 71.71.71.71/32 72.72.72.72/32 73.73.73.73/32 74.74.74.74/32 75.75.75.75/32 76.76.76.76/32 77.77.77.77/32 78.78.78.78/32 79.79.79.79/32 80.80.80.80/32 81.81.81.81/32 82.82.82.82/32 83.83.83.83/32 84.84.84.84/32 85.85.85.85/32 86.86.86.86/32 87.87.87.87/32 88.88.88.88/32 89.89.89.89/32 90.90.90.90/32 91.91.91.91/32 92.92.92.92/32 93.93.93.93/32 94.94.94.94/32 95.95.95.95/32 96.96.96.96/32 97.97.97.97/32 98.98.98.98/32 99.99.99.99/32 100.100.100.100/32 101.101.101.101/32 102.102.102.102/32 103.103.103.103/32 104.104.104.104/32 105.105.105.105/32 106.106.106.106/32 107.107.107.107/32 108.108.108.108/32 109.109.109.109/32 110.110.110.110/32 111.111.111.111/32 112.112.112.112/32 113.113.113.113/32 114.114.114.114/32 115.115.115.115/32 116.116.116.116/32 117.117.117.117/32 118.118.118.118/32 119.119.119.119/32 120.120.120.120/32 121.121.121.121/32 122.122.122.122/32 123.123.123.123/32 124.124.124.124/32 125.125.125.125/32 126.126.126.126/32 127.127.127.127/32 128.128.128.128/32 129.129.129.129/32 130.130.130.130/32 131.131.131.131/32 132.132.132.132/32 133.133.133.133/32 134.134.134.134/32 135.135.135.135/32 136.136.136.136/32 137.137.137.137/32 138.138.138.138/32 139.139.139.139/32 140.140.140.140/32 141.141.141.141/32 142.142.142.142/32 143.143.143.143/32 144.144.144.144/32 145.145.145.145/32 146.146.146.146/32 147.147.147.147/32 148.148.148.148/32 149.149.149.149/32 150.150.150.150/32 151.151.151.151/32 152.152.152.152/32 153.153.153.153/32 154.154.154.154/32 155.155.155.155/32 156.156.156.156/32 157.157.157.157/32 158.158.158.158/32 159.159.159.159/32 160.160.160.160/32 161.161.161.161/32 162.162.162.162/32 163.163.163.163/32 164.164.164.164/32 165.165.165.165/32 166.166.166.166/32 167.167.167.167/32 168.168.168.168/32 169.169.169.169/32 170.170.170.170/32 171.171.171.171/32 172.172.172.172/32 173.173.173.173/32 174.174.174.174/32 175.175.175.175/32 176.176.176.176/32 177.177.177.177/32 178.178.178.178/32 179.179.179.179/32 180.180.180.180/32 181.181.181.181/32 182.182.182.182/32 183.183.183.183/32 184.184.184.184/32 185.185.185.185/32 186.186.186.186/32 187.187.187.187/32 188.188.188.188/32 189.189.189.189/32 190.190.190.190/32 191.191.191.191/32 192.192.192.192/32 193.193.193.193/32 194.194.194.194/32 195.195.195.195/32 196.196.196.196/32 197.197.197.197/32 198.198.198.198/32 199.199.199.199/32 200.200.200.200/32 201.201.201.201/32 202.202.202.202/32 203.203.203.203/32 204.204.204.204/32 205.205.205.205/32 206.206.206.206/32 207.207.207.207/32 208.208.208.208/32 209.209.209.209/32 210.210.210.210/32 211.211.211.211/32 212.212.212.212/32 213.213.213.213/32 214.214.214.214/32 215.215.215.215/32 216.216.216.216/32 217.217.217.217/32 218.218.218.218/32 219.219.219.219/32 220.220.220.220/32 221.221.221.221/32 222.222.222.222/32 223.223.223.223/32 224.224.224.224/32 225.225.225.225/32 226.226.226.226/32 227.227.227.227/32 228.228.228.228/32 229.229.229.229/32 230.230.230.230/32 231.231.231.231/32 232.232.232.232/32 233.233.233.233/32 234.234.234.234/32 235.235.235.235/32 236.236.236.236/32 237.237.237.237/32 238.238.238.238/32 239.239.239.239/32 240.240.240.240/32 241.241.241.241/32 242.242.242.242/32 243.243.243.243/32 244.244.244.244/32 245.245.245.245/32 246.246.246.246/32 247.247.247.247/32 248.248.248.248/32 249.249.249.249/32 250.250.250.250/32 251.251.251.251/32 252.252.252.252/32 253.253.253.253/32 254.254.254.254/32 255.255.255.255/32 1.1.1.0/32 2.2.2.1/32 3.3.3.2/32 4.4.4.3/32 5.5.5.4/32 6.6.6.5/32 7.7.7.6/32 8.8.8.7/32 9.9.9.8/32 10.10.10.9/32 11.11.11.10/32 
12.12.12.11/32 13.13.13.12/32 14.14.14.13/32 15.15.15.14/32 16.16.16.15/32 17.17.17.16/32 18.18.18.17/32 19.19.19.18/32 20.20.20.19/32 21.21.21.20/32 22.22.22.21/32 23.23.23.22/32 24.24.24.23/32 25.25.25.24/32 26.26.26.25/32 27.27.27.26/32 28.28.28.27/32 29.29.29.28/32 30.30.30.29/32 31.31.31.30/32 32.32.32.31/32 33.33.33.32/32 34.34.34.33/32 35.35.35.34/32 36.36.36.35/32 37.37.37.36/32 38.38.38.37/32 39.39.39.38/32 40.40.40.39/32 41.41.41.40/32 42.42.42.41/32 43.43.43.42/32 44.44.44.43/32 45.45.45.44/32 46.46.46.45/32 47.47.47.46/32 48.48.48.47/32 49.49.49.48/32 50.50.50.49/32 51.51.51.50/32 52.52.52.51/32 53.53.53.52/32 54.54.54.53/32 55.55.55.54/32 56.56.56.55/32 57.57.57.56/32 58.58.58.57/32 59.59.59.58/32 60.60.60.59/32 61.61.61.60/32 62.62.62.61/32 63.63.63.62/32 64.64.64.63/32 65.65.65.64/32 66.66.66.65/32 67.67.67.66/32 68.68.68.67/32 69.69.69.68/32 70.70.70.69/32 71.71.71.70/32 72.72.72.71/32 73.73.73.72/32 74.74.74.73/32 75.75.75.74/32 76.76.76.75/32 77.77.77.76/32 78.78.78.77/32 79.79.79.78/32 80.80.80.79/32 81.81.81.80/32 82.82.82.81/32 83.83.83.82/32 84.84.84.83/32 85.85.85.84/32 86.86.86.85/32 87.87.87.86/32 88.88.88.87/32 89.89.89.88/32 90.90.90.89/32 91.91.91.90/32 92.92.92.91/32 93.93.93.92/32 94.94.94.93/32 95.95.95.94/32 96.96.96.95/32 97.97.97.96/32 98.98.98.97/32 99.99.99.98/32 100.100.100.99/32 101.101.101.100/32 102.102.102.101/32 103.103.103.102/32 104.104.104.103/32 105.105.105.104/32 106.106.106.105/32 107.107.107.106/32 108.108.108.107/32 109.109.109.108/32 110.110.110.109/32 111.111.111.110/32 112.112.112.111/32 113.113.113.112/32 114.114.114.113/32 115.115.115.114/32 116.116.116.115/32 117.117.117.116/32 118.118.118.117/32 119.119.119.118/32 120.120.120.119/32 121.121.121.120/32 122.122.122.121/32 123.123.123.122/32 124.124.124.123/32 125.125.125.124/32 126.126.126.125/32 127.127.127.126/32 128.128.128.127/32 129.129.129.128/32 130.130.130.129/32 131.131.131.130/32 132.132.132.131/32 133.133.133.132/32 134.134.134.133/32 135.135.135.134/32 136.136.136.135/32 137.137.137.136/32 138.138.138.137/32 139.139.139.138/32 140.140.140.139/32 141.141.141.140/32 142.142.142.141/32 143.143.143.142/32 144.144.144.143/32 145.145.145.144/32 146.146.146.145/32 147.147.147.146/32 148.148.148.147/32 149.149.149.148/32 150.150.150.149/32 151.151.151.150/32 152.152.152.151/32 153.153.153.152/32 154.154.154.153/32 155.155.155.154/32 156.156.156.155/32 157.157.157.156/32 158.158.158.157/32 159.159.159.158/32 160.160.160.159/32 161.161.161.160/32 162.162.162.161/32 163.163.163.162/32 164.164.164.163/32 165.165.165.164/32 166.166.166.165/32 167.167.167.166/32 168.168.168.167/32 169.169.169.168/32 170.170.170.169/32 171.171.171.170/32 172.172.172.171/32 173.173.173.172/32 174.174.174.173/32 175.175.175.174/32 176.176.176.175/32 177.177.177.176/32 178.178.178.177/32 179.179.179.178/32 180.180.180.179/32 181.181.181.180/32 182.182.182.181/32 183.183.183.182/32 184.184.184.183/32 185.185.185.184/32 186.186.186.185/32 187.187.187.186/32 188.188.188.187/32 189.189.189.188/32 190.190.190.189/32 191.191.191.190/32 192.192.192.191/32 193.193.193.192/32 194.194.194.193/32 195.195.195.194/32 196.196.196.195/32 197.197.197.196/32 198.198.198.197/32 199.199.199.198/32 200.200.200.199/32 201.201.201.200/32 202.202.202.201/32 203.203.203.202/32 204.204.204.203/32 205.205.205.204/32 206.206.206.205/32 207.207.207.206/32 208.208.208.207/32 209.209.209.208/32 210.210.210.209/32 211.211.211.210/32 212.212.212.211/32 213.213.213.212/32 214.214.214.213/32 215.215.215.214/32 216.216.216.215/32 
217.217.217.216/32 218.218.218.217/32 219.219.219.218/32 220.220.220.219/32 221.221.221.220/32 222.222.222.221/32 223.223.223.222/32 224.224.224.223/32 225.225.225.224/32 226.226.226.225/32 227.227.227.226/32 228.228.228.227/32 229.229.229.228/32 230.230.230.229/32 231.231.231.230/32 232.232.232.231/32 233.233.233.232/32 234.234.234.233/32 235.235.235.234/32 236.236.236.235/32 237.237.237.236/32 238.238.238.237/32 239.239.239.238/32 240.240.240.239/32 241.241.241.240/32 242.242.242.241/32 243.243.243.242/32 244.244.244.243/32 245.245.245.244/32 246.246.246.245/32 247.247.247.246/32 248.248.248.247/32 249.249.249.248/32 250.250.250.249/32 251.251.251.250/32 252.252.252.251/32 253.253.253.252/32 254.254.254.253/32 255.255.255.254/32 1.1.0.255/32 2.2.2.0/32 3.3.3.1/32 4.4.4.2/32 5.5.5.3/32 6.6.6.4/32 7.7.7.5/32 8.8.8.6/32 9.9.9.7/32 10.10.10.8/32 11.11.11.9/32 12.12.12.10/32 13.13.13.11/32 14.14.14.12/32 15.15.15.13/32 16.16.16.14/32 17.17.17.15/32 18.18.18.16/32 19.19.19.17/32 20.20.20.18/32 21.21.21.19/32 22.22.22.20/32 23.23.23.21/32 24.24.24.22/32 25.25.25.23/32 26.26.26.24/32 27.27.27.25/32 28.28.28.26/32 29.29.29.27/32 30.30.30.28/32 31.31.31.29/32 32.32.32.30/32 33.33.33.31/32 34.34.34.32/32 35.35.35.33/32 36.36.36.34/32 37.37.37.35/32 38.38.38.36/32 39.39.39.37/32 40.40.40.38/32 41.41.41.39/32 42.42.42.40/32 43.43.43.41/32 44.44.44.42/32 45.45.45.43/32 46.46.46.44/32 47.47.47.45/32 48.48.48.46/32 49.49.49.47/32 50.50.50.48/32 51.51.51.49/32 52.52.52.50/32 53.53.53.51/32 54.54.54.52/32 55.55.55.53/32 56.56.56.54/32 57.57.57.55/32 58.58.58.56/32 59.59.59.57/32 60.60.60.58/32 61.61.61.59/32 62.62.62.60/32 63.63.63.61/32 64.64.64.62/32 65.65.65.63/32 66.66.66.64/32 67.67.67.65/32 68.68.68.66/32 69.69.69.67/32 70.70.70.68/32 71.71.71.69/32 72.72.72.70/32 73.73.73.71/32 74.74.74.72/32 75.75.75.73/32 76.76.76.74/32 77.77.77.75/32 78.78.78.76/32 79.79.79.77/32 80.80.80.78/32 81.81.81.79/32 82.82.82.80/32 83.83.83.81/32 84.84.84.82/32 85.85.85.83/32 86.86.86.84/32 87.87.87.85/32 88.88.88.86/32 89.89.89.87/32 90.90.90.88/32 91.91.91.89/32 92.92.92.90/32 93.93.93.91/32 94.94.94.92/32 95.95.95.93/32 96.96.96.94/32 97.97.97.95/32 98.98.98.96/32 99.99.99.97/32 100.100.100.98/32 101.101.101.99/32 102.102.102.100/32 103.103.103.101/32 104.104.104.102/32 105.105.105.103/32 106.106.106.104/32 107.107.107.105/32 108.108.108.106/32 109.109.109.107/32 110.110.110.108/32 111.111.111.109/32 112.112.112.110/32 113.113.113.111/32 114.114.114.112/32 115.115.115.113/32 116.116.116.114/32 117.117.117.115/32 118.118.118.116/32 119.119.119.117/32 120.120.120.118/32 121.121.121.119/32 122.122.122.120/32 123.123.123.121/32 124.124.124.122/32 125.125.125.123/32 126.126.126.124/32 127.127.127.125/32 128.128.128.126/32 129.129.129.127/32 130.130.130.128/32 131.131.131.129/32 132.132.132.130/32 133.133.133.131/32 134.134.134.132/32 135.135.135.133/32 136.136.136.134/32 137.137.137.135/32 138.138.138.136/32 139.139.139.137/32 140.140.140.138/32 141.141.141.139/32 142.142.142.140/32 143.143.143.141/32 144.144.144.142/32 145.145.145.143/32 146.146.146.144/32 147.147.147.145/32 148.148.148.146/32 149.149.149.147/32 150.150.150.148/32 151.151.151.149/32 152.152.152.150/32 153.153.153.151/32 154.154.154.152/32 155.155.155.153/32 156.156.156.154/32 157.157.157.155/32 158.158.158.156/32 159.159.159.157/32 160.160.160.158/32 161.161.161.159/32 162.162.162.160/32 163.163.163.161/32 164.164.164.162/32 165.165.165.163/32 166.166.166.164/32 167.167.167.165/32 168.168.168.166/32 169.169.169.167/32 170.170.170.168/32 
171.171.171.169/32 172.172.172.170/32 173.173.173.171/32 174.174.174.172/32 175.175.175.173/32 176.176.176.174/32 177.177.177.175/32 178.178.178.176/32 179.179.179.177/32 180.180.180.178/32 181.181.181.179/32 182.182.182.180/32 183.183.183.181/32 184.184.184.182/32 185.185.185.183/32 186.186.186.184/32 187.187.187.185/32 188.188.188.186/32 189.189.189.187/32 190.190.190.188/32 191.191.191.189/32 192.192.192.190/32 193.193.193.191/32 194.194.194.192/32 195.195.195.193/32 196.196.196.194/32 197.197.197.195/32 198.198.198.196/32 199.199.199.197/32 200.200.200.198/32 201.201.201.199/32 202.202.202.200/32 203.203.203.201/32 204.204.204.202/32 205.205.205.203/32 206.206.206.204/32 207.207.207.205/32 208.208.208.206/32 209.209.209.207/32 210.210.210.208/32 211.211.211.209/32 212.212.212.210/32 213.213.213.211/32 214.214.214.212/32 215.215.215.213/32 216.216.216.214/32 217.217.217.215/32 218.218.218.216/32 219.219.219.217/32 220.220.220.218/32 221.221.221.219/32 222.222.222.220/32 223.223.223.221/32 224.224.224.222/32 225.225.225.223/32 226.226.226.224/32 227.227.227.225/32 228.228.228.226/32 229.229.229.227/32 230.230.230.228/32 231.231.231.229/32 232.232.232.230/32 233.233.233.231/32 234.234.234.232/32 235.235.235.233/32 236.236.236.234/32 237.237.237.235/32 238.238.238.236/32 239.239.239.237/32 240.240.240.238/32 241.241.241.239/32 242.242.242.240/32 243.243.243.241/32 244.244.244.242/32 245.245.245.243/32 246.246.246.244/32 247.247.247.245/32 248.248.248.246/32 249.249.249.247/32 250.250.250.248/32 251.251.251.249/32 252.252.252.250/32 253.253.253.251/32 254.254.254.252/32 255.255.255.253/32 1.1.0.254/32 2.2.1.255/32 3.3.3.0/32 4.4.4.1/32 5.5.5.2/32 6.6.6.3/32 7.7.7.4/32 8.8.8.5/32 9.9.9.6/32 10.10.10.7/32 11.11.11.8/32 12.12.12.9/32 13.13.13.10/32 14.14.14.11/32 15.15.15.12/32 16.16.16.13/32 17.17.17.14/32 18.18.18.15/32 19.19.19.16/32 20.20.20.17/32 21.21.21.18/32 22.22.22.19/32 23.23.23.20/32 24.24.24.21/32 25.25.25.22/32 26.26.26.23/32 27.27.27.24/32 28.28.28.25/32 29.29.29.26/32 30.30.30.27/32 31.31.31.28/32 32.32.32.29/32 33.33.33.30/32 34.34.34.31/32 35.35.35.32/32 36.36.36.33/32 37.37.37.34/32 38.38.38.35/32 39.39.39.36/32 40.40.40.37/32 41.41.41.38/32 42.42.42.39/32 43.43.43.40/32 44.44.44.41/32 45.45.45.42/32 46.46.46.43/32 47.47.47.44/32 48.48.48.45/32 49.49.49.46/32 50.50.50.47/32 51.51.51.48/32 52.52.52.49/32 53.53.53.50/32 54.54.54.51/32 55.55.55.52/32 56.56.56.53/32 57.57.57.54/32 58.58.58.55/32 59.59.59.56/32 60.60.60.57/32 61.61.61.58/32 62.62.62.59/32 63.63.63.60/32 64.64.64.61/32 65.65.65.62/32 66.66.66.63/32 67.67.67.64/32 68.68.68.65/32 69.69.69.66/32 70.70.70.67/32 71.71.71.68/32 72.72.72.69/32 73.73.73.70/32 74.74.74.71/32 75.75.75.72/32 76.76.76.73/32 77.77.77.74/32 78.78.78.75/32 79.79.79.76/32 80.80.80.77/32 81.81.81.78/32 82.82.82.79/32 83.83.83.80/32 84.84.84.81/32 85.85.85.82/32 86.86.86.83/32 87.87.87.84/32 88.88.88.85/32 89.89.89.86/32 90.90.90.87/32 91.91.91.88/32 92.92.92.89/32 93.93.93.90/32 94.94.94.91/32 95.95.95.92/32 96.96.96.93/32 97.97.97.94/32 98.98.98.95/32 99.99.99.96/32 100.100.100.97/32 101.101.101.98/32 102.102.102.99/32 103.103.103.100/32 104.104.104.101/32 105.105.105.102/32 106.106.106.103/32 107.107.107.104/32 108.108.108.105/32 109.109.109.106/32 110.110.110.107/32 111.111.111.108/32 112.112.112.109/32 113.113.113.110/32 114.114.114.111/32 115.115.115.112/32 116.116.116.113/32 117.117.117.114/32 118.118.118.115/32 119.119.119.116/32 120.120.120.117/32 121.121.121.118/32 122.122.122.119/32 123.123.123.120/32 124.124.124.121/32 
125.125.125.122/32 126.126.126.123/32 127.127.127.124/32 128.128.128.125/32 129.129.129.126/32 130.130.130.127/32 131.131.131.128/32 132.132.132.129/32 133.133.133.130/32 134.134.134.131/32 135.135.135.132/32 136.136.136.133/32 137.137.137.134/32 138.138.138.135/32 139.139.139.136/32 140.140.140.137/32 141.141.141.138/32 142.142.142.139/32 143.143.143.140/32 144.144.144.141/32 145.145.145.142/32 146.146.146.143/32 147.147.147.144/32 148.148.148.145/32 149.149.149.146/32 150.150.150.147/32 151.151.151.148/32 152.152.152.149/32 153.153.153.150/32 154.154.154.151/32 155.155.155.152/32 156.156.156.153/32 157.157.157.154/32 158.158.158.155/32 159.159.159.156/32 160.160.160.157/32 161.161.161.158/32 162.162.162.159/32 163.163.163.160/32 164.164.164.161/32 165.165.165.162/32 166.166.166.163/32 167.167.167.164/32 168.168.168.165/32 169.169.169.166/32 170.170.170.167/32 171.171.171.168/32 172.172.172.169/32 173.173.173.170/32 174.174.174.171/32 175.175.175.172/32 176.176.176.173/32 177.177.177.174/32 178.178.178.175/32 179.179.179.176/32 180.180.180.177/32 181.181.181.178/32 182.182.182.179/32 183.183.183.180/32 184.184.184.181/32 185.185.185.182/32 186.186.186.183/32 187.187.187.184/32 188.188.188.185/32 189.189.189.186/32 190.190.190.187/32 191.191.191.188/32 192.192.192.189/32 193.193.193.190/32 194.194.194.191/32 195.195.195.192/32 196.196.196.193/32 197.197.197.194/32 198.198.198.195/32 199.199.199.196/32 200.200.200.197/32 201.201.201.198/32 202.202.202.199/32 203.203.203.200/32 204.204.204.201/32 205.205.205.202/32 206.206.206.203/32 207.207.207.204/32 208.208.208.205/32 209.209.209.206/32 210.210.210.207/32 211.211.211.208/32 212.212.212.209/32 213.213.213.210/32 214.214.214.211/32 215.215.215.212/32 216.216.216.213/32 217.217.217.214/32 218.218.218.215/32 219.219.219.216/32 220.220.220.217/32 221.221.221.218/32 222.222.222.219/32 223.223.223.220/32 224.224.224.221/32 225.225.225.222/32 226.226.226.223/32 227.227.227.224/32 228.228.228.225/32 229.229.229.226/32 230.230.230.227/32 231.231.231.228/32 232.232.232.229/32 233.233.233.230/32 234.234.234.231/32 235.235.235.232/32 236.236.236.233/32 237.237.237.234/32 238.238.238.235/32 239.239.239.236/32 240.240.240.237/32 241.241.241.238/32 242.242.242.239/32 243.243.243.240/32 244.244.244.241/32 245.245.245.242/32 246.246.246.243/32 247.247.247.244/32 248.248.248.245/32 249.249.249.246/32 250.250.250.247/32 251.251.251.248/32 252.252.252.249/32 253.253.253.250/32 254.254.254.251/32 255.255.255.252/32 1.1.0.253/32 2.2.1.254/32 3.3.2.255/32 4.4.4.0/32 5.5.5.1/32 6.6.6.2/32 7.7.7.3/32 8.8.8.4/32 9.9.9.5/32 10.10.10.6/32 11.11.11.7/32 12.12.12.8/32 13.13.13.9/32 14.14.14.10/32 15.15.15.11/32 16.16.16.12/32 17.17.17.13/32 18.18.18.14/32 19.19.19.15/32 20.20.20.16/32 21.21.21.17/32 22.22.22.18/32 23.23.23.19/32 24.24.24.20/32 25.25.25.21/32 26.26.26.22/32 27.27.27.23/32 28.28.28.24/32 29.29.29.25/32 30.30.30.26/32 31.31.31.27/32 32.32.32.28/32 33.33.33.29/32 34.34.34.30/32 35.35.35.31/32 36.36.36.32/32 37.37.37.33/32 38.38.38.34/32 39.39.39.35/32 40.40.40.36/32 41.41.41.37/32 42.42.42.38/32 43.43.43.39/32 44.44.44.40/32 45.45.45.41/32 46.46.46.42/32 47.47.47.43/32 48.48.48.44/32 49.49.49.45/32 50.50.50.46/32 51.51.51.47/32 52.52.52.48/32 53.53.53.49/32 54.54.54.50/32 55.55.55.51/32 56.56.56.52/32 57.57.57.53/32 58.58.58.54/32 59.59.59.55/32 60.60.60.56/32 61.61.61.57/32 62.62.62.58/32 63.63.63.59/32 64.64.64.60/32 65.65.65.61/32 66.66.66.62/32 67.67.67.63/32 68.68.68.64/32 69.69.69.65/32 70.70.70.66/32 71.71.71.67/32 72.72.72.68/32 73.73.73.69/32 
74.74.74.70/32 75.75.75.71/32 76.76.76.72/32 77.77.77.73/32 78.78.78.74/32 79.79.79.75/32 80.80.80.76/32 81.81.81.77/32 82.82.82.78/32 83.83.83.79/32 84.84.84.80/32 85.85.85.81/32 86.86.86.82/32 87.87.87.83/32 88.88.88.84/32 89.89.89.85/32 90.90.90.86/32 91.91.91.87/32 92.92.92.88/32 93.93.93.89/32 94.94.94.90/32 95.95.95.91/32 96.96.96.92/32 97.97.97.93/32 98.98.98.94/32 99.99.99.95/32 100.100.100.96/32 101.101.101.97/32 102.102.102.98/32 103.103.103.99/32 104.104.104.100/32 105.105.105.101/32 106.106.106.102/32 107.107.107.103/32 108.108.108.104/32 109.109.109.105/32 110.110.110.106/32 111.111.111.107/32 112.112.112.108/32 113.113.113.109/32 114.114.114.110/32 115.115.115.111/32 116.116.116.112/32 117.117.117.113/32 118.118.118.114/32 119.119.119.115/32 120.120.120.116/32 121.121.121.117/32 122.122.122.118/32 123.123.123.119/32 124.124.124.120/32 125.125.125.121/32 126.126.126.122/32 127.127.127.123/32 128.128.128.124/32 129.129.129.125/32 130.130.130.126/32 131.131.131.127/32 132.132.132.128/32 133.133.133.129/32 134.134.134.130/32 135.135.135.131/32 136.136.136.132/32 137.137.137.133/32 138.138.138.134/32 139.139.139.135/32 140.140.140.136/32 141.141.141.137/32 142.142.142.138/32 143.143.143.139/32 144.144.144.140/32 145.145.145.141/32 146.146.146.142/32 147.147.147.143/32 148.148.148.144/32 149.149.149.145/32 150.150.150.146/32 151.151.151.147/32 152.152.152.148/32 153.153.153.149/32 154.154.154.150/32 155.155.155.151/32 156.156.156.152/32 157.157.157.153/32 158.158.158.154/32 159.159.159.155/32 160.160.160.156/32 161.161.161.157/32 162.162.162.158/32 163.163.163.159/32 164.164.164.160/32 165.165.165.161/32 166.166.166.162/32 167.167.167.163/32 168.168.168.164/32 169.169.169.165/32 170.170.170.166/32 171.171.171.167/32 172.172.172.168/32 173.173.173.169/32 174.174.174.170/32 175.175.175.171/32 176.176.176.172/32 177.177.177.173/32 178.178.178.174/32 179.179.179.175/32 180.180.180.176/32 181.181.181.177/32 182.182.182.178/32 183.183.183.179/32 184.184.184.180/32 185.185.185.181/32 186.186.186.182/32 187.187.187.183/32 188.188.188.184/32 189.189.189.185/32 190.190.190.186/32 191.191.191.187/32 192.192.192.188/32 193.193.193.189/32 194.194.194.190/32 195.195.195.191/32 196.196.196.192/32 197.197.197.193/32 198.198.198.194/32 199.199.199.195/32 200.200.200.196/32 201.201.201.197/32 202.202.202.198/32 203.203.203.199/32 204.204.204.200/32 205.205.205.201/32 206.206.206.202/32 207.207.207.203/32 208.208.208.204/32 209.209.209.205/32 210.210.210.206/32 211.211.211.207/32 212.212.212.208/32 213.213.213.209/32 214.214.214.210/32 215.215.215.211/32 216.216.216.212/32 217.217.217.213/32 218.218.218.214/32 219.219.219.215/32 220.220.220.216/32 221.221.221.217/32 222.222.222.218/32 223.223.223.219/32 224.224.224.220/32 225.225.225.221/32 226.226.226.222/32 227.227.227.223/32 228.228.228.224/32 229.229.229.225/32 230.230.230.226/32 231.231.231.227/32 232.232.232.228/32 233.233.233.229/32 234.234.234.230/32 235.235.235.231/32 236.236.236.232/32 237.237.237.233/32 238.238.238.234/32 239.239.239.235/32 240.240.240.236/32 241.241.241.237/32 242.242.242.238/32 243.243.243.239/32 244.244.244.240/32 245.245.245.241/32 246.246.246.242/32 247.247.247.243/32 248.248.248.244/32 249.249.249.245/32 250.250.250.246/32 251.251.251.247/32 252.252.252.248/32 253.253.253.249/32 254.254.254.250/32 255.255.255.251/32 1.1.0.252/32 2.2.1.253/32 3.3.2.254/32 4.4.3.255/32 5.5.5.0/32 6.6.6.1/32 7.7.7.2/32 8.8.8.3/32 9.9.9.4/32 10.10.10.5/32 11.11.11.6/32 12.12.12.7/32 13.13.13.8/32 14.14.14.9/32 15.15.15.10/32 
16.16.16.11/32 17.17.17.12/32 18.18.18.13/32 19.19.19.14/32 20.20.20.15/32 21.21.21.16/32 22.22.22.17/32 23.23.23.18/32 24.24.24.19/32 25.25.25.20/32 26.26.26.21/32 27.27.27.22/32 28.28.28.23/32 29.29.29.24/32 30.30.30.25/32 31.31.31.26/32 32.32.32.27/32 33.33.33.28/32 34.34.34.29/32 35.35.35.30/32 36.36.36.31/32 37.37.37.32/32 38.38.38.33/32 39.39.39.34/32 40.40.40.35/32 41.41.41.36/32 42.42.42.37/32 43.43.43.38/32 44.44.44.39/32 45.45.45.40/32 46.46.46.41/32 47.47.47.42/32 48.48.48.43/32 49.49.49.44/32 50.50.50.45/32 51.51.51.46/32 52.52.52.47/32 53.53.53.48/32 54.54.54.49/32 55.55.55.50/32 56.56.56.51/32 57.57.57.52/32 58.58.58.53/32 59.59.59.54/32 60.60.60.55/32 61.61.61.56/32 62.62.62.57/32 63.63.63.58/32 64.64.64.59/32 65.65.65.60/32 66.66.66.61/32 67.67.67.62/32 68.68.68.63/32 69.69.69.64/32 70.70.70.65/32 71.71.71.66/32 72.72.72.67/32 73.73.73.68/32 74.74.74.69/32 75.75.75.70/32 76.76.76.71/32 77.77.77.72/32 78.78.78.73/32 79.79.79.74/32 80.80.80.75/32 81.81.81.76/32 82.82.82.77/32 83.83.83.78/32 84.84.84.79/32 85.85.85.80/32 86.86.86.81/32 87.87.87.82/32 88.88.88.83/32 89.89.89.84/32 90.90.90.85/32 91.91.91.86/32 92.92.92.87/32 93.93.93.88/32 94.94.94.89/32 95.95.95.90/32 96.96.96.91/32 97.97.97.92/32 98.98.98.93/32 99.99.99.94/32 100.100.100.95/32 101.101.101.96/32 102.102.102.97/32 103.103.103.98/32 104.104.104.99/32 105.105.105.100/32 106.106.106.101/32 107.107.107.102/32 108.108.108.103/32 109.109.109.104/32 110.110.110.105/32 111.111.111.106/32 112.112.112.107/32 113.113.113.108/32 114.114.114.109/32 115.115.115.110/32 116.116.116.111/32 117.117.117.112/32 118.118.118.113/32 119.119.119.114/32 120.120.120.115/32 121.121.121.116/32 122.122.122.117/32 123.123.123.118/32 124.124.124.119/32 125.125.125.120/32 126.126.126.121/32 127.127.127.122/32 128.128.128.123/32 129.129.129.124/32 130.130.130.125/32 131.131.131.126/32 132.132.132.127/32 133.133.133.128/32 134.134.134.129/32 135.135.135.130/32 136.136.136.131/32 137.137.137.132/32 138.138.138.133/32 139.139.139.134/32 140.140.140.135/32 141.141.141.136/32 142.142.142.137/32 143.143.143.138/32 144.144.144.139/32 145.145.145.140/32 146.146.146.141/32 147.147.147.142/32 148.148.148.143/32 149.149.149.144/32 150.150.150.145/32 151.151.151.146/32 152.152.152.147/32 153.153.153.148/32 154.154.154.149/32 155.155.155.150/32 156.156.156.151/32 157.157.157.152/32 158.158.158.153/32 159.159.159.154/32 160.160.160.155/32 161.161.161.156/32 162.162.162.157/32 163.163.163.158/32 164.164.164.159/32 165.165.165.160/32 166.166.166.161/32 167.167.167.162/32 168.168.168.163/32 169.169.169.164/32 170.170.170.165/32 171.171.171.166/32 172.172.172.167/32 173.173.173.168/32 174.174.174.169/32 175.175.175.170/32 176.176.176.171/32 177.177.177.172/32 178.178.178.173/32 179.179.179.174/32 180.180.180.175/32 181.181.181.176/32 182.182.182.177/32 183.183.183.178/32 184.184.184.179/32 185.185.185.180/32 186.186.186.181/32 187.187.187.182/32 188.188.188.183/32 189.189.189.184/32 190.190.190.185/32 191.191.191.186/32 192.192.192.187/32 193.193.193.188/32 194.194.194.189/32 195.195.195.190/32 196.196.196.191/32 197.197.197.192/32 198.198.198.193/32 199.199.199.194/32 200.200.200.195/32 201.201.201.196/32 202.202.202.197/32 203.203.203.198/32 204.204.204.199/32 205.205.205.200/32 206.206.206.201/32 207.207.207.202/32 208.208.208.203/32 209.209.209.204/32 210.210.210.205/32 211.211.211.206/32 212.212.212.207/32 213.213.213.208/32 214.214.214.209/32 215.215.215.210/32 216.216.216.211/32 217.217.217.212/32 218.218.218.213/32 219.219.219.214/32 220.220.220.215/32 
221.221.221.216/32 222.222.222.217/32 223.223.223.218/32 224.224.224.219/32 225.225.225.220/32 226.226.226.221/32 227.227.227.222/32 228.228.228.223/32 229.229.229.224/32 230.230.230.225/32 231.231.231.226/32 232.232.232.227/32 233.233.233.228/32 234.234.234.229/32 235.235.235.230/32 236.236.236.231/32 237.237.237.232/32 238.238.238.233/32 239.239.239.234/32 240.240.240.235/32 241.241.241.236/32 242.242.242.237/32 243.243.243.238/32 244.244.244.239/32 245.245.245.240/32 246.246.246.241/32 247.247.247.242/32 248.248.248.243/32 249.249.249.244/32 250.250.250.245/32 251.251.251.246/32 252.252.252.247/32 253.253.253.248/32 254.254.254.249/32 255.255.255.250/32 1.1.0.251/32 2.2.1.252/32 3.3.2.253/32 4.4.3.254/32 5.5.4.255/32 6.6.6.0/32 7.7.7.1/32 8.8.8.2/32 9.9.9.3/32 10.10.10.4/32 11.11.11.5/32 12.12.12.6/32 13.13.13.7/32 14.14.14.8/32 15.15.15.9/32 16.16.16.10/32 17.17.17.11/32 18.18.18.12/32 19.19.19.13/32 20.20.20.14/32 21.21.21.15/32 22.22.22.16/32 23.23.23.17/32 24.24.24.18/32 25.25.25.19/32 26.26.26.20/32 27.27.27.21/32 28.28.28.22/32 29.29.29.23/32 30.30.30.24/32 31.31.31.25/32 32.32.32.26/32 33.33.33.27/32 34.34.34.28/32 35.35.35.29/32 36.36.36.30/32 37.37.37.31/32 38.38.38.32/32 39.39.39.33/32 40.40.40.34/32 41.41.41.35/32 42.42.42.36/32 43.43.43.37/32 44.44.44.38/32 45.45.45.39/32 46.46.46.40/32 47.47.47.41/32 48.48.48.42/32 49.49.49.43/32 50.50.50.44/32 51.51.51.45/32 52.52.52.46/32 53.53.53.47/32 54.54.54.48/32 55.55.55.49/32 56.56.56.50/32 57.57.57.51/32 58.58.58.52/32 59.59.59.53/32 60.60.60.54/32 61.61.61.55/32 62.62.62.56/32 63.63.63.57/32 64.64.64.58/32 65.65.65.59/32 66.66.66.60/32 67.67.67.61/32 68.68.68.62/32 69.69.69.63/32 70.70.70.64/32 71.71.71.65/32 72.72.72.66/32 73.73.73.67/32 74.74.74.68/32 75.75.75.69/32 76.76.76.70/32 77.77.77.71/32 78.78.78.72/32 79.79.79.73/32 80.80.80.74/32 81.81.81.75/32 82.82.82.76/32 83.83.83.77/32 84.84.84.78/32 85.85.85.79/32 86.86.86.80/32 87.87.87.81/32 88.88.88.82/32 89.89.89.83/32 90.90.90.84/32 91.91.91.85/32 92.92.92.86/32 93.93.93.87/32 94.94.94.88/32 95.95.95.89/32 96.96.96.90/32 97.97.97.91/32 98.98.98.92/32 99.99.99.93/32 100.100.100.94/32 101.101.101.95/32 102.102.102.96/32 103.103.103.97/32 104.104.104.98/32 105.105.105.99/32 106.106.106.100/32 107.107.107.101/32 108.108.108.102/32 109.109.109.103/32 110.110.110.104/32 111.111.111.105/32 112.112.112.106/32 113.113.113.107/32 114.114.114.108/32 115.115.115.109/32 116.116.116.110/32 117.117.117.111/32 118.118.118.112/32 119.119.119.113/32 120.120.120.114/32 121.121.121.115/32 122.122.122.116/32 123.123.123.117/32 124.124.124.118/32 125.125.125.119/32 126.126.126.120/32 127.127.127.121/32 128.128.128.122/32 129.129.129.123/32 130.130.130.124/32 131.131.131.125/32 132.132.132.126/32 133.133.133.127/32 134.134.134.128/32 135.135.135.129/32 136.136.136.130/32 137.137.137.131/32 138.138.138.132/32 139.139.139.133/32 140.140.140.134/32 141.141.141.135/32 142.142.142.136/32 143.143.143.137/32 144.144.144.138/32 145.145.145.139/32 146.146.146.140/32 147.147.147.141/32 148.148.148.142/32 149.149.149.143/32 150.150.150.144/32 151.151.151.145/32 152.152.152.146/32 153.153.153.147/32 154.154.154.148/32 155.155.155.149/32 156.156.156.150/32 157.157.157.151/32 158.158.158.152/32 159.159.159.153/32 160.160.160.154/32 161.161.161.155/32 162.162.162.156/32 163.163.163.157/32 164.164.164.158/32 165.165.165.159/32 166.166.166.160/32 167.167.167.161/32 168.168.168.162/32 169.169.169.163/32 170.170.170.164/32 171.171.171.165/32 172.172.172.166/32 173.173.173.167/32 174.174.174.168/32 
175.175.175.169/32 176.176.176.170/32 177.177.177.171/32 178.178.178.172/32 179.179.179.173/32 180.180.180.174/32 181.181.181.175/32 182.182.182.176/32 183.183.183.177/32 184.184.184.178/32 185.185.185.179/32 186.186.186.180/32 187.187.187.181/32 188.188.188.182/32 189.189.189.183/32 190.190.190.184/32 191.191.191.185/32 192.192.192.186/32 193.193.193.187/32 194.194.194.188/32 195.195.195.189/32 196.196.196.190/32 197.197.197.191/32 198.198.198.192/32 199.199.199.193/32 200.200.200.194/32 201.201.201.195/32 202.202.202.196/32 203.203.203.197/32 204.204.204.198/32 205.205.205.199/32 206.206.206.200/32 207.207.207.201/32 208.208.208.202/32 209.209.209.203/32 210.210.210.204/32 211.211.211.205/32 212.212.212.206/32 213.213.213.207/32 214.214.214.208/32 215.215.215.209/32 216.216.216.210/32 217.217.217.211/32 218.218.218.212/32 219.219.219.213/32 220.220.220.214/32 221.221.221.215/32 222.222.222.216/32 223.223.223.217/32 224.224.224.218/32 225.225.225.219/32 226.226.226.220/32 227.227.227.221/32 228.228.228.222/32 229.229.229.223/32 230.230.230.224/32 231.231.231.225/32 232.232.232.226/32 233.233.233.227/32 234.234.234.228/32 235.235.235.229/32 236.236.236.230/32 237.237.237.231/32 238.238.238.232/32 239.239.239.233/32 240.240.240.234/32 241.241.241.235/32 242.242.242.236/32 243.243.243.237/32 244.244.244.238/32 245.245.245.239/32 246.246.246.240/32 247.247.247.241/32 248.248.248.242/32 249.249.249.243/32 250.250.250.244/32 251.251.251.245/32 252.252.252.246/32 253.253.253.247/32 254.254.254.248/32 255.255.255.249/32 1.1.0.250/32 2.2.1.251/32 3.3.2.252/32 4.4.3.253/32 5.5.4.254/32 6.6.5.255/32 7.7.7.0/32 8.8.8.1/32 9.9.9.2/32 10.10.10.3/32 11.11.11.4/32 12.12.12.5/32 13.13.13.6/32 14.14.14.7/32 15.15.15.8/32 16.16.16.9/32 17.17.17.10/32 18.18.18.11/32 19.19.19.12/32 20.20.20.13/32 21.21.21.14/32 22.22.22.15/32 23.23.23.16/32 24.24.24.17/32 25.25.25.18/32 26.26.26.19/32 27.27.27.20/32 28.28.28.21/32 29.29.29.22/32 30.30.30.23/32 31.31.31.24/32 32.32.32.25/32 33.33.33.26/32 34.34.34.27/32 35.35.35.28/32 36.36.36.29/32 37.37.37.30/32 38.38.38.31/32 39.39.39.32/32 40.40.40.33/32 41.41.41.34/32 42.42.42.35/32 43.43.43.36/32 44.44.44.37/32 45.45.45.38/32 46.46.46.39/32 47.47.47.40/32 48.48.48.41/32 49.49.49.42/32 50.50.50.43/32 51.51.51.44/32 52.52.52.45/32 53.53.53.46/32 54.54.54.47/32 55.55.55.48/32 56.56.56.49/32 57.57.57.50/32 58.58.58.51/32 59.59.59.52/32 60.60.60.53/32 61.61.61.54/32 62.62.62.55/32 63.63.63.56/32 64.64.64.57/32 65.65.65.58/32 66.66.66.59/32 67.67.67.60/32 68.68.68.61/32 69.69.69.62/32 70.70.70.63/32 71.71.71.64/32 72.72.72.65/32 73.73.73.66/32 74.74.74.67/32 75.75.75.68/32 76.76.76.69/32 77.77.77.70/32 78.78.78.71/32 79.79.79.72/32 80.80.80.73/32 81.81.81.74/32 82.82.82.75/32 83.83.83.76/32 84.84.84.77/32 85.85.85.78/32 86.86.86.79/32 87.87.87.80/32 88.88.88.81/32 89.89.89.82/32 90.90.90.83/32 91.91.91.84/32 92.92.92.85/32 93.93.93.86/32 94.94.94.87/32 95.95.95.88/32 96.96.96.89/32 97.97.97.90/32 98.98.98.91/32 99.99.99.92/32 100.100.100.93/32 101.101.101.94/32 102.102.102.95/32 103.103.103.96/32 104.104.104.97/32 105.105.105.98/32 106.106.106.99/32 107.107.107.100/32 108.108.108.101/32 109.109.109.102/32 110.110.110.103/32 111.111.111.104/32 112.112.112.105/32 113.113.113.106/32 114.114.114.107/32 115.115.115.108/32 116.116.116.109/32 117.117.117.110/32 118.118.118.111/32 119.119.119.112/32 120.120.120.113/32 121.121.121.114/32 122.122.122.115/32 123.123.123.116/32 124.124.124.117/32 125.125.125.118/32 126.126.126.119/32 127.127.127.120/32 128.128.128.121/32 
129.129.129.122/32 130.130.130.123/32 131.131.131.124/32 132.132.132.125/32 133.133.133.126/32 134.134.134.127/32 135.135.135.128/32 136.136.136.129/32 137.137.137.130/32 138.138.138.131/32 139.139.139.132/32 140.140.140.133/32 141.141.141.134/32 142.142.142.135/32 143.143.143.136/32 144.144.144.137/32 145.145.145.138/32 146.146.146.139/32 147.147.147.140/32 148.148.148.141/32 149.149.149.142/32 150.150.150.143/32 151.151.151.144/32 152.152.152.145/32 153.153.153.146/32 154.154.154.147/32 155.155.155.148/32 156.156.156.149/32 157.157.157.150/32 158.158.158.151/32 159.159.159.152/32 160.160.160.153/32 161.161.161.154/32 162.162.162.155/32 163.163.163.156/32 164.164.164.157/32 165.165.165.158/32 166.166.166.159/32 167.167.167.160/32 168.168.168.161/32 169.169.169.162/32 170.170.170.163/32 171.171.171.164/32 172.172.172.165/32 173.173.173.166/32 174.174.174.167/32 175.175.175.168/32 176.176.176.169/32 177.177.177.170/32 178.178.178.171/32 179.179.179.172/32 180.180.180.173/32 181.181.181.174/32 182.182.182.175/32 183.183.183.176/32 184.184.184.177/32 185.185.185.178/32 186.186.186.179/32 187.187.187.180/32 188.188.188.181/32 189.189.189.182/32 190.190.190.183/32 191.191.191.184/32 192.192.192.185/32 193.193.193.186/32 194.194.194.187/32 195.195.195.188/32 196.196.196.189/32 197.197.197.190/32 198.198.198.191/32 199.199.199.192/32 200.200.200.193/32 201.201.201.194/32 202.202.202.195/32 203.203.203.196/32 204.204.204.197/32 205.205.205.198/32 206.206.206.199/32 207.207.207.200/32 208.208.208.201/32 209.209.209.202/32 210.210.210.203/32 211.211.211.204/32 212.212.212.205/32 213.213.213.206/32 214.214.214.207/32 215.215.215.208/32 216.216.216.209/32 217.217.217.210/32 218.218.218.211/32 219.219.219.212/32 220.220.220.213/32 221.221.221.214/32 222.222.222.215/32 223.223.223.216/32 224.224.224.217/32 225.225.225.218/32 226.226.226.219/32 227.227.227.220/32 228.228.228.221/32 229.229.229.222/32 230.230.230.223/32 231.231.231.224/32 232.232.232.225/32 233.233.233.226/32 234.234.234.227/32 235.235.235.228/32 236.236.236.229/32 237.237.237.230/32 238.238.238.231/32 239.239.239.232/32 240.240.240.233/32 241.241.241.234/32 242.242.242.235/32 243.243.243.236/32 244.244.244.237/32 245.245.245.238/32 246.246.246.239/32 247.247.247.240/32 248.248.248.241/32 249.249.249.242/32 250.250.250.243/32 251.251.251.244/32 252.252.252.245/32 253.253.253.246/32 254.254.254.247/32 255.255.255.248/32 1.1.0.249/32 2.2.1.250/32 3.3.2.251/32 4.4.3.252/32 5.5.4.253/32 6.6.5.254/32 7.7.6.255/32 8.8.8.0/32 9.9.9.1/32 # 1025 IPv6 IPs MANY_IPV6 = 2620:15c:26:307:46e2:b969:7cf4:6cf7/128 2620:15c:26:307:ab01:1178:2be9:0c3a/128 2620:15c:26:307:e0d2:2d99:280a:b6c4/128 2620:15c:26:307:eaa4:b368:002f:bf30/128 2620:15c:26:307:bad1:208a:da98:0ed6/128 2620:15c:26:307:5a04:6064:305f:1f83/128 2620:15c:26:307:7595:1be7:4c8f:ec80/128 2620:15c:26:307:7f2a:67b6:4ad9:fad6/128 2620:15c:26:307:0c0f:3da1:4710:8a2d/128 2620:15c:26:307:9a8c:a711:9c42:56ba/128 2620:15c:26:307:9c14:e39c:2853:e5e6/128 2620:15c:26:307:b135:5186:7efd:3121/128 2620:15c:26:307:fa26:89c0:0e10:db16/128 2620:15c:26:307:8d58:6b24:30a8:9954/128 2620:15c:26:307:6ae8:1b31:2875:74f1/128 2620:15c:26:307:f257:85f3:f586:996d/128 2620:15c:26:307:1b7f:dece:9d4e:4ce1/128 2620:15c:26:307:1194:9447:84ce:ffba/128 2620:15c:26:307:ff54:404c:2502:bfc0/128 2620:15c:26:307:6d8a:f451:ca97:7f21/128 2620:15c:26:307:2649:8f41:319b:3a91/128 2620:15c:26:307:ded8:2834:903a:5fb0/128 2620:15c:26:307:ee81:0873:5037:45d8/128 2620:15c:26:307:556d:3576:b219:aa78/128 2620:15c:26:307:7231:b76e:9e52:7a1c/128 
2620:15c:26:307:fe58:a099:c321:b9e9/128 2620:15c:26:307:ad67:ccc4:031b:def3/128 2620:15c:26:307:8ae0:f7f8:ffe4:6bce/128 2620:15c:26:307:3891:5226:71e5:acc3/128 2620:15c:26:307:b708:093d:d11b:3c82/128 2620:15c:26:307:5bbb:304b:96cb:729a/128 2620:15c:26:307:044e:ca1b:d34a:8919/128 2620:15c:26:307:1dd1:7c19:d12e:8e42/128 2620:15c:26:307:bd64:2e7d:fa16:e850/128 2620:15c:26:307:07e7:7bb5:bf0e:830d/128 2620:15c:26:307:af81:1a24:a2d0:e7af/128 2620:15c:26:307:5e11:8dee:54ad:a65c/128 2620:15c:26:307:0ff4:5a1d:4a4f:a4be/128 2620:15c:26:307:11e9:c7e4:ba30:3bde/128 2620:15c:26:307:d9aa:369c:6af1:d50e/128 2620:15c:26:307:5329:a74e:3699:993a/128 2620:15c:26:307:a7b9:5f80:02c0:5691/128 2620:15c:26:307:0f80:1d5c:3eed:137e/128 2620:15c:26:307:7605:a5ab:f93e:b6e0/128 2620:15c:26:307:006a:0449:ae48:af0f/128 2620:15c:26:307:50ca:5009:d1e4:28a5/128 2620:15c:26:307:e8e9:edc2:e806:382d/128 2620:15c:26:307:7e36:5fdf:a0cd:e48c/128 2620:15c:26:307:9204:9b97:4da2:6cba/128 2620:15c:26:307:b7c6:6ab6:9181:afc9/128 2620:15c:26:307:6fbf:9686:d066:7ac9/128 2620:15c:26:307:2fe5:1ed2:eb97:50d9/128 2620:15c:26:307:297d:1d7e:d960:9af6/128 2620:15c:26:307:22aa:74f5:8c64:d2ed/128 2620:15c:26:307:a057:aa8e:2235:3f71/128 2620:15c:26:307:6e3a:a8e1:d4c0:b967/128 2620:15c:26:307:dd86:0938:fd60:681f/128 2620:15c:26:307:2bac:2872:7078:9d5d/128 2620:15c:26:307:4a82:1edf:d702:9758/128 2620:15c:26:307:c4b6:8e83:1390:5125/128 2620:15c:26:307:3344:eac6:1176:0e61/128 2620:15c:26:307:9906:fde0:e93d:ed22/128 2620:15c:26:307:64b9:0616:fd8b:9ece/128 2620:15c:26:307:0213:5351:3350:eb44/128 2620:15c:26:307:40a0:7aee:d2dc:6062/128 2620:15c:26:307:9650:5731:c4c9:8d34/128 2620:15c:26:307:6297:c003:a14c:1b9c/128 2620:15c:26:307:2637:425a:8ce6:558f/128 2620:15c:26:307:1216:ecf5:e70c:41ed/128 2620:15c:26:307:bf8d:dfe1:488a:5d20/128 2620:15c:26:307:25b4:8b85:94af:64fa/128 2620:15c:26:307:5e61:20c1:804b:eb43/128 2620:15c:26:307:16ac:92bd:fd16:a67a/128 2620:15c:26:307:d83e:8140:91c6:5265/128 2620:15c:26:307:e7b4:1c64:1219:3aa9/128 2620:15c:26:307:ff48:83f7:a48c:a983/128 2620:15c:26:307:76c2:428c:535c:b7b9/128 2620:15c:26:307:f176:2f36:e927:89fe/128 2620:15c:26:307:cf25:e5ae:99b5:9893/128 2620:15c:26:307:1aba:8ce7:e200:987e/128 2620:15c:26:307:8d74:7f3b:f1ca:ec15/128 2620:15c:26:307:846d:7100:6dc6:e0e7/128 2620:15c:26:307:6e72:8f7a:166c:56dd/128 2620:15c:26:307:a1a3:60e7:88fc:349d/128 2620:15c:26:307:47a8:8588:666c:8b65/128 2620:15c:26:307:b798:661b:34a5:1e46/128 2620:15c:26:307:ffac:7a14:05d8:ef3a/128 2620:15c:26:307:bc84:ff29:e32b:f3c3/128 2620:15c:26:307:d6ca:7a4e:14e5:e317/128 2620:15c:26:307:d31f:c4f1:9394:468f/128 2620:15c:26:307:3953:c84e:4369:7894/128 2620:15c:26:307:9666:4d16:7dfa:59e9/128 2620:15c:26:307:3e3c:d2bf:6dcd:6509/128 2620:15c:26:307:4a3f:37f0:c992:8d49/128 2620:15c:26:307:6c46:9077:3e08:8f44/128 2620:15c:26:307:a72e:1958:d6ee:f0ca/128 2620:15c:26:307:5b02:fff8:1109:60d0/128 2620:15c:26:307:9a18:46b6:9c08:6b95/128 2620:15c:26:307:241c:d4c5:5f14:0ac4/128 2620:15c:26:307:b888:5fdf:0645:2e24/128 2620:15c:26:307:99f8:85ec:e1e9:f999/128 2620:15c:26:307:85a9:2a0b:2af4:029b/128 2620:15c:26:307:fd14:a818:c5af:e1d5/128 2620:15c:26:307:ad34:df3e:e413:e611/128 2620:15c:26:307:20e6:14ac:f72f:fe25/128 2620:15c:26:307:6cca:797b:3962:0d2c/128 2620:15c:26:307:abff:a0ad:8f33:7dc5/128 2620:15c:26:307:bc9e:4e88:af0e:efa2/128 2620:15c:26:307:9a44:3573:fc9c:a763/128 2620:15c:26:307:d7eb:c8fc:e58f:89e8/128 2620:15c:26:307:ea07:1b57:0b3d:0c8e/128 2620:15c:26:307:6164:952f:1dce:2c55/128 2620:15c:26:307:9deb:fd2d:5582:b45c/128 
2620:15c:26:307:9f9f:b3b9:a724:a192/128 2620:15c:26:307:6b74:f53e:86ac:c611/128 2620:15c:26:307:dd9e:2788:91f1:3f68/128 2620:15c:26:307:ee73:df4a:fa01:aa51/128 2620:15c:26:307:4a39:f5a7:0b1f:8735/128 2620:15c:26:307:6f4d:032a:10f5:51d3/128 2620:15c:26:307:71ae:bbc6:e858:2f52/128 2620:15c:26:307:2980:0715:c497:77b0/128 2620:15c:26:307:d947:0eae:8b0c:2484/128 2620:15c:26:307:7e57:7cb5:8cb1:f6cc/128 2620:15c:26:307:de6f:0749:e82a:22b6/128 2620:15c:26:307:a5f1:6ee7:b647:03a7/128 2620:15c:26:307:c181:cb26:333b:166a/128 2620:15c:26:307:a086:2a8d:fd2b:50c6/128 2620:15c:26:307:7173:ac8c:6e2a:3aca/128 2620:15c:26:307:b5bb:be8f:c9c8:a15f/128 2620:15c:26:307:9403:f62f:201e:4c73/128 2620:15c:26:307:26ae:84cd:6afd:07d3/128 2620:15c:26:307:ed5e:5e85:e838:7439/128 2620:15c:26:307:7e5c:7044:67ee:9778/128 2620:15c:26:307:2d12:489e:8ab3:00ad/128 2620:15c:26:307:173e:4ff9:88c8:594e/128 2620:15c:26:307:b130:3948:ff97:9fdd/128 2620:15c:26:307:3585:ad77:e41c:b8b3/128 2620:15c:26:307:c2ea:b432:97bc:108f/128 2620:15c:26:307:7c87:1bc6:f7f8:8e7d/128 2620:15c:26:307:a34d:2dda:537c:cec9/128 2620:15c:26:307:619d:8dd3:f2da:6c15/128 2620:15c:26:307:71af:120f:7ea3:ce9d/128 2620:15c:26:307:6861:2eba:b85d:4ebd/128 2620:15c:26:307:5a15:947f:d4cd:539e/128 2620:15c:26:307:d74c:336a:87d0:a243/128 2620:15c:26:307:552a:ca14:dd27:55f0/128 2620:15c:26:307:f5bb:8d53:c133:4c11/128 2620:15c:26:307:1d8a:208d:9ee8:3e63/128 2620:15c:26:307:5227:f445:bcf1:5ce2/128 2620:15c:26:307:0188:59bf:1a77:65b8/128 2620:15c:26:307:6956:05f6:5d3c:05a9/128 2620:15c:26:307:9212:1bae:16e1:9ff9/128 2620:15c:26:307:42ca:0432:fd1e:0609/128 2620:15c:26:307:a09b:0441:1772:4d10/128 2620:15c:26:307:7d81:bc44:3228:f973/128 2620:15c:26:307:5fd2:858c:988a:54cd/128 2620:15c:26:307:0e56:45e7:4523:02b1/128 2620:15c:26:307:1a03:96c2:4e4a:aa77/128 2620:15c:26:307:dae3:24f2:bd08:33c1/128 2620:15c:26:307:0035:3086:48cc:7b88/128 2620:15c:26:307:d54e:4237:e89b:2d34/128 2620:15c:26:307:4a84:2a8e:fbc4:0767/128 2620:15c:26:307:22f2:57e9:4873:6747/128 2620:15c:26:307:e2d7:0fc3:2227:b4c2/128 2620:15c:26:307:2012:edd2:23c5:101b/128 2620:15c:26:307:8675:dd82:0fec:46a7/128 2620:15c:26:307:977c:257e:c05d:9a19/128 2620:15c:26:307:7784:a576:5195:f76c/128 2620:15c:26:307:8980:f775:2616:6776/128 2620:15c:26:307:8baf:b1e8:0cf1:2388/128 2620:15c:26:307:5d79:19c6:b682:e43d/128 2620:15c:26:307:3b6d:cd9d:b9e6:9e0c/128 2620:15c:26:307:ad3f:1a30:a0d0:f9a1/128 2620:15c:26:307:91da:2056:c9ca:dc9c/128 2620:15c:26:307:d49e:2d5b:8efe:d81b/128 2620:15c:26:307:0db8:09bb:cdff:1c70/128 2620:15c:26:307:82b0:abf1:b9a0:25e3/128 2620:15c:26:307:e1d4:24ef:5a54:061c/128 2620:15c:26:307:ec85:6ba5:6f82:ad68/128 2620:15c:26:307:00a5:54d1:1bce:6ec0/128 2620:15c:26:307:297e:4764:578c:366c/128 2620:15c:26:307:24f2:a112:6af1:d94f/128 2620:15c:26:307:09db:674b:26c8:1286/128 2620:15c:26:307:e279:93a2:559c:c16a/128 2620:15c:26:307:dae2:6585:a97d:6b94/128 2620:15c:26:307:ff09:9fc3:b7cb:6cd0/128 2620:15c:26:307:707e:fbda:dc77:15a8/128 2620:15c:26:307:e973:b520:7673:45f0/128 2620:15c:26:307:a344:3e54:0f4a:cfca/128 2620:15c:26:307:7a64:23a6:fbaa:b7ed/128 2620:15c:26:307:f98f:38fa:6954:5424/128 2620:15c:26:307:abc2:0283:2953:077a/128 2620:15c:26:307:7b4c:64bd:69ab:91b4/128 2620:15c:26:307:d16a:2f16:81e2:c49a/128 2620:15c:26:307:a8bc:223a:dd6c:3fd6/128 2620:15c:26:307:1a7a:cc0d:4e32:b670/128 2620:15c:26:307:2e18:3068:60c2:0f93/128 2620:15c:26:307:0445:ba77:4762:ccac/128 2620:15c:26:307:03cc:ffb3:ecf7:238d/128 2620:15c:26:307:7a1b:e420:92e1:eaa2/128 2620:15c:26:307:daed:1b8c:92e0:d448/128 
2620:15c:26:307:5db3:1e96:06d4:2e2e/128 2620:15c:26:307:2d26:bb30:8d89:e8dd/128 2620:15c:26:307:6c36:0047:c1bc:e5ce/128 2620:15c:26:307:ca00:4d2e:fcab:4df7/128 2620:15c:26:307:9193:f379:0eac:f25b/128 2620:15c:26:307:0562:1439:d96a:1943/128 2620:15c:26:307:403f:4358:5587:1bb3/128 2620:15c:26:307:0e30:eb93:f4a5:d9ee/128 2620:15c:26:307:4691:d75e:fc82:504f/128 2620:15c:26:307:efac:e9e4:65f0:2317/128 2620:15c:26:307:a6d7:e437:a33c:e014/128 2620:15c:26:307:93fd:5fe0:1d2b:5604/128 2620:15c:26:307:bc72:a780:46bd:4a7a/128 2620:15c:26:307:7e88:a754:8fb0:d135/128 2620:15c:26:307:36db:f2b4:df90:f338/128 2620:15c:26:307:52c7:a5af:7098:2819/128 2620:15c:26:307:25a9:24a4:920a:9583/128 2620:15c:26:307:d02e:a49f:64d5:ad05/128 2620:15c:26:307:26ef:4ce8:4c1e:6593/128 2620:15c:26:307:19e5:fa82:40d0:63a9/128 2620:15c:26:307:1e6a:3bbc:0d2e:b652/128 2620:15c:26:307:4b17:91a2:88d7:45bb/128 2620:15c:26:307:50bd:6a4f:7fd6:0317/128 2620:15c:26:307:8629:2a53:93fd:d464/128 2620:15c:26:307:ca39:da66:e6ca:38aa/128 2620:15c:26:307:af33:485c:96f0:2462/128 2620:15c:26:307:680e:afae:718a:136d/128 2620:15c:26:307:88e1:d50e:e061:f77f/128 2620:15c:26:307:ed27:3b57:7027:d694/128 2620:15c:26:307:df54:91c5:ea6f:a705/128 2620:15c:26:307:961d:95f5:820a:106c/128 2620:15c:26:307:7f25:291a:ff4f:8272/128 2620:15c:26:307:9606:9a16:5c2e:1b90/128 2620:15c:26:307:66ce:37d1:a67f:0a3e/128 2620:15c:26:307:8894:1278:e179:9541/128 2620:15c:26:307:97aa:4693:a92d:4d8c/128 2620:15c:26:307:b87c:c115:b229:3947/128 2620:15c:26:307:a6d1:2297:eb66:5cae/128 2620:15c:26:307:5312:96a6:be67:c37b/128 2620:15c:26:307:5c55:e59b:0c83:9e9c/128 2620:15c:26:307:08e4:074a:35b8:6b76/128 2620:15c:26:307:f53c:83a1:10f5:5376/128 2620:15c:26:307:dd22:32e5:bae6:590e/128 2620:15c:26:307:566f:798a:bf2e:8b78/128 2620:15c:26:307:5aae:5b6c:891a:e5fb/128 2620:15c:26:307:b578:4e57:ab7b:ca69/128 2620:15c:26:307:06c8:993c:bda4:e0a0/128 2620:15c:26:307:6470:f6bd:eed3:6e0c/128 2620:15c:26:307:f010:b1a7:8379:68fc/128 2620:15c:26:307:647d:898f:8160:3ebc/128 2620:15c:26:307:77b6:caaa:e109:6bf2/128 2620:15c:26:307:873f:df9b:ca1a:1bbb/128 2620:15c:26:307:94e9:6df9:147f:f6ed/128 2620:15c:26:307:3f58:bfe4:8811:27b8/128 2620:15c:26:307:3163:b8b8:de17:e745/128 2620:15c:26:307:7569:f30c:c584:bb8c/128 2620:15c:26:307:cc55:4f71:f9d7:9282/128 2620:15c:26:307:c7bc:c613:ac2d:638e/128 2620:15c:26:307:8878:ea86:68e1:6d5e/128 2620:15c:26:307:214a:7670:a358:38ec/128 2620:15c:26:307:3113:31d0:4b2d:60b0/128 2620:15c:26:307:30f8:88a4:f18d:c956/128 2620:15c:26:307:ea72:1e01:8f5a:17ba/128 2620:15c:26:307:3260:621a:c63a:b8fc/128 2620:15c:26:307:cccf:14eb:fb7b:60f0/128 2620:15c:26:307:3945:a511:0369:4e21/128 2620:15c:26:307:bfab:4992:73c8:5a35/128 2620:15c:26:307:cc35:0c78:0dd0:e046/128 2620:15c:26:307:a5fe:1dac:3fd6:871c/128 2620:15c:26:307:86b1:4dfe:f8c8:1bbf/128 2620:15c:26:307:6eb6:1541:d8b2:1b27/128 2620:15c:26:307:ea28:f9b5:2e9e:307e/128 2620:15c:26:307:34da:3172:ce45:028d/128 2620:15c:26:307:36fd:7287:9375:8e58/128 2620:15c:26:307:ce21:dfa3:d2a5:a5df/128 2620:15c:26:307:c5d4:4d45:56fa:043d/128 2620:15c:26:307:febc:3d3f:a7e3:72c2/128 2620:15c:26:307:f52b:3310:efe8:08eb/128 2620:15c:26:307:cd3b:653e:c402:649c/128 2620:15c:26:307:fd6e:1744:6c0a:38d9/128 2620:15c:26:307:d8a7:ddaa:7e04:3c6e/128 2620:15c:26:307:a1b4:fe5a:ad74:4c17/128 2620:15c:26:307:2f7b:3ec3:cf5f:8234/128 2620:15c:26:307:4ab9:5f75:4533:3482/128 2620:15c:26:307:f517:8e7c:9e18:053e/128 2620:15c:26:307:cad7:b67c:85ef:b82e/128 2620:15c:26:307:5430:d49b:700c:efe9/128 2620:15c:26:307:6db5:7483:5bad:e5df/128 
2620:15c:26:307:4f6c:26b4:1357:1d9f/128 2620:15c:26:307:a8d0:e8e2:25bf:ee70/128 2620:15c:26:307:fb5c:f1c3:4022:f8d5/128 2620:15c:26:307:4633:97cc:953e:e84e/128 2620:15c:26:307:b863:67d5:4653:0794/128 2620:15c:26:307:3f5d:f5ed:2090:613d/128 2620:15c:26:307:589e:a58a:7b3f:fac0/128 2620:15c:26:307:4d15:17fe:0e47:8460/128 2620:15c:26:307:5576:0def:6d15:fa13/128 2620:15c:26:307:76e5:a766:0fd5:7357/128 2620:15c:26:307:fa69:aaf0:7587:ff6c/128 2620:15c:26:307:5934:f78f:e8b2:db32/128 2620:15c:26:307:3db9:81ad:d878:5570/128 2620:15c:26:307:5eec:3f5a:c919:7880/128 2620:15c:26:307:e6c1:5f09:3131:22eb/128 2620:15c:26:307:7b2b:0a0d:3eff:25c1/128 2620:15c:26:307:5463:18c4:425f:c65a/128 2620:15c:26:307:c7f3:65ba:3927:86e9/128 2620:15c:26:307:3b08:c467:61bf:32d2/128 2620:15c:26:307:81ab:3ede:b388:0919/128 2620:15c:26:307:785e:2a07:1f7c:0bb4/128 2620:15c:26:307:5ffe:85b3:deae:8bcd/128 2620:15c:26:307:aedc:6ae4:693c:4c8b/128 2620:15c:26:307:4622:3ce1:9d21:2e6d/128 2620:15c:26:307:d2e6:5df4:3946:4d31/128 2620:15c:26:307:e9eb:7735:3e41:c2ba/128 2620:15c:26:307:20c3:7c46:2065:f845/128 2620:15c:26:307:b9d0:b579:f430:eb53/128 2620:15c:26:307:49cc:3f87:4160:5fde/128 2620:15c:26:307:343f:830b:111a:5da3/128 2620:15c:26:307:c60d:aecf:3ff8:9678/128 2620:15c:26:307:4fc5:ce5b:d230:ebd4/128 2620:15c:26:307:0334:b8ff:e068:8b0e/128 2620:15c:26:307:1ee9:9b45:1275:e031/128 2620:15c:26:307:caa6:6b6f:01c0:1c8f/128 2620:15c:26:307:678a:5c29:577b:8f46/128 2620:15c:26:307:5ffb:eb90:a8ac:2201/128 2620:15c:26:307:0a2e:a80e:9b5e:b98f/128 2620:15c:26:307:b7c6:98c6:ade4:4def/128 2620:15c:26:307:bdc8:a6d6:9c7b:d6e0/128 2620:15c:26:307:59c1:9153:e868:3707/128 2620:15c:26:307:d3d8:148b:0030:402d/128 2620:15c:26:307:8a38:6e2b:00fa:cd10/128 2620:15c:26:307:06cd:d1cf:2813:978c/128 2620:15c:26:307:ff30:2c69:578f:4e49/128 2620:15c:26:307:18eb:5054:bc5f:5a05/128 2620:15c:26:307:b785:6130:1664:f271/128 2620:15c:26:307:9c5a:1768:2c72:131b/128 2620:15c:26:307:3535:aa70:1276:91fd/128 2620:15c:26:307:b8f2:bc9c:ed19:bc32/128 2620:15c:26:307:eae9:c995:c525:0cd9/128 2620:15c:26:307:c68c:aec2:7479:e041/128 2620:15c:26:307:3635:9c06:e722:a839/128 2620:15c:26:307:22df:0f93:b063:6a72/128 2620:15c:26:307:d162:6612:4f2b:5af0/128 2620:15c:26:307:59cf:bbb3:3c36:3857/128 2620:15c:26:307:36ee:1b3c:9a9b:95bb/128 2620:15c:26:307:40d8:e1f7:0a58:b09d/128 2620:15c:26:307:c4d5:b8d5:3278:e6f4/128 2620:15c:26:307:b590:29ad:a864:4d35/128 2620:15c:26:307:4d2f:0dad:fdac:85e7/128 2620:15c:26:307:8372:9899:5ff6:dbc3/128 2620:15c:26:307:6eda:8f68:187c:92f8/128 2620:15c:26:307:71b8:da5c:7086:4306/128 2620:15c:26:307:01d2:1cdf:50cd:17b1/128 2620:15c:26:307:a34f:cbbc:90b2:64d7/128 2620:15c:26:307:a743:8647:71b5:4e51/128 2620:15c:26:307:271a:653b:0829:2441/128 2620:15c:26:307:96ab:22f1:d328:51d3/128 2620:15c:26:307:96f3:17fa:7a65:05b8/128 2620:15c:26:307:9856:f24b:cab5:37a4/128 2620:15c:26:307:ba50:f848:1ab3:7017/128 2620:15c:26:307:f195:91b5:5c37:c26c/128 2620:15c:26:307:32cf:64a1:7ef5:8b76/128 2620:15c:26:307:fcb1:4e6f:85ba:518f/128 2620:15c:26:307:cc27:dc10:53e7:a4ab/128 2620:15c:26:307:750c:1653:09fc:15de/128 2620:15c:26:307:9a28:92e7:40e6:f766/128 2620:15c:26:307:ae91:9962:6416:c69d/128 2620:15c:26:307:43ea:5db1:34dd:3071/128 2620:15c:26:307:5c32:9098:d351:e415/128 2620:15c:26:307:9197:ec4d:c759:4b6e/128 2620:15c:26:307:afe1:f7c5:8adc:f515/128 2620:15c:26:307:2a8d:cac0:93eb:fd46/128 2620:15c:26:307:6c9a:9877:9f67:eb5e/128 2620:15c:26:307:e62a:0bd0:8577:3cdf/128 2620:15c:26:307:56e8:def5:366a:e1e6/128 2620:15c:26:307:aafe:8caa:c6c2:5937/128 
2620:15c:26:307:ce56:ef2e:8516:0289/128 2620:15c:26:307:42b3:94c8:eca0:e46a/128 2620:15c:26:307:da1b:f38d:f115:b051/128 2620:15c:26:307:c5b1:5dc8:49a7:4864/128 2620:15c:26:307:dcdf:fcb8:5ff8:36d1/128 2620:15c:26:307:983d:1763:1a3e:bac1/128 2620:15c:26:307:ea0d:3c19:7499:5595/128 2620:15c:26:307:ddb2:730b:5388:e584/128 2620:15c:26:307:ebf4:e4f5:f68e:562d/128 2620:15c:26:307:1377:e60a:fd64:1d22/128 2620:15c:26:307:992e:9d04:266e:c7cf/128 2620:15c:26:307:1ace:5186:0add:43f9/128 2620:15c:26:307:d63d:6af4:8ceb:7a15/128 2620:15c:26:307:80e4:76a8:1a7d:38aa/128 2620:15c:26:307:3a14:b5b1:72fe:7e2d/128 2620:15c:26:307:0644:3309:326f:3c28/128 2620:15c:26:307:b29c:255f:e896:d40c/128 2620:15c:26:307:01cb:6b90:9341:c11a/128 2620:15c:26:307:5e5d:bcf0:3c66:8c63/128 2620:15c:26:307:3931:86ee:2387:9eab/128 2620:15c:26:307:cf07:4c0c:6fe5:07f4/128 2620:15c:26:307:03be:da0f:c5e1:59a9/128 2620:15c:26:307:8da1:624f:ed0f:a8f5/128 2620:15c:26:307:865d:d2e6:f4e5:477e/128 2620:15c:26:307:1400:a512:06e7:9c46/128 2620:15c:26:307:8f22:7fd4:af34:f967/128 2620:15c:26:307:d2d3:a18c:91a0:3444/128 2620:15c:26:307:ca88:5fa6:20f4:ff01/128 2620:15c:26:307:753c:a9c0:1cf1:6cf8/128 2620:15c:26:307:4328:15ab:3323:5532/128 2620:15c:26:307:c12a:9478:3209:76fc/128 2620:15c:26:307:3251:acd0:0e21:e21e/128 2620:15c:26:307:be78:7552:2418:09c8/128 2620:15c:26:307:4c0e:54a0:6ec1:3dd9/128 2620:15c:26:307:73c0:1e6e:aec1:4ed2/128 2620:15c:26:307:9440:cf9c:1da1:573a/128 2620:15c:26:307:c1f4:87ab:f884:d539/128 2620:15c:26:307:d2d6:b8ac:8916:f8b9/128 2620:15c:26:307:89e3:8c52:5e65:556d/128 2620:15c:26:307:e30f:278a:7b3e:a2e3/128 2620:15c:26:307:a11f:011c:6f27:1cbf/128 2620:15c:26:307:1f52:0709:3d88:17c9/128 2620:15c:26:307:dbd6:5604:1b2f:2bfd/128 2620:15c:26:307:c6ab:99a0:e31b:5cc9/128 2620:15c:26:307:6fb6:1e49:e716:fc49/128 2620:15c:26:307:6f58:2f1d:40cf:4c50/128 2620:15c:26:307:ebfa:4a2c:a111:c308/128 2620:15c:26:307:3579:8568:d4df:977a/128 2620:15c:26:307:fd1d:7255:14b2:fa1d/128 2620:15c:26:307:2b15:56e8:7473:1efe/128 2620:15c:26:307:41ef:2b4b:04c4:3962/128 2620:15c:26:307:367e:7e06:20d0:e8e5/128 2620:15c:26:307:0777:aafa:8615:c10c/128 2620:15c:26:307:ea03:6aa5:2762:53cf/128 2620:15c:26:307:525a:8561:5427:7a19/128 2620:15c:26:307:9be1:8701:6d1b:b11b/128 2620:15c:26:307:394e:49bb:26c2:ebd5/128 2620:15c:26:307:76b5:5b09:7fc7:451e/128 2620:15c:26:307:450a:d871:6246:8574/128 2620:15c:26:307:6dd1:0e2c:1ab2:8975/128 2620:15c:26:307:0b0b:b74a:7364:ed4f/128 2620:15c:26:307:186c:c4d2:ffd3:01e1/128 2620:15c:26:307:e9e2:e06f:15b4:5f39/128 2620:15c:26:307:d006:6f36:2118:e911/128 2620:15c:26:307:50dc:795e:01a9:3a1a/128 2620:15c:26:307:63e1:9b45:1607:d4ba/128 2620:15c:26:307:8446:d60a:4676:8068/128 2620:15c:26:307:1f25:9166:c0fe:ee84/128 2620:15c:26:307:3e4b:b597:3c69:0581/128 2620:15c:26:307:f0ab:8fc0:1e77:4b3a/128 2620:15c:26:307:0473:b470:46f7:b100/128 2620:15c:26:307:64f6:b777:083f:4650/128 2620:15c:26:307:d7e5:bf2c:46a9:b2da/128 2620:15c:26:307:107f:d090:55ee:9af7/128 2620:15c:26:307:a7e9:bfe5:e66a:e5bb/128 2620:15c:26:307:c1e0:0a99:b587:ebbe/128 2620:15c:26:307:cf4a:297d:d3fd:d684/128 2620:15c:26:307:f0f8:40d1:a9c5:d3a3/128 2620:15c:26:307:9476:a6ac:9b26:aa05/128 2620:15c:26:307:5532:aafe:a4c8:0191/128 2620:15c:26:307:b54d:2d24:29f8:04b4/128 2620:15c:26:307:522b:17e6:8d42:1b08/128 2620:15c:26:307:b0ba:1683:afa9:95aa/128 2620:15c:26:307:1735:ee78:ae90:98d8/128 2620:15c:26:307:8a73:6be0:aa91:4674/128 2620:15c:26:307:3a44:a44b:ac47:fbce/128 2620:15c:26:307:13c4:96c7:8d3a:eae8/128 2620:15c:26:307:d1bf:cad0:9dcd:3260/128 
2620:15c:26:307:091a:cea9:0d7c:7934/128 2620:15c:26:307:54a2:8883:a27d:9eae/128 2620:15c:26:307:bede:a39b:f9bf:1344/128 2620:15c:26:307:02e2:b463:eaab:5fac/128 2620:15c:26:307:36f8:ae78:d8d9:9014/128 2620:15c:26:307:03a0:2d81:ab9f:98c2/128 2620:15c:26:307:6aec:8805:6fff:6518/128 2620:15c:26:307:f3ea:71f1:2b54:8838/128 2620:15c:26:307:a2af:4694:b42c:696d/128 2620:15c:26:307:12c9:1eec:ec75:3df7/128 2620:15c:26:307:1ccc:29d2:e0c7:1cf7/128 2620:15c:26:307:27db:3baa:c9e0:3f16/128 2620:15c:26:307:5917:a4b1:b09d:17d3/128 2620:15c:26:307:a2c7:e775:29f3:2b6c/128 2620:15c:26:307:61cf:43e3:d091:66d3/128 2620:15c:26:307:2686:eb0f:3843:7691/128 2620:15c:26:307:70d0:b065:1c70:668e/128 2620:15c:26:307:390b:564b:f7c2:528f/128 2620:15c:26:307:4175:4d84:168d:6123/128 2620:15c:26:307:9b21:63a8:c03c:dbce/128 2620:15c:26:307:1832:6851:57d6:a6e0/128 2620:15c:26:307:048c:0782:24be:d9db/128 2620:15c:26:307:a4ee:2705:792f:ef8a/128 2620:15c:26:307:764a:4119:8b8b:b874/128 2620:15c:26:307:8b0f:1f3b:a898:8d10/128 2620:15c:26:307:44ac:7cb3:b537:eb57/128 2620:15c:26:307:d171:7831:118a:266e/128 2620:15c:26:307:21b1:abeb:9a39:0736/128 2620:15c:26:307:56a5:54be:acec:1cf9/128 2620:15c:26:307:9f4d:8407:58a0:70a6/128 2620:15c:26:307:35da:4282:e4b6:62c9/128 2620:15c:26:307:d06f:d9e0:0854:34c6/128 2620:15c:26:307:1dc6:2efa:7b61:4263/128 2620:15c:26:307:fcb9:9e11:81b1:62a3/128 2620:15c:26:307:2dc7:d430:4ba3:31e6/128 2620:15c:26:307:93e4:52ff:e5ea:d4ce/128 2620:15c:26:307:29f7:d960:84bc:b3c1/128 2620:15c:26:307:4d81:d4be:69fe:4c76/128 2620:15c:26:307:4c00:db74:0d6d:3771/128 2620:15c:26:307:e0e7:bce7:bb31:d52a/128 2620:15c:26:307:4d79:92af:a97e:0612/128 2620:15c:26:307:9bfe:cfa4:a8d2:460d/128 2620:15c:26:307:4574:b868:77b6:7682/128 2620:15c:26:307:b24e:8c93:db3f:a0ed/128 2620:15c:26:307:fd98:a97a:1ca3:fd15/128 2620:15c:26:307:dc84:17b8:69da:26e6/128 2620:15c:26:307:c52a:2f9a:0d55:f06c/128 2620:15c:26:307:7443:d0c5:cfc8:c9bf/128 2620:15c:26:307:c9cb:33c6:84ba:836e/128 2620:15c:26:307:1f23:9c24:5085:60d4/128 2620:15c:26:307:5bbe:b5df:922b:85bd/128 2620:15c:26:307:458e:41e4:39df:6135/128 2620:15c:26:307:ffed:1186:4865:4f7a/128 2620:15c:26:307:52de:58d6:b20d:9c13/128 2620:15c:26:307:8649:bd83:beaf:082a/128 2620:15c:26:307:cf79:f3c8:54ef:44bd/128 2620:15c:26:307:f608:1ba8:e838:71ef/128 2620:15c:26:307:3169:31af:f5ac:2948/128 2620:15c:26:307:401c:bcac:d651:f744/128 2620:15c:26:307:bc86:d127:98d2:0354/128 2620:15c:26:307:675f:b57a:b90c:16d3/128 2620:15c:26:307:33ac:dca6:adcb:44c2/128 2620:15c:26:307:806f:e0d7:43a8:29e0/128 2620:15c:26:307:430e:ce43:069c:81b2/128 2620:15c:26:307:3497:936f:df34:6c94/128 2620:15c:26:307:26be:e8f2:00c7:e1cc/128 2620:15c:26:307:a678:935c:c639:e0c5/128 2620:15c:26:307:239c:9679:f89e:cd70/128 2620:15c:26:307:ce19:0510:c546:17e4/128 2620:15c:26:307:e8fd:53de:b48e:4ed3/128 2620:15c:26:307:0c1d:b951:2ee5:6de8/128 2620:15c:26:307:dce4:5342:0463:77a9/128 2620:15c:26:307:32f7:da94:cec5:d1d4/128 2620:15c:26:307:b31c:09d9:c293:140b/128 2620:15c:26:307:9bc9:6dbe:af1f:6844/128 2620:15c:26:307:c907:518c:d70b:2200/128 2620:15c:26:307:40f7:e3ad:0d03:6f55/128 2620:15c:26:307:8e36:e4b0:0733:2e75/128 2620:15c:26:307:2d3c:e847:d2e7:4242/128 2620:15c:26:307:5c67:5664:b4ff:2a5c/128 2620:15c:26:307:2929:ff26:28d8:b80d/128 2620:15c:26:307:fd06:f82a:1b1e:9716/128 2620:15c:26:307:8157:30fd:3160:afc2/128 2620:15c:26:307:ed46:ae28:68a8:8fd6/128 2620:15c:26:307:7ad5:b752:29fb:392b/128 2620:15c:26:307:d9c5:b3bd:5c59:475c/128 2620:15c:26:307:a16f:1f10:178d:5c00/128 2620:15c:26:307:0a48:67d8:22c2:53fa/128 
2620:15c:26:307:91fe:169a:0cce:d731/128 2620:15c:26:307:1994:2a48:de60:90e9/128 2620:15c:26:307:84c5:93d3:1174:b3d2/128 2620:15c:26:307:3a54:97c8:b829:a2be/128 2620:15c:26:307:d06d:542f:1cbe:c880/128 2620:15c:26:307:14d8:5cdd:abab:5f21/128 2620:15c:26:307:c1be:5b18:3cba:dbde/128 2620:15c:26:307:2ab2:4f82:7791:d6a0/128 2620:15c:26:307:2837:6dd0:cc51:4946/128 2620:15c:26:307:71ca:724d:d25d:f484/128 2620:15c:26:307:61f6:b538:bef2:4589/128 2620:15c:26:307:b038:0d1f:146f:bc28/128 2620:15c:26:307:4a3f:66d1:792f:5c92/128 2620:15c:26:307:961a:523b:05d3:dde6/128 2620:15c:26:307:3907:d566:b675:5b0d/128 2620:15c:26:307:56d7:695b:c6c9:01c2/128 2620:15c:26:307:a3fb:476a:0bbb:ea96/128 2620:15c:26:307:2116:5e42:f765:217a/128 2620:15c:26:307:4d6d:5b3b:3806:5aa5/128 2620:15c:26:307:a92e:df72:7e3d:0f24/128 2620:15c:26:307:7977:84b6:cc09:f94f/128 2620:15c:26:307:7a22:d8b1:b3c0:e78f/128 2620:15c:26:307:e554:159e:f610:5969/128 2620:15c:26:307:9044:8adb:b400:6af9/128 2620:15c:26:307:2313:c7b2:21f1:bf17/128 2620:15c:26:307:7d5e:d53b:e4ce:395d/128 2620:15c:26:307:4856:9c45:e9cf:255e/128 2620:15c:26:307:9ee6:e883:57ff:ddfd/128 2620:15c:26:307:01cb:7dad:53b9:4a44/128 2620:15c:26:307:86c7:af63:4e5d:9581/128 2620:15c:26:307:84ca:3f6b:e3ae:d7c8/128 2620:15c:26:307:138d:ca7c:2bce:241c/128 2620:15c:26:307:c084:f881:4acf:62ce/128 2620:15c:26:307:4ef3:c9f2:279b:595d/128 2620:15c:26:307:03cd:cf6a:1e54:4e35/128 2620:15c:26:307:d95e:549e:a0e1:1d83/128 2620:15c:26:307:b7ed:9321:fe25:9743/128 2620:15c:26:307:b1c1:096d:e10c:54d1/128 2620:15c:26:307:d38e:a788:8be6:7e66/128 2620:15c:26:307:5f84:4dc4:5a42:5846/128 2620:15c:26:307:c456:0810:51c6:eb8b/128 2620:15c:26:307:92eb:7b2f:5a25:c2f5/128 2620:15c:26:307:2753:7e32:0b3a:8601/128 2620:15c:26:307:3828:ad22:d2b6:ad0e/128 2620:15c:26:307:4cb5:4c15:18fc:e53e/128 2620:15c:26:307:bd0c:d7aa:1349:8648/128 2620:15c:26:307:f588:67d7:bcd0:dcb1/128 2620:15c:26:307:e7de:3137:6775:c82b/128 2620:15c:26:307:788e:d166:d340:ce39/128 2620:15c:26:307:4f30:247b:a89b:682b/128 2620:15c:26:307:36d6:ed49:76c5:517d/128 2620:15c:26:307:0c64:ee30:26eb:e012/128 2620:15c:26:307:fe5e:54df:b074:2284/128 2620:15c:26:307:2841:3e1f:a88a:5d6b/128 2620:15c:26:307:32e3:e033:c58f:7054/128 2620:15c:26:307:4eaf:8811:2b8f:06eb/128 2620:15c:26:307:3544:f8de:db04:d2d7/128 2620:15c:26:307:1b4c:4277:0fe7:cab0/128 2620:15c:26:307:3192:6cc1:09be:af7c/128 2620:15c:26:307:586a:7382:bef3:a07d/128 2620:15c:26:307:8313:efee:3202:8c73/128 2620:15c:26:307:16e5:28e6:5fe3:66ac/128 2620:15c:26:307:d38f:f216:a9a9:0367/128 2620:15c:26:307:3745:e99b:057e:1fb4/128 2620:15c:26:307:1ce3:db27:145a:c5fe/128 2620:15c:26:307:1d7a:663d:a84a:c2f9/128 2620:15c:26:307:0255:f969:8ea7:8066/128 2620:15c:26:307:9ce7:bfac:4875:8ae9/128 2620:15c:26:307:dc84:21ba:5d37:d77e/128 2620:15c:26:307:6d10:e9ae:436a:3b59/128 2620:15c:26:307:d70a:d014:5071:f2e0/128 2620:15c:26:307:ef91:e499:580a:d18c/128 2620:15c:26:307:2355:9f31:892b:f60d/128 2620:15c:26:307:2d3b:807c:8f04:cded/128 2620:15c:26:307:45d5:a82a:449c:dcaf/128 2620:15c:26:307:7406:645c:c137:f905/128 2620:15c:26:307:9ef8:d874:031c:a19a/128 2620:15c:26:307:4b1a:4ae6:ffb0:d05b/128 2620:15c:26:307:b68b:c6d4:403c:5ae5/128 2620:15c:26:307:3dd9:87cf:c0b2:57f3/128 2620:15c:26:307:025c:8820:f5a1:9e63/128 2620:15c:26:307:7a4b:9027:4a7d:6e6b/128 2620:15c:26:307:35bb:224e:e678:2afc/128 2620:15c:26:307:a226:10ac:335d:b9df/128 2620:15c:26:307:ea5f:3738:44b4:db44/128 2620:15c:26:307:e10e:87db:1ca0:e900/128 2620:15c:26:307:a6c4:d93a:c3fb:54ba/128 2620:15c:26:307:32c8:bab0:2b8c:749a/128 
2620:15c:26:307:e760:7605:756c:9f21/128 2620:15c:26:307:c896:1c6d:cf52:f58d/128 2620:15c:26:307:e145:6f29:283a:ca85/128 2620:15c:26:307:9687:9d6b:45c9:8dfc/128 2620:15c:26:307:58ef:2f04:1a4f:7c3c/128 2620:15c:26:307:28cd:b7ad:2bdf:8050/128 2620:15c:26:307:7142:7f94:1197:3955/128 2620:15c:26:307:c12c:d7f1:23c6:d18a/128 2620:15c:26:307:3891:25a7:8dd7:6181/128 2620:15c:26:307:1806:d34b:2d5b:1a62/128 2620:15c:26:307:a846:3cf8:e605:455c/128 2620:15c:26:307:6944:9286:fdb4:f6f1/128 2620:15c:26:307:b9e1:e349:b7ad:7cbf/128 2620:15c:26:307:26ed:08c9:beae:5f64/128 2620:15c:26:307:b49b:f197:c120:124e/128 2620:15c:26:307:62ea:09cb:b72c:0a39/128 2620:15c:26:307:df18:4ca0:047b:26fb/128 2620:15c:26:307:5428:fb8c:d02a:51a1/128 2620:15c:26:307:8a66:0a3b:55b1:1e59/128 2620:15c:26:307:44b5:5608:d739:d5d1/128 2620:15c:26:307:61c1:88c2:5587:2083/128 2620:15c:26:307:4256:8793:9c32:36d2/128 2620:15c:26:307:247d:cc24:e45e:1429/128 2620:15c:26:307:b076:b046:49bd:b404/128 2620:15c:26:307:a77c:4391:be00:608c/128 2620:15c:26:307:dcb0:ab70:1936:bafc/128 2620:15c:26:307:ff28:e36f:1ee3:6230/128 2620:15c:26:307:577e:907a:f364:6b00/128 2620:15c:26:307:76d5:07db:5e74:8489/128 2620:15c:26:307:e7a5:d668:7105:9d43/128 2620:15c:26:307:e8c8:06d8:1aaa:5641/128 2620:15c:26:307:3bee:a466:fcc4:0814/128 2620:15c:26:307:d6d1:fce2:dbce:e816/128 2620:15c:26:307:14ce:3b31:427c:e028/128 2620:15c:26:307:e032:7ef1:8bb4:1e36/128 2620:15c:26:307:be0a:023d:6ec4:3bcd/128 2620:15c:26:307:b231:d784:3142:2182/128 2620:15c:26:307:e1df:6620:37cb:50c5/128 2620:15c:26:307:b4f1:c4f7:e720:3eb3/128 2620:15c:26:307:a669:641c:01f5:1e4b/128 2620:15c:26:307:4dca:be92:b200:1352/128 2620:15c:26:307:38ff:923d:5a10:c5d7/128 2620:15c:26:307:1eea:5e8c:5341:815e/128 2620:15c:26:307:9885:d867:58e5:2bed/128 2620:15c:26:307:7af3:59f1:adb6:b15d/128 2620:15c:26:307:6f50:7c57:2a71:e9e5/128 2620:15c:26:307:bae3:de4c:58af:92cf/128 2620:15c:26:307:0f11:eba5:7023:6336/128 2620:15c:26:307:c5dd:3612:6bba:6738/128 2620:15c:26:307:389c:3c59:4ab7:5238/128 2620:15c:26:307:f0d0:d96a:ce90:e5b1/128 2620:15c:26:307:2861:8a06:9cda:c8c9/128 2620:15c:26:307:eba7:cddd:3357:7f10/128 2620:15c:26:307:d2c5:bbe2:19f9:90c6/128 2620:15c:26:307:dbab:e863:09e6:c268/128 2620:15c:26:307:bcb0:546f:dc38:da86/128 2620:15c:26:307:1dc4:5492:b60a:bc48/128 2620:15c:26:307:71b6:bc3d:809c:629a/128 2620:15c:26:307:93be:af93:0a7e:51de/128 2620:15c:26:307:cf52:acb8:62b9:13ed/128 2620:15c:26:307:182a:eaa0:93d5:0ac5/128 2620:15c:26:307:44b6:c64e:911d:577b/128 2620:15c:26:307:7f64:afa7:35a7:d15f/128 2620:15c:26:307:f5ef:2508:22bf:edc7/128 2620:15c:26:307:7569:fed8:8cca:dffe/128 2620:15c:26:307:ede0:d9d5:de1c:1316/128 2620:15c:26:307:cc27:2e00:11f6:1f13/128 2620:15c:26:307:659b:5c19:cac2:911c/128 2620:15c:26:307:3983:434e:0b9d:3425/128 2620:15c:26:307:7133:5e3f:0548:a80c/128 2620:15c:26:307:dabf:e426:eff6:cfa1/128 2620:15c:26:307:1f71:0955:c588:051f/128 2620:15c:26:307:3a75:71d7:bfb6:d24c/128 2620:15c:26:307:ec70:7154:7130:5273/128 2620:15c:26:307:25e2:c6fc:0718:46f6/128 2620:15c:26:307:3789:61ae:c457:e74d/128 2620:15c:26:307:0926:e450:179a:b0c4/128 2620:15c:26:307:835b:e147:212f:b9ff/128 2620:15c:26:307:8c1e:088b:50b7:bdd3/128 2620:15c:26:307:69b8:f2c2:bf18:b0e8/128 2620:15c:26:307:ff00:726c:b6c7:f7e6/128 2620:15c:26:307:a474:4ac6:d222:9b3a/128 2620:15c:26:307:2428:0a2f:b5a1:0f9e/128 2620:15c:26:307:d4de:cd1f:84e9:9f8a/128 2620:15c:26:307:8060:95b5:3db9:421e/128 2620:15c:26:307:8602:d1ef:41b3:7ff0/128 2620:15c:26:307:1e41:7a25:89c4:973a/128 2620:15c:26:307:ad94:4e96:95d8:9017/128 
2620:15c:26:307:e6b5:c46f:3a79:8850/128 2620:15c:26:307:df83:ffc5:66db:b4b7/128 2620:15c:26:307:ae87:8922:55fe:957b/128 2620:15c:26:307:d9a2:c611:3848:4661/128 2620:15c:26:307:b85d:7c4f:4ba4:09e5/128 2620:15c:26:307:a225:5334:7a9b:ff28/128 2620:15c:26:307:50d5:4469:0c95:97fd/128 2620:15c:26:307:cff7:7007:0e3a:da7a/128 2620:15c:26:307:30ea:3dd0:6e53:97fb/128 2620:15c:26:307:1f72:1b0c:e30c:394b/128 2620:15c:26:307:cfa2:6a5e:c486:25fb/128 2620:15c:26:307:c28c:9874:e7b6:02ae/128 2620:15c:26:307:5736:3ee5:944f:b1ea/128 2620:15c:26:307:6fbf:c25f:0c9a:884e/128 2620:15c:26:307:fd21:1123:d04d:5229/128 2620:15c:26:307:ff71:8bda:9919:a58a/128 2620:15c:26:307:201b:5a11:4530:3b60/128 2620:15c:26:307:ac79:2c20:b986:1001/128 2620:15c:26:307:1e38:4386:2d6b:f0c6/128 2620:15c:26:307:772b:9144:5bcf:64f4/128 2620:15c:26:307:5657:f2dd:7788:89b1/128 2620:15c:26:307:6091:667c:fbc6:d47c/128 2620:15c:26:307:5877:9125:ce4b:c6bd/128 2620:15c:26:307:88c2:1375:2fe9:e0ad/128 2620:15c:26:307:859b:ba6f:85ad:b21d/128 2620:15c:26:307:e155:4491:0115:3029/128 2620:15c:26:307:11c2:ccfe:4c79:2371/128 2620:15c:26:307:9d57:35df:7b9b:803a/128 2620:15c:26:307:9b82:733f:b461:037c/128 2620:15c:26:307:2b7f:deff:a23d:9da9/128 2620:15c:26:307:4603:fc39:0b52:4c76/128 2620:15c:26:307:e390:f217:ea96:1a49/128 2620:15c:26:307:4bef:6536:5d9a:48e9/128 2620:15c:26:307:1216:57c6:0867:7b5b/128 2620:15c:26:307:1e01:aa11:18b3:1bbf/128 2620:15c:26:307:d425:4ba3:38ee:11ac/128 2620:15c:26:307:9258:6878:23e0:5afa/128 2620:15c:26:307:9eb6:b18c:7f59:777d/128 2620:15c:26:307:fa08:cd73:c453:6570/128 2620:15c:26:307:4b0a:2c5d:3af4:adf0/128 2620:15c:26:307:87f7:e4e8:a7df:8148/128 2620:15c:26:307:7445:f9a7:4edc:89a9/128 2620:15c:26:307:11bc:1e9b:b2e6:2082/128 2620:15c:26:307:2472:5106:90dd:fee8/128 2620:15c:26:307:d9a9:2fcd:f455:d937/128 2620:15c:26:307:2482:b20e:35e5:89c9/128 2620:15c:26:307:ed7f:0927:88c3:bc82/128 2620:15c:26:307:2ae1:9c6c:4e02:d0eb/128 2620:15c:26:307:f295:4e82:01dd:3904/128 2620:15c:26:307:3d3f:57c3:c851:b98f/128 2620:15c:26:307:5e4e:acb4:2688:eb90/128 2620:15c:26:307:4544:58e3:fd1f:5f27/128 2620:15c:26:307:fe2e:598d:2b14:b1e5/128 2620:15c:26:307:69ed:957d:df03:6a64/128 2620:15c:26:307:d5b6:c05b:6138:8f51/128 2620:15c:26:307:4bef:da99:fa33:c6c8/128 2620:15c:26:307:9082:d3f9:9fc0:f4ca/128 2620:15c:26:307:79a4:b8ea:9873:2282/128 2620:15c:26:307:4512:56ae:2d0d:9daa/128 2620:15c:26:307:0dd8:f650:d603:74c4/128 2620:15c:26:307:e312:2615:18b5:f09f/128 2620:15c:26:307:b229:0cba:1958:6abf/128 2620:15c:26:307:a597:3012:7c0b:5878/128 2620:15c:26:307:85f5:9e0a:6acd:6350/128 2620:15c:26:307:20d7:11b1:f745:21fc/128 2620:15c:26:307:1af0:ef98:05be:71ec/128 2620:15c:26:307:186a:9e7a:a227:a888/128 2620:15c:26:307:e546:47ff:e94f:0e66/128 2620:15c:26:307:f5cb:bc09:a5e0:6a05/128 2620:15c:26:307:492a:a4af:ca0c:7b70/128 2620:15c:26:307:e9fd:29d8:12a5:fb8f/128 2620:15c:26:307:d883:b44c:7f47:e218/128 2620:15c:26:307:335a:fc33:07e9:094a/128 2620:15c:26:307:d631:93b8:8dc8:f64f/128 2620:15c:26:307:e95a:0e3f:8b9a:c645/128 2620:15c:26:307:b96d:24a5:2857:6508/128 2620:15c:26:307:064b:fa13:cc83:5be9/128 2620:15c:26:307:4bb2:1b56:0d06:f117/128 2620:15c:26:307:56e2:2dcd:9b15:712a/128 2620:15c:26:307:db8c:6c2c:a95a:74e8/128 2620:15c:26:307:fc73:adb8:dd32:ca94/128 2620:15c:26:307:c669:5cd2:0dcd:0268/128 2620:15c:26:307:8339:cfb1:50ed:42bd/128 2620:15c:26:307:6e2d:fc19:fe70:378f/128 2620:15c:26:307:b91e:6631:16d8:d626/128 2620:15c:26:307:d274:a6b2:a461:e51a/128 2620:15c:26:307:b42e:4943:70dd:0c9c/128 2620:15c:26:307:810b:b4d3:4743:b288/128 
2620:15c:26:307:cfb5:09ff:8f19:bc9f/128 2620:15c:26:307:578b:af4d:c533:43a8/128 2620:15c:26:307:3629:437d:2602:dd6f/128 2620:15c:26:307:9fd0:6bd8:d9be:32eb/128 2620:15c:26:307:0a2a:cfb6:3e6a:c01f/128 2620:15c:26:307:eedb:77b2:ca0a:a491/128 2620:15c:26:307:4a7c:f6c1:ec4b:23b4/128 2620:15c:26:307:cc67:5c41:61a3:52aa/128 2620:15c:26:307:d458:62f3:83e5:3bc3/128 2620:15c:26:307:8bef:afe1:f207:1ade/128 2620:15c:26:307:efd9:1f82:0914:4a47/128 2620:15c:26:307:1001:03f3:5396:e7e1/128 2620:15c:26:307:49e6:0f9b:4a66:3d9a/128 2620:15c:26:307:1769:71d7:fa6a:d257/128 2620:15c:26:307:946f:90b3:dbc3:b053/128 2620:15c:26:307:a368:c8e1:7e03:2364/128 2620:15c:26:307:0364:1f88:cd84:54fe/128 2620:15c:26:307:e625:a6b8:f546:3ce0/128 2620:15c:26:307:0c34:53ee:1767:50d8/128 2620:15c:26:307:fa22:d6b6:0483:97c7/128 2620:15c:26:307:b067:455d:e3ca:593c/128 2620:15c:26:307:051f:5edc:8615:8818/128 2620:15c:26:307:296e:fd09:3137:0dc3/128 2620:15c:26:307:2daa:1e1d:17e2:087e/128 2620:15c:26:307:8882:df81:05df:18f2/128 2620:15c:26:307:e424:620c:e2ff:3083/128 2620:15c:26:307:fde4:1c2d:1dac:517d/128 2620:15c:26:307:9f89:448d:4161:6ff0/128 2620:15c:26:307:0d8c:a1ec:19f1:5638/128 2620:15c:26:307:88e1:6fb0:6463:777f/128 2620:15c:26:307:eb3d:7ffd:24dc:d4de/128 2620:15c:26:307:9482:ba02:5040:d480/128 2620:15c:26:307:79c4:d698:15ce:311a/128 2620:15c:26:307:139d:0a5c:d4ff:aa3a/128 2620:15c:26:307:a5a5:0a1e:3fe9:052b/128 2620:15c:26:307:00b4:d851:e0d5:d1a4/128 2620:15c:26:307:bb67:500b:6340:0f63/128 2620:15c:26:307:9c60:996a:28c4:9e99/128 2620:15c:26:307:f47d:a3f8:892a:8ca6/128 2620:15c:26:307:67c3:06d0:9877:214d/128 2620:15c:26:307:3598:ab21:ab2f:1050/128 2620:15c:26:307:61ba:be8a:e46d:a935/128 2620:15c:26:307:eff6:1bf3:1bcc:0c50/128 2620:15c:26:307:6e6f:a8a0:48cd:ddc9/128 2620:15c:26:307:26df:4320:57fe:c0e7/128 2620:15c:26:307:229b:9f56:d19a:6853/128 2620:15c:26:307:736f:0637:34e6:1b8e/128 2620:15c:26:307:d72f:e00b:6e65:a06b/128 2620:15c:26:307:2678:6042:f1ee:e468/128 2620:15c:26:307:91ee:1c4a:9a6f:b449/128 2620:15c:26:307:18c2:1ca8:48a7:993b/128 2620:15c:26:307:b876:34f5:e1b0:7679/128 2620:15c:26:307:78a7:ca20:5a66:05c5/128 2620:15c:26:307:4e6e:c81f:5b1b:173d/128 2620:15c:26:307:b952:69f8:e776:e1f1/128 2620:15c:26:307:69bf:a524:361b:0ad1/128 2620:15c:26:307:2f2a:cca7:9e2b:6737/128 2620:15c:26:307:41cc:705a:0cc9:dce9/128 2620:15c:26:307:d928:b34f:84c7:358d/128 2620:15c:26:307:e8d5:9102:3fae:1f40/128 2620:15c:26:307:35dc:22c3:51d2:05fc/128 2620:15c:26:307:59b6:f9ec:dcc1:9c34/128 2620:15c:26:307:a5c3:e7dd:7e73:b704/128 2620:15c:26:307:0457:f864:a0ec:13fc/128 2620:15c:26:307:a43e:5ecf:b718:878e/128 2620:15c:26:307:14a3:15e2:318d:3980/128 2620:15c:26:307:73f1:9f50:de3e:8798/128 2620:15c:26:307:a492:3182:cabd:294e/128 2620:15c:26:307:653e:53ad:ea7e:3619/128 2620:15c:26:307:98f9:9ce1:3daf:c025/128 2620:15c:26:307:e39a:68e9:e77d:a1d0/128 2620:15c:26:307:f14d:3c0b:35dd:4ebf/128 2620:15c:26:307:25f1:4c66:9eda:821e/128 2620:15c:26:307:bb06:4538:8b12:bedf/128 2620:15c:26:307:b77a:fea9:a666:6810/128 2620:15c:26:307:7f36:51d3:27e1:5400/128 2620:15c:26:307:b6fe:d32c:c202:4558/128 2620:15c:26:307:4a29:a9e3:0eed:882b/128 2620:15c:26:307:0c2b:ed98:ae17:617e/128 2620:15c:26:307:6f66:d8ae:7c28:bf99/128 2620:15c:26:307:95d3:09ad:5fe5:d1c3/128 2620:15c:26:307:da00:1a9e:355c:eec7/128 2620:15c:26:307:8c66:0d93:0b83:4d92/128 2620:15c:26:307:0257:484c:99c6:e030/128 2620:15c:26:307:f9e0:6d38:9721:f82f/128 2620:15c:26:307:3086:9209:480c:6f65/128 2620:15c:26:307:5791:0aac:51b1:7ab5/128 2620:15c:26:307:c65a:5d71:2f1c:be42/128 
2620:15c:26:307:4514:edf8:1575:99cc/128 2620:15c:26:307:394c:7b99:fff3:f1bf/128 2620:15c:26:307:a16d:e5b1:a545:8e27/128 2620:15c:26:307:b649:b512:b30f:d0dd/128 2620:15c:26:307:55e4:76b5:8fc7:3953/128 2620:15c:26:307:799f:8556:0007:24f5/128 2620:15c:26:307:7e0a:6acd:ab71:a40a/128 2620:15c:26:307:4731:98fb:3881:dd60/128 2620:15c:26:307:5a9b:64cd:e7a4:cafa/128 2620:15c:26:307:6046:130b:f6fb:4955/128 2620:15c:26:307:d065:8c54:2882:2b74/128 2620:15c:26:307:c858:ed84:f9c6:0c38/128 2620:15c:26:307:b180:3b37:9bdb:247a/128 2620:15c:26:307:6252:7a1b:09c5:af9e/128 2620:15c:26:307:cce0:2cb2:c4d9:0f83/128 2620:15c:26:307:4280:ea15:9f12:577f/128 2620:15c:26:307:603e:e0a5:8ac9:2a4c/128 2620:15c:26:307:6da0:4647:7a3f:af8c/128 2620:15c:26:307:d236:82fd:a844:9a3f/128 2620:15c:26:307:1bd5:be71:55bb:ba40/128 2620:15c:26:307:47ac:9933:769e:e426/128 2620:15c:26:307:beed:6e6c:30e0:67a3/128 2620:15c:26:307:d98d:11ab:d767:8903/128 2620:15c:26:307:3255:dc34:ae33:d656/128 2620:15c:26:307:ef12:db1f:ae0c:3375/128 2620:15c:26:307:1186:a502:77c3:402c/128 2620:15c:26:307:dd96:90f0:7d97:e301/128 2620:15c:26:307:0578:7c13:d284:eae5/128 2620:15c:26:307:a794:f654:eeb4:643c/128 2620:15c:26:307:cea4:46c6:ed23:9a8d/128 2620:15c:26:307:71a3:93cc:9d12:62b5/128 2620:15c:26:307:bf1c:b8f8:3a75:3cb1/128 2620:15c:26:307:d1e0:af4b:8bf7:89df/128 2620:15c:26:307:c5ba:db56:6b7d:55d9/128 2620:15c:26:307:8869:8e60:53fb:3e99/128 2620:15c:26:307:29da:5c27:ea11:1dd8/128 2620:15c:26:307:2c77:d3e3:13ea:7435/128 2620:15c:26:307:8095:7b13:9bec:7c04/128 2620:15c:26:307:8611:c05e:49c3:b26f/128 2620:15c:26:307:5ca3:8301:83b4:9cba/128 2620:15c:26:307:6196:3f90:fb71:1508/128 2620:15c:26:307:aece:4f72:82ec:bd0c/128 2620:15c:26:307:be1c:fb09:fa95:caa3/128 2620:15c:26:307:48ba:a0a0:ab40:7d9b/128 2620:15c:26:307:1a48:2183:f2cf:9719/128 2620:15c:26:307:f277:e50f:c3bb:c591/128 2620:15c:26:307:9e74:7835:68c2:d5fa/128 2620:15c:26:307:34f9:4731:24f5:0087/128 2620:15c:26:307:efae:87c4:5c0e:b71c/128 2620:15c:26:307:db37:f365:7a10:684c/128 2620:15c:26:307:c5f2:8817:ef7a:d443/128 2620:15c:26:307:759f:a293:d0d8:8092/128 2620:15c:26:307:2372:da6d:bbd1:6504/128 2620:15c:26:307:4cf1:19f5:f891:0a60/128 2620:15c:26:307:5751:b56c:d6cf:df1d/128 2620:15c:26:307:4884:9737:8c4a:98c1/128 2620:15c:26:307:b5cb:36aa:d0cf:7cf2/128 2620:15c:26:307:bb36:03d7:b920:350f/128 2620:15c:26:307:3da8:fd18:d777:4b94/128 2620:15c:26:307:20a3:27ed:e2e8:4078/128 2620:15c:26:307:6701:513f:a854:3429/128 2620:15c:26:307:16c2:079e:b488:e09f/128 2620:15c:26:307:6f48:3cc7:75c8:2229/128 2620:15c:26:307:2060:f1f4:90dd:bba0/128 2620:15c:26:307:f227:539b:5d58:74c6/128 2620:15c:26:307:64ea:6caa:ca92:aa88/128 2620:15c:26:307:f2b4:973b:78c9:8930/128 2620:15c:26:307:8544:db54:71dd:883c/128 2620:15c:26:307:4e6f:96b1:443b:3db1/128 2620:15c:26:307:a6ea:1064:f149:0596/128 2620:15c:26:307:673c:13c9:c7d5:480e/128 2620:15c:26:307:25ce:46e0:84ef:f5a3/128 2620:15c:26:307:5d66:f8e6:7e3e:5c4f/128 2620:15c:26:307:2b16:d8a2:d548:8604/128 2620:15c:26:307:08ba:a89d:be16:2c95/128 2620:15c:26:307:55da:353d:3f0f:d104/128 2620:15c:26:307:59ec:a2f8:50f3:7234/128 2620:15c:26:307:2e08:e32f:185e:32a0/128 2620:15c:26:307:8bd6:fff3:34e6:ca9c/128 2620:15c:26:307:1702:c6e9:0121:9d6b/128 2620:15c:26:307:7519:fd54:d1e5:93d7/128 2620:15c:26:307:aad4:7cd7:a4e8:7f26/128 2620:15c:26:307:ec78:13cf:df79:b57a/128 2620:15c:26:307:2241:7999:77a4:e66d/128 2620:15c:26:307:49d1:548b:915e:7827/128 2620:15c:26:307:9d83:5530:f666:d934/128 2620:15c:26:307:305c:bc0a:d483:ada8/128 2620:15c:26:307:fd45:87a3:e713:253a/128 
2620:15c:26:307:0fe1:481d:19cf:3517/128 2620:15c:26:307:8e97:9d1d:b152:8412/128 2620:15c:26:307:630c:900f:25c7:4e00/128 2620:15c:26:307:c13e:2cac:579f:9d5d/128 2620:15c:26:307:e852:d0b7:ab37:7ebb/128 2620:15c:26:307:e187:5959:a5ba:b907/128 2620:15c:26:307:a016:2755:7bab:0b36/128 2620:15c:26:307:415a:1453:5ab0:69cc/128 2620:15c:26:307:2b1c:f75e:30d7:8bfc/128 2620:15c:26:307:391b:a14a:51d7:e092/128 2620:15c:26:307:881d:9a46:73a1:f468/128 2620:15c:26:307:2d5f:9b8d:ed85:1470/128 2620:15c:26:307:236e:fb7a:0c62:3ea9/128 2620:15c:26:307:db15:c944:8125:b4ee/128 2620:15c:26:307:e607:2db1:ba61:fa31/128 2620:15c:26:307:69e6:026a:279b:6dcc/128 2620:15c:26:307:70b3:eb04:5e19:d8aa/128 2620:15c:26:307:236e:8754:a4fc:1537/128 2620:15c:26:307:cdcb:23ff:fa3f:1a5b/128 2620:15c:26:307:78a7:3b49:b985:7a2a/128 2620:15c:26:307:0cfe:3bb8:c9f7:a342/128 2620:15c:26:307:faca:249d:14b3:8909/128 2620:15c:26:307:2ac7:200e:ee23:84ac/128 2620:15c:26:307:f5d8:9632:cc6e:b220/128 2620:15c:26:307:3fdb:c86b:0b02:de2c/128 2620:15c:26:307:f8de:0b04:5342:e0b6/128 2620:15c:26:307:ba1a:1091:0b02:29b8/128 2620:15c:26:307:4042:a135:16d4:bd63/128 2620:15c:26:307:aa3d:5fd2:3d68:da0a/128 2620:15c:26:307:3df1:fa3c:3dfc:6ca3/128 2620:15c:26:307:5356:eb46:35b7:afe8/128 2620:15c:26:307:92e4:e8db:093f:38fa/128 2620:15c:26:307:8a47:6911:6208:a1cd/128 capirca-2.0.9/def/NETWORK.net000066400000000000000000000062131437377527500155510ustar00rootroot00000000000000# # Sample naming defintions for network objects # RFC1918 = 10.0.0.0/8 # non-public 172.16.0.0/12 # non-public 192.168.0.0/16 # non-public INTERNAL = RFC1918 LOOPBACK = 127.0.0.0/8 # loopback ::1/128 # ipv6 loopback RFC_3330 = 169.254.0.0/16 # special use IPv4 addresses - netdeploy RFC_6598 = 100.64.0.0/10 # Shared Address Space LINKLOCAL = FE80::/10 # IPv6 link-local SITELOCAL = FEC0::/10 # Ipv6 Site-local MULTICAST = 224.0.0.0/4 # IP multicast FF00::/8 # IPv6 multicast CLASS-E = 240.0.0.0/4 RESERVED = 0.0.0.0/8 # reserved RFC1918 LOOPBACK RFC_3330 RFC_6598 MULTICAST CLASS-E 0000::/8 # reserved by IETF 0100::/8 # reserved by IETF 0200::/7 # reserved by IETF 0400::/6 # reserved by IETF 0800::/5 # reserved by IETF 1000::/4 # reserved by IETF 4000::/3 # reserved by IETF 6000::/3 # reserved by IETF 8000::/3 # reserved by IETF A000::/3 # reserved by IETF C000::/3 # reserved by IETF E000::/4 # reserved by IETF F000::/5 # reserved by IETF F800::/6 # reserved by IETF FC00::/7 # unique local unicast FE00::/9 # reserved by IETF LINKLOCAL # link local unicast SITELOCAL # IPv6 site-local ANY = 0.0.0.0/0 ANY_V6 = ::/0 ANY_MIXED = ANY ANY_V6 # http://www.team-cymru.org/Services/Bogons/bogon-bn-agg.txt # 22-Apr-2011 BOGON = 0.0.0.0/8 192.0.0.0/24 192.0.2.0/24 198.18.0.0/15 198.51.100.0/24 203.0.113.0/24 MULTICAST CLASS-E 3FFE::/16 # 6bone 5F00::/8 # 6bone 2001:DB8::/32 # IPv6 documentation prefix GOOGLE_PUBLIC_DNS_ANYCAST = 8.8.4.4/32 # IPv4 Anycast 8.8.8.8/32 # IPv4 Anycast 2001:4860:4860::8844/128 # IPv6 Anycast 2001:4860:4860::8888/128 # IPv6 Anycast GOOGLE_DNS = GOOGLE_PUBLIC_DNS_ANYCAST CLOUDFLARE_PUBLIC_DNS = 2606:4700:4700::1111/128 1.1.1.1 # The following are sample entires intended for us in the included # sample policy file. These should be removed. 
WEB_SERVERS = 200.1.1.1/32 # Example web server 1 200.1.1.2/32 # Example web server 2 MAIL_SERVERS = 200.1.1.4/32 # Example mail server 1 200.1.1.5/32 # Example mail server 2 PUBLIC_NAT = 200.1.1.3/32 # Example company NAT address NTP_SERVERS = 10.0.0.1/32 # Example NTP server 10.0.0.2/32 # Example NTP server TACACS_SERVERS = 10.1.0.1/32 # Example tacacs server 10.1.0.2/32 # Example tacacs server PUBLIC_IPV6_SERVERS = 2606:700:e:550:b01a::b00a # Example public web server WEB_IPV6_SERVERS = 2620:15c:2c4:202:b0e7:158f:6a7a:3188/128 # Example web server capirca-2.0.9/def/SERVICES.svc000066400000000000000000000022231437377527500156450ustar00rootroot00000000000000# # Sample naming service definitions # WHOIS = 43/udp SSH = 22/tcp TELNET = 23/tcp SMTP = 25/tcp MAIL_SERVICES = SMTP ESMTP SMTP_SSL POP_SSL TIME = 37/tcp 37/udp TACACS = 49/tcp DNS = 53/tcp 53/udp BOOTPS = 67/udp # BOOTP server BOOTPC = 68/udp # BOOTP client DHCP = BOOTPS BOOTPC TFTP = 69/tcp 69/udp HTTP = 80/tcp WEB_SERVICES = HTTP HTTPS POP3 = 110/tcp RPC = 111/udp IDENT = 113/tcp 113/udp NNTP = 119/tcp NTP = 123/tcp 123/udp MS_RPC_EPMAP = 135/udp 135/tcp MS_137 = 137/udp MS_138 = 138/udp MS_139 = 139/tcp IMAP = 143/tcp SNMP = 161/udp SNMP_TRAP = 162/udp BGP = 179/tcp IMAP3 = 220/tcp LDAP = 389/tcp LDAP_SERVICE = LDAP LDAPS HTTPS = 443/tcp MS_445 = 445/tcp SMTP_SSL = 465/tcp IKE = 500/udp SYSLOG = 514/udp RTSP = 554/tcp ESMTP = 587/tcp LDAPS = 636/tcp IMAPS = 993/tcp POP_SSL = 995/tcp HIGH_PORTS = 1024-65535/tcp 1024-65535/udp MSSQL = 1433/tcp MSSQL_MONITOR = 1434/tcp RADIUS = 1812/tcp 1812/udp HSRP = 1985/udp NFSD = 2049/tcp 2049/udp NETFLOW = 2056/udp SQUID_PROXY = 3128/tcp MYSQL = 3306/tcp RDP = 3389/tcp IPSEC = 4500/udp POSTGRESQL = 5432/tcp TRACEROUTE = 33434-33534/udp capirca-2.0.9/dev-install000077500000000000000000000001571437377527500153230ustar00rootroot00000000000000#!/bin/bash pip install -e . pip install pre-commit # Install the pre-commit hooks as well pre-commit install capirca-2.0.9/doc/000077500000000000000000000000001437377527500137155ustar00rootroot00000000000000capirca-2.0.9/doc/generator_patterns.md000066400000000000000000000470521437377527500201550ustar00rootroot00000000000000# Common Patterns For Generators ## Objective The purpose of this document is to describe common patterns for new Capirca Generators. ## Security based requirements: ### Inet_version ‘Mixed’ Platform Support #### When the platform does not support “mixed” in a single access-list ##### Problem When the inet_version is set to ‘mixed’ it implies that the resultant policy should contain addresses from both families. Some platforms do not support both address families to exist in the same filter therefore leading to Capirca generators needing to handle the output differently for those platforms. Cisco is an example platform that does not support a mixed filter being generated, and therefore it requires two separate *access-list* filters. Platforms that support mixed family filters will simply generate filters that contain both address families. Some platforms (such as GCE) support "mixed" in a single access-list, but do not support "mixed" addresses in the same rule. ##### Desired Approach **The desired approach will be to have Capirca output two filters, one for each address family, for platforms that do not support ‘mixed’.** This currently occurs already with [cisco.py](https://github.com/google/capirca/blob/master/capirca/lib/cisco.py) which outputs two access-lists one that contains IPv4 and another that contains IPv6 addresses. 
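As an illustration of this behaviour, here is a minimal .pol sketch (the filter and term names are placeholders; the network and service tokens come from the sample definition files). A single `mixed` Cisco header like this is expected to render as two access-lists, one per address family:

```
header {
  comment:: "Illustrative only: one mixed header, two rendered access-lists."
  target:: cisco sample-mixed-filter mixed
}

term allow-dns {
  comment:: "GOOGLE_DNS resolves to both IPv4 and IPv6 addresses."
  destination-address:: GOOGLE_DNS
  destination-port:: DNS
  protocol:: udp tcp
  action:: accept
}
```

If two entirely separate policies are preferred instead, the equivalent terms can be placed under two headers, one IPv4 (`extended`) and one IPv6 (`inet6`), as described below.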
This solves a problem of having to potentially maintain two different .pol so that in cases where vendor syntax of filter name is derived from .pol a syncing between v4 and v6 .pol do not need to be maintained. This may be misleading at first because when using Capirca the user expects that the output will be a single policy, but it is actually their lack of understanding about the vendor syntax that causes this belief. If the user does not want this output, then the user can simply issue two headers to Capirca one for IPv4 and one for IPv6. #### When the platform supports “mixed” in a single access-list This will require the policy to be generated correctly for the [following permutations](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L236-L322) of address family, and when “mixed” is supported, and with valid tests: 1. [MIXED_TO_V4](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L236) 1. [V4_TO_MIXED](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L245) 1. [MIXED_TO_V6](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L254) 1. [V6_TO_MIXED](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L262) 1. [MIXED_TO_MIXED](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L270) 1. [MIXED_TO_ANY](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L278) 1. [ANY_TO_MIXED](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L285) 1. [V4_TO_V4](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L92) 1. [V6_TO_V6](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L300) 1. [V4_TO_V6](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L308) 1. [V6_TO_V4](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/tests/lib/nsxv_test.py#L316) The junipersrx, junipermsmpc, paloalto and cloudarmor generators support this. #### When the platform supports “mixed” in a single access-list, but not "mixed" addresses in a single rule This will require a "mixed" policy to be generated by rendering a single rule for both "inet" and "inet6", to end up with 2 resultant rules, one with IPv4 addresses and one with IPv6 respectively. This processing should consider the following: 1. Logically similar IPv4 and IPv6 rules should be close to each other to be easy to read and reason about in the generated policy. So if the IPv4 and IPv6 rules originate from a single "mixed" rule, they should be close to each other. It is thus preferable to process each rule, once for "inet" and then "inet6", over processing all rules in the policy for "inet", and then all the rules for "inet6". This also helps adhere to [requirements regarding rule priority](#apply-priority-as-described-in-pol-files). 1. Special handling of "mixed" such as not re-using the code for "inet" and "inet6" processing may result in inconsistent handling of rules when dealing with "mixed" versus "inet6". As an example, the expected outcome is that a rule containing IPv6 addresses only, when processed under "mixed" or "inet6", should result in the same generated rule. 
1. Rule names for such processing should preserve semantic meaning, but also differentiate between the IPv4 and IPv6 variants of the rule. Suffixes for the IPv6 rules (such as “-ipv6”) are preferred to preserve the sorting order of rules. An example of such handling is the [GCE generator](https://github.com/google/capirca/blob/72cfb69148e552e22b000098856169b22a4db5ef/capirca/lib/gce.py#L517).

### Processing rules with no explicit IP addresses

There are several reasons why a source_address or destination_address might be empty while processing it in a generator. It is important to understand which case is being encountered, in order to correctly translate policy intent. This list may not be exhaustive.

1. When a source_address or destination_address is not explicitly set in a .pol file.
   1. This implies allowing all addresses of the address family (inet_version) the policy is being rendered for.
1. source_address/destination_address is empty because it got filtered out due to the inet_version, since it has no addresses left of that specific address family.
   1. This should not render the term for that specific inet_version. Logging is preferred.
1. source_address/destination_address is empty because it got removed due to source_address_exclude/destination_address_exclude, and has no addresses left.
   1. Log, and do not render the term.
   1. If it is a deny term, the log level should be a warning since the deny term is not being rendered.

### noverbose option is supported correctly

noverbose must be supported if it makes sense for the platform. Noverbose removes all comments from the ACE terms, policies, etc. For example, see the logic implemented in [juniper.py](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/juniper.py#L219).

### Sample.pol file is present

A sample .pol file should exist for that generator. Some examples are [here](https://github.com/google/capirca/tree/master/policies/pol). The sample .pol file for the new generator should have examples for all the filter option types used, with all supported IP types, along with any custom fields for that generator.

### Perform truncating of names for term name or comment based on max length

This is to ensure that truncation of term names and comments that exceed a max_width is supported by the generator. Not all platforms have length limitations; if the generator’s platform has none, then this requirement can be skipped. This applies only when the term name and comments are being incorporated into the policy. If they are actual # comments (which are not applied to the policy), then this requirement does not apply. This could be done using the existing [WrapWords()](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/aclgenerator.py#L549) or it may be done by a truncate function within the generator, using a custom function such as in [juniper.py](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/juniper.py#L715). This wrapping should also be present for the term name, and should use Capirca’s [FixTermLength()](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/aclgenerator.py#L463). This includes counter name limits as well.

### Logging is supported correctly for different types of logging

There are different values of logging already created in [policy.py](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/policy.py#L49).
Not all are supported by every platform.

* LOG_BOTH is for logging session-init as well as session-close, which is currently supported by JuniperSRX.
* DISABLE is a negative logging action.
* Every other option is considered a positive action.

General rules:

* The generator should support all the types of logging it can.
* For all unsupported positive logging actions, the generator should enable logging.
* For DISABLE, if the platform can turn off logging, it must; if the platform does not support it, then it does nothing. The key point is that DISABLE must not enable logging.

### DSMO Support

DSMO (discontinuous subnet masks) is used to save on TCAM space. This is supported by certain platforms such as Cisco, but is not supported by most platforms. If DSMO is not supported by a platform, this requirement can be safely ignored. If DSMO is supported by a platform, it must be fully implemented and carefully unit tested.

### The usage of good and meta Unified Direction names

Good unified direction names for the ACE terms, ones that are meta and not specific to that platform, are preferred. This only applies to platforms that require a direction. The meta directions [supported by Capirca](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/packetfilter.py#L77) are “in”, “out” and “”, but these should not be used as is. Different platforms use “ingress”, “egress” or “both”, such as GCE. Do not rely on platform-specific names for directions. “ingress” and “egress” are the preferred directions to be used, which can then be converted to the platform’s required specific names for directions.

### Term Expirations are handled correctly

Term expirations need to be handled correctly in code. An [example from Juniper](https://github.com/google/capirca/blob/master/capirca/lib/juniper.py#L968-L974) is that when the term is close to [expiration](https://github.com/google/capirca/blob/c0ca9d9a3a34d3dab0b41510571448f5d82c033d/capirca/utils/config.py#L17), an INFO message is logged; and when it is expired, a WARNING message is logged and the term is not generated. This is done similarly across other platforms such as Cisco/GCE.

### ICMP and ICMPv6 handling

The generator needs to handle ICMP and ICMPv6 correctly. This is a broad requirement, but ICMP and ICMPv6 require careful handling to **avoid rendering icmp terms under inet6, and icmpv6 under inet**. One commit that implements this for gcp_hf is [here](https://github.com/google/capirca/commit/b4af15a36b70593b7bbf043559405558e82c81bc). Some generators do not support icmp and icmp6 when the address family is mixed, such as [nftables.py](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/nftables.py#L103-L111). The expected correct behavior is that when “mixed” is specified in the inet_version, the rule for ICMP should contain only IPv4 addresses, and the rule for ICMPv6 should contain only IPv6 addresses. Tests must ensure that the types and codes used are valid for the given address family. In the future, we hope to refactor the code to allow for general ICMP support, but for now this functionality is implemented per-platform in each generator.

Note: In a related requirement, IGMP does not apply to IPv6, and thus rules containing IGMP should not be generated with IPv6 addresses.

### Makes an explicit determination about statefulness

The generator author should check for “Am I stateful?”.
It should clearly state the result of this in the generator as a comment somewhere. If it is stateful, it should make sure that it is doing the right thing for terms. For example, for Juniper SRX, it is [possible to skip TCP-established](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/junipersrx.py#L450-L453) because it is stateful. You would also probably want to do the stateful check early in the code rather than later, since this may improve efficiency by allowing further code/checks to be skipped. A benefit of checking early is being able to skip any processing of terms that is not necessary for a stateful firewall, such as TCP-established handling. In contrast, in iptables.py, the check is made later, while formatting the terms, to modify the term to [allow established and related terms](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/iptables.py#L474-L485).

### Syntax of the config from the generator and on-device should match when cryptographically verified

The ACL that is generated from the generator, and the ACL that is obtained from the device when a show configuration command is used, should match bit-by-bit, such that it should be possible to run a hashing function (such as SHA-1) and obtain the same hash for both ACL configurations. Another variant of this requirement is that the `diff` between these two policies should be empty. There can be certain exceptions, such as if there is a policy header comment that cannot be handled by Cisco devices and is thus not present in the Cisco ACL. This can be handled by checking the diff between them and skipping over the known mismatches that are acceptable because of the device’s incapability. Another example of an exception is the Juniper [control sequence such as ‘replace’](https://github.com/google/capirca/blob/b3e605a54f12efa1e6b0b1cfd179ee6078313c9d/capirca/lib/juniper.py#L993), which instructs the device to replace, rather than merge, the contents of the ACL and which does not show up on the device ACL. If there is a mismatch in the syntax of the two ACLs that cannot be fixed, then these mismatches and the technical reasoning behind the lack of a workaround or a fix should be listed in the associated GitHub issue for this generator.

### Apply priority as described in .pol files

If a priority or order exists for the platform, and an input .pol file doesn’t set a priority for ACEs, then autogenerate priorities in top-down order based on the ACE order in the .pol file.

### Protocol support

#### Call out support

Make explicit which protocols the platform supports, and support them in the generator. If protocols are not supported by the platform, ensure that the generator explicitly does not support them, and gracefully handles these errors.

#### Names vs numbers

Names or numbers can be used to represent protocols within a generator. Throughout a given generator, use only names or only numbers, not a mix of both. The choice should be made based on the default representation for the device platform. (I.e., if the policy, once applied to the device, will show names in a “show config” command output, then use names within the generator. If the output contains numbers, use numbers within the generator.)

#### Port support

The following is a list of which IP protocols support ports. When supporting a protocol, make sure that the handling of ports, or the lack thereof, is correct.
* HOPOPT = No * ICMP = No * IGMP = No * GGP = No * IPIP = No * TCP = Yes * EGP = No * IGP = No * UDP = *Yes* * RDP = *Yes (Uses different port ranges though, check RFC)* * IPV6 = No * IPV6_ROUTE = No * FRAGMENT = No * RSVP = No * GRE = No * ESP = No * AH = No * ICMPV6 = No * IPV6_NONXT = No * IPV6_OPTS = No * OSPF = No * PIM = No * VRRP = No * L2TP = No. (only uses UDP 1701) * SCTP = *Yes* * UDPLITE = *Yes* Note: DCCP also uses ports, but this is not currently supported. Source: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml ### Zone based firewall support Zone based firewalls should be implemented if supported by the platform. When implementing zone based firewalls, all combinations of zone types must be tested fully. Test for invalid and reserved zone names and illegal combinations of policies (i.e. any-> specific, or any->any). ### Address book support Address books should be implemented if they are present in a platform. The generator should explicitly state whether it is implementing a global or zone based address book. If both are available for the platform, both must be implemented, and an option must be added to choose between the two. When implementing address books, always filter for address family before building the book. Tests should ensure that address books are filtered by address family properly along with their relevant rules. ## Coding Style Requirements: ### Use builtin libraries Use only Python standard builtin libraries wherever possible. External dependencies are discouraged and must be justified. ### Structure generators for inheritance See Cisco and Juniper generators for examples. Wherever possible, use base classes, inheritance, etc to allow for common functions between generators in a "family". ### Reuse common functions Use functions from aclgenerator.py, policy.py, nacaddr.py, etc wherever possible instead of implementing your own. ### If output will be in a common exchange format, use a standard library for rendering This applies to common standards such as JSON, XML, YAML, protocol buffers, etc. Instead of building up such serialized output using string ops, use a standard library to produce the rendered output instead. (For example, to render JSON, use [json.dumps](https://docs.python.org/3/library/json.html#json.dumps). For XML, use some combination of the [standard libraries](https://docs.python.org/3/library/xml.html), such as xml.etree and xml.dom.) This should allow most generator code to interact with objects only, and serialize to a buffer at the end. Unit tests should operate on the object structures. Additional small unit tests should sanity check that the rendering library is producing valid output as expected. If a (JSON, XML, etc.) schema is available this should also be used to validate output in a test. ### Check various limits when rendering output Make sure that line, identifier, full output, etc. limits are applied when rendering final output. Some of these may be specific to a given platform. These should always include but are not limited to: * Maximum value of addresses and ports allowed in single rule. Generator must support automatically splitting into new rule when exceeded. * Maximum values allowed across entire policy for rule count, address, ports * Maximum length for comments, and support splitting across lines the correct way when over. Also check for max per-rule limit if one exists and truncate using the common Capirca functions if needed. 
* Max term length supported must be 24 or greater, in order to allow for meaningful term names. ### Test coverage #### General coverage Aim for as close to 100% test coverage as you can. Tests should cover a wide span of the vendor syntax, not just a single keyword. #### Custom exceptions All custom exceptions types added must be unit tested. ### Test Methods #### Running end-to-end Capirca tests Create a test .pol file. Build and run the ACL generator binary with the desired base and output directory. The following command simply outputs to the current directory. ```shell $ ./capirca/aclgen --base_directory ./ --output_directory ./ --recursive --optimize --definitions_directory capirca/def --logtostderr --policy_file path/to/test.pol ``` capirca-2.0.9/doc/generators/000077500000000000000000000000001437377527500160665ustar00rootroot00000000000000capirca-2.0.9/doc/generators/arista.md000066400000000000000000000055241437377527500177010ustar00rootroot00000000000000# Arista The arista header designation has the following format: ``` target:: arista [filter name] {standard|extended|object-group|inet6} ``` * _filter name_: defines the name of the arista filter. * _standard_: specifies that the output should be a standard access list * _extended_: specifies that the output should be an extended access list * _object-group_: specifies this is a arista extended access list, and that object-groups should be used for ports and addresses. * _inet6_: specifies the output be for IPv6 only filters. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _dscp_match::_ Match a DSCP number. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. 
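Putting the header format and a few of the term keywords above together, a minimal illustrative fragment might look like the following (the filter and term names are arbitrary; the address and service tokens are taken from the sample definition files and are not prescriptive):

```
header {
  comment:: "Sample Arista filter, for illustration only."
  target:: arista allow-web-inbound extended
}

term accept-web-traffic {
  destination-address:: WEB_SERVERS
  destination-port:: HTTP HTTPS
  protocol:: tcp
  action:: accept
}
```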
## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _is-fragment::_ Matches on if a packet is a fragment. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. capirca-2.0.9/doc/generators/arista_tp.md000066400000000000000000000067031437377527500204040ustar00rootroot00000000000000# Arista Traffic-Policy Use Notes ## supported tokens The following tokens are supported: - `action` - `comment` - `counter` - `destination-address` - `destination-exclude` - `destination-port` - `destination-prefix` - this should resolve to a configured field-set in traffic-policy format. - `fragment-offset` - `icmp-type` - `logging` - `option` - `established` - `tcp-established` - `initial` - `rst` - `first-fragment` - this will be rendered as a `fragment` match.` - `packet-length` - `source-address` - `source-exclude` - `source-port` - `source-prefix` - this should resolve to a configured field-set in traffic-policy format. - `verbatim` ## documentation The official documentation for traffic-policies can be found at the following URL. - ## filter types Traffic-policies are dual-address-family by default (i.e.: mixed). A term may be either of type ipv4 or ipv6. If the filter type is defined as mixed (the default), then match/action statements for each address family will be generated. If the operator wishes to create an ipv4 or ipv6 only filter, the inet and inet6 tokens within the header will be honored and only addresses from the respective address family will be rendered. However, EOS will still, by default, create an 'ipvX-default-all' term for the alternate address family. (see below) ## action The fully supported actions are: `accept`, and `deny`. Use of `reject`, or `reject-with-tcp-rst` will result in the generation of deny actions in the rendered traffic policy. Note, within traffic-policies not configuring an explicit `deny` action (or `reject` variant) will result in an implicit allow for a term. ### counters - If counters are specified in a term, a traffic-policy named-counter stanza will be generated in the rendered output. - Counter names should not contain a (`.`). If a (`.`) is embedded in a counter name it will be replaced w/a dash (`-`). ### (source|destination)-address-exclude Currently, (as of Jan-2021), EOS does not support the use of 'except' inline within match statements. If an exclude/except token is used, a traffic-policy field-set will be generated and referenced in the match-term output. This field-set will be named `-` where direction is either **src** or **dst** depending on the direction of the token in use. If the filter type is mixed, both address-families will have the respective field-sets generated. The field-set for the ipv4 address family will have the field-set generated with no prefix, while the ipv6 field-set will have `ipv6` inserted into the field-set name after the direction and before the name. (form: `src|dst-ipv6-term_name`) ## ports In EOS traffic-policies, ports can be configured using: - `source [ all | port-list | field-set ]` - `destination [ all | port-list | field-set ]` Currently, all and field-sets are not supported for ports. Only port-lists are supported. 
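As a rough sketch of how port matching is typically driven from a policy (assuming the usual `target:: arista_tp <filter-name>` header form; the filter and term names here are illustrative only), destination ports resolve from service tokens and are rendered as port-lists:

```
header {
  comment:: "Illustrative traffic-policy with port matches; mixed is the default."
  target:: arista_tp sample-traffic-policy
}

term allow-dns-lookups {
  destination-address:: GOOGLE_DNS
  destination-port:: DNS
  protocol:: udp tcp
  action:: accept
}
```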
## default-terms EOS has (2) default terms per traffic-policy, one for each address family: - `ipv4-default-all` - `ipv6-default-all` If there is no match criteria associated with a term _and_ the term name in the policy begins with `default-`, the contents will be rendered into the default terms for the appropriate address family. ## empty match criteria if there is no match criteria specified, and the term name does _not_ start with `default-` the term will not be rendered and a warning will be logged. capirca-2.0.9/doc/generators/aruba.md000066400000000000000000000026721437377527500175110ustar00rootroot00000000000000# Aruba The aruba header designation has the following format: ``` target:: aruba [filter name] {ipv6} ``` * _filter name_: defines the name of the arista filter. * _ipv6_: specifies the output be for IPv6 only filters. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-port::_ One or more service definition tokens * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ ### Option * _destination-is-user::_ Aruba option to specify that the destination should be a user. * _negate::_ Used with DSM summarizer, negates the DSM. * _source-is-user::_ Aruba option to specify that the source should be a user. capirca-2.0.9/doc/generators/brocade.md000066400000000000000000000045211437377527500200110ustar00rootroot00000000000000# Brocade See Cisco ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _dscp_match::_ Match a DSCP number. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. 
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _is-fragment::_ Matches on if a packet is a fragment. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. capirca-2.0.9/doc/generators/cisco.md000066400000000000000000000067031437377527500175160ustar00rootroot00000000000000# Cisco The cisco header designation has the following format: ``` target:: cisco [filter name] {extended|standard|object-group|inet6|mixed} {dsmo} ``` * _filter name_: defines the name or number of the cisco filter. * _extended_: specifies that the output should be an extended access list, and the filter name should be non-numeric. This is the default option. * _standard_: specifies that the output should be a standard access list, and the filter name should be numeric and in the range of 1-99. * _object-group_: specifies this is a cisco extended access list, and that object-groups should be used for ports and addresses. * _inet6_: specifies the output be for IPv6 only filters. * _mixed_: specifies output will include both IPv6 and IPv4 filters. * _dsmo_: Enable discontinuous subnet mask summarization. When _inet4_ or _inet6_ is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses. The default format is _inet4_, and is implied if not other argument is given. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _restrict-address-family::_ Only include the term in the matching address family filter (eg. for mixed filters). * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _dscp_match::_ Match a DSCP number. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. 
*_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _is-fragment::_ Matches on if a packet is a fragment. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. capirca-2.0.9/doc/generators/ciscoasa.md000066400000000000000000000042001437377527500201710ustar00rootroot00000000000000# CiscoASA The ciscoasa header designation has the following format: ``` target:: ciscoasa [filter name] ``` ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. 
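A brief, non-authoritative sketch of a policy using this target (the filter and term names are arbitrary; address and service tokens come from the sample definition files):

```
header {
  comment:: "Illustrative ASA access list."
  target:: ciscoasa asa_inside_in
}

term permit-mgmt-ssh {
  source-address:: RFC1918
  destination-address:: WEB_SERVERS
  destination-port:: SSH
  protocol:: tcp
  action:: accept
}

term default-deny {
  action:: deny
}
```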
capirca-2.0.9/doc/generators/cisconx.md000066400000000000000000000063061437377527500200630ustar00rootroot00000000000000# CiscoNX The cisconx header designation has the following format: ``` target:: cisconx [filter name] {extended|object-group|inet6|mixed} {dsmo} ``` * _filter name_: defines the name or number of the cisconx filter. * _extended_: specifies that the output should be an extended access list, and the filter name should be non-numeric. This is the default option. * _object-group_: specifies this is a cisconx extended access list, and that object-groups should be used for ports and addresses. * _inet6_: specifies the output be for IPv6 only filters. * _mixed_: specifies output will include both IPv6 and IPv4 filters. * _dsmo_: Enable discontinuous subnet mask summarization. When _inet4_ or _inet6_ is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses. The default format is _inet4_, and is implied if not other argument is given. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _dscp_match::_ Match a DSCP number. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. * _platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _is-fragment::_ Matches on if a packet is a fragment. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. 
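For example, a hedged sketch that exercises the header options above (the filter and term names are placeholders; the address and service tokens come from the sample definition files):

```
header {
  comment:: "Sample NX-OS filter, illustrative only."
  target:: cisconx sample-nx-filter extended
}

term allow-ntp-sync {
  source-address:: NTP_SERVERS
  destination-port:: NTP
  protocol:: udp
  action:: accept
}
```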
capirca-2.0.9/doc/generators/ciscoxr.md000066400000000000000000000045211437377527500200640ustar00rootroot00000000000000# CiscoXR See Cisco ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _dscp_match::_ Match a DSCP number. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _is-fragment::_ Matches on if a packet is a fragment. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. capirca-2.0.9/doc/generators/gce.md000066400000000000000000000032331437377527500171470ustar00rootroot00000000000000# GCE The GCE header designation has the following format: ``` target:: gce [filter name] [direction] ``` * _filter name_: defines the name of the gce filter. * _direction_: defines the direction, valid inputs are INGRESS and EGRESS (default:INGRESS) ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _destination_tag::_ Tag name to be used for destination filtering. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _name::_ Name of the term. 
* _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _priority_ Relative priority of rules when evaluated on the platform. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _source-service-accounts::_ A service account that the term applies to. * _source-tag::_ Tag name used for source filtering. * _target-service-accounts::_ A service account that may make network connections. ## Sub Tokens ### Actions * _accept_ * _deny_ capirca-2.0.9/doc/generators/gce_vpc_tf.md000066400000000000000000000036431437377527500205150ustar00rootroot00000000000000# Terraform GCE The Terraform GCE header designation has the following format: ``` target:: gce_vpc_tf [filter name] [network name] [direction] [max policy cost] ``` * _filter name_: defines the name of the gce_vpc_tf filter. * _network name_: defines the name of the network the filter applies to. * _direction_: defines the direction, valid inputs are INGRESS and EGRESS (default:INGRESS) * _max policy cost_: maximum policy cost as an integer. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _destination_tag::_ Tag name to be used for destination filtering. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _priority_ Relative priority of rules when evaluated on the platform. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _source-service-accounts::_ A service account that the term applies to. * _source-tag::_ Tag name used for source filtering. * _target-service-accounts::_ A service account that the term applies to. For ingress rules it is the destination, for egress rules it is the source. ## Sub Tokens ### Actions * _accept_ * _deny_ capirca-2.0.9/doc/generators/ipset.md000066400000000000000000000073241437377527500175420ustar00rootroot00000000000000# Ipset Ipset is a system inside the Linux kernel, which can very efficiently store and match IPv4 and IPv6 addresses. This can be used to dramatically increase performance of iptables firewall. The Ipset header designation follows the Iptables format above, but uses the target platform of 'ipset': ``` target:: ipset [INPUT|OUTPUT|FORWARD|custom] {ACCEPT|DROP} {truncatenames} {nostate} {inet|inet6} ``` ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. 
The comment can extend over multiple lines if desired, until a closing quote is encountered. * _counter::_ Update a counter for matching packets * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-interface::_ Specify specific interface a term should apply to (e.g. destination-interface:: eth3) * _destination-port::_ One or more service definition tokens * _destination-prefix::_ Specify destination-prefix matching (e.g. source-prefix:: configured-neighbors-only) * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _fragement-offset::_ specify a fragment offset of a fragmented packet * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _packet-length::_ specify packet length. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _routing-instance::_ specify routing instance for matching packets. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-interface::_ specify specific interface a term should apply to (e.g. source-interface:: eth3). * _source-port::_ one or more service definition tokens. * _source-prefix::_ specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only). * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _ack::_ Match on ACK flag being present. * _all::_ Matches all protocols. * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _fin::_ Match on FIN flag being present. * _first-fragment::_ Only match on first fragment of a fragmented pakcet. * _initial::_ Only matches on initial packet. * _is-fragment::_ Matches on if a packet is a fragment. * _none::_ Matches none. * _psh::_ Match on PSH flag being present. * _rst::_ Match on RST flag being present. * _sample::_ Samples traffic for netflow. * _syn::_ Match on SYN flag being present. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. * _urg::_ Match on URG flag being present. capirca-2.0.9/doc/generators/iptables.md000066400000000000000000000133101437377527500202110ustar00rootroot00000000000000# iptables NOTE: Iptables produces output that must be passed, line by line, to the 'iptables/ip6tables' command line. For 'iptables-restore' compatible output, please use the Speedway generator. 
The Iptables header designation has the following format: ``` target:: iptables [INPUT|OUTPUT|FORWARD|custom] {ACCEPT|DROP} {truncatenames} {nostate} {inet|inet6} INPUT: apply the terms to the input filter. OUTPUT: apply the terms to the output filter. FORWARD: apply the terms to the forwarding filter. custom: create the terms under a custom filter name, which must then be linked/jumped to from one of the default filters (e.g. iptables -A input -j custom) ACCEPT: specifies that the default policy on the filter should be 'accept'. DROP: specifies that the default policy on the filter should be to 'drop'. inet: specifies that the resulting filter should only render IPv4 addresses. inet6: specifies that the resulting filter should only render IPv6 addresses. truncatenames: specifies to abbreviate term names if necessary (see lib/iptables.py:CheckTerMLength for abbreviation table) nostate: specifies to produce 'stateless' filter output (e.g. no connection tracking) ``` ## Iptables NOTE: Iptables produces output that must be passed, line by line, to the 'iptables/ip6tables' command line. For 'iptables-restore' compatible output, please use the [Speedway](PolicyFormat#Speedway.md) generator. The Iptables header designation has the following format: ``` target:: iptables [INPUT|OUTPUT|FORWARD|custom] {ACCEPT|DROP} {truncatenames} {nostate} {inet|inet6} ``` * _INPUT_: apply the terms to the input filter. * _OUTPUT_: apply the terms to the output filter. * _FORWARD_: apply the terms to the forwarding filter. * _custom_: create the terms under a custom filter name, which must then be linked/jumped to from one of the default filters (e.g. iptables -A input -j custom) * _ACCEPT_: specifies that the default policy on the filter should be 'accept'. * _DROP_: specifies that the default policy on the filter should be to 'drop'. * _inet_: specifies that the resulting filter should only render IPv4 addresses. * _inet6_: specifies that the resulting filter should only render IPv6 addresses. * _truncatenames_: specifies to abbreviate term names if necessary (see lib/iptables.py:_CheckTerMLength for abbreviation table) *_nostate_: specifies to produce 'stateless' filter output (e.g. no connection tracking)_ ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _counter::_ Update a counter for matching packets * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-interface::_ Specify specific interface a term should apply to (e.g. destination-interface:: eth3) * _destination-port::_ One or more service definition tokens * _destination-prefix::_ Specify destination-prefix matching (e.g. source-prefix:: configured-neighbors-only) * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _fragement-offset::_ specify a fragment offset of a fragmented packet * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. 
* _packet-length::_ specify packet length. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _routing-instance::_ specify routing instance for matching packets. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-interface::_ specify specific interface a term should apply to (e.g. source-interface:: eth3). * _source-port::_ one or more service definition tokens. * _source-prefix::_ specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only). * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _ack::_ Match on ACK flag being present. * _all::_ Matches all protocols. * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _fin::_ Match on FIN flag being present. * _first-fragment::_ Only match on first fragment of a fragmented pakcet. * _initial::_ Only matches on initial packet. * _is-fragment::_ Matches on if a packet is a fragment. * _none::_ Matches none. * _psh::_ Match on PSH flag being present. * _rst::_ Match on RST flag being present. * _sample::_ Samples traffic for netflow. * _syn::_ Match on SYN flag being present. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. * _urg::_ Match on URG flag being present. capirca-2.0.9/doc/generators/juniper.md000066400000000000000000000134701437377527500200710ustar00rootroot00000000000000# Juniper The juniper header designation has the following format: ``` target:: juniper [filter name] {inet|inet6|bridge} filter name: defines the name of the juniper filter. inet: specifies the output should be for IPv4 only filters. This is the default format. inet6: specifies the output be for IPv6 only filters. bridge: specifies the output should render a Juniper bridge filter. ``` When inet4 or inet6 is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses. The default format is `inet4`, and is implied if not other argument is given. ## Juniper The juniper header designation has the following format: ``` target:: juniper [filter name] {inet|inet6|bridge} {dsmo} {not-interface-specific} ``` * _filter name_: defines the name of the juniper filter. * _inet_: specifies the output should be for IPv4 only filters. This is the default format. * _inet6_: specifies the output be for IPv6 only filters. * _bridge_: specifies the output should render a Juniper bridge filter. * _dsmo_: Enable discontinuous subnet mask summarization. * _not-interface-specific_: Toggles "interface-specific" inside of a term. * _direction_: The direction of the filter on an interface (optional). Use when a term needs this signal. * _interface_: The type of interface on which the filter will be applied (optional). 
Use when a term needs this signal. When _inet4_ or _inet6_ is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses. The default format is _inet4_, and is implied if not other argument is given. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _restrict-address-family::_ Only include the term in the matching address family filter (eg. for mixed filters). * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _counter::_ Update a counter for matching packets * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _destination-prefix::_ Specify destination-prefix matching (e.g. source-prefix:: configured-neighbors-only) * _destination-prefix_except::_ Specify destination-prefix exception(TODO:cmas Fill in more). * _dscp_except::_ Do not match the DSCP number. * _dscp_match::_ Match a DSCP number. * _dscp_set::_ Match a DSCP set. * _ether_type::_ Match EtherType field. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _filter-term::_ Include another filter * _flexible-match-range Filter based on flexible match options. * _forwarding-class::_ Specify the forwarding class to match. * _forwarding-class_except::_ Do not match the specified forwarding classes. * _fragement-offset::_ specify a fragment offset of a fragmented packet * _hop-limit::_ Match the hop limit to the specified hop limit or set of hop limits. * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _loss-priority::_ Specify loss priority. * _name::_ Name of the term. * _next-ip::_ Used in filter based forwarding. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _packet-length::_ specify packet length. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _policer::_ specify which policer to apply to matching packets. * _port::_ Matches on source or destination ports. Takes a service token. * _port-mirror::_ Sends copies of the packets to a remote port, boolean value is used to render this config. * _precedence::_ specify precedence of range 0-7. May be a single integer, or a space separated list. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _protocol\_except::_ allow all protocol "except" specified. * _qos::_ apply quality of service classification to matching packets (e.g. qos:: af4) * _routing-instance::_ specify routing instance for matching packets. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _source-prefix::_ specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only). 
* _source-prefix-except::_ specify destination-prefix exception(TODO:cmas Fill in more). * _traffic-class-count::_ * _traffic-type::_ specify traffic-type * _ttl::_ Matches on TTL. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _.*::_ wat * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _first-fragment::_ Only match on first fragment of a fragmented pakcet. * _sample::_ Samples traffic for netflow. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. capirca-2.0.9/doc/generators/juniperevo.md000066400000000000000000000151101437377527500205740ustar00rootroot00000000000000# Juniper EVO The Juniper EVO header designation has the following format: ``` target:: juniperevo [filter name] {inet|inet6|bridge} filter name: defines the name of the Juniper EVO filter. inet: specifies the output should be for IPv4 only filters. This is the default format. inet6: specifies the output be for IPv6 only filters. bridge: specifies the output should render a Juniper EVO bridge filter. ``` When inet4 or inet6 is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses. The default format is `inet4`, and is implied if not other argument is given. ## Juniper EVO The Juniper EVO header designation has the following format: ``` target:: juniperevo [filter name] {inet|inet6|bridge} {dsmo} {not-interface-specific} {direction} {interface} ``` * _filter name_: defines the name of the Juniper EVO filter. * _inet_: specifies the output should be for IPv4 only filters. This is the default format. * _inet6_: specifies the output be for IPv6 only filters. * _bridge_: specifies the output should render a Juniper EVO bridge filter. * _dsmo_: Enable discontinuous subnet mask summarization. * _direction_: The direction of the filter on an interface. Must be specified. * _interface_: The type of interface on which the filter will be applied. Default in physical (non-loopback) interface. When _inet4_ or _inet6_ is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses. The default format is _inet4_, and is implied if not other argument is given. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _restrict-address-family::_ Only include the term in the matching address family filter (eg. for mixed filters). * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _counter::_ Update a counter for matching packets * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _destination-prefix::_ Specify destination-prefix matching (e.g. 
destination-prefix:: configured-neighbors-only)
* _destination-prefix_except::_ Specify destination-prefix exception (TODO:cmas Fill in more).
* _dscp_except::_ Do not match the DSCP number.
* _dscp_match::_ Match a DSCP number.
* _dscp_set::_ Match a DSCP set.
* _ether_type::_ Match EtherType field.
* _expiration::_ stop rendering this term after the specified date, in YYYY-MM-DD format.
* _filter-term::_ Include another filter.
* _flexible-match-range::_ Filter based on flexible match options.
* _forwarding-class::_ Specify the forwarding class to match.
* _forwarding-class_except::_ Do not match the specified forwarding classes.
* _fragment-offset::_ specify a fragment offset of a fragmented packet
* _hop-limit::_ Match the hop limit to the specified hop limit or set of hop limits.
* _icmp-code::_ Specifies the ICMP code to filter on.
* _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments
* _logging::_ Specify that this packet should be logged via syslog.
* _loss-priority::_ Specify loss priority.
* _name::_ Name of the term.
* _next-ip::_ Used in filter based forwarding.
* _option::_ See the platform's supported Options section.
* _owner::_ Owner of the term, used for organizational purposes.
* _packet-length::_ specify packet length.
* _platform::_ one or more target platforms for which this term should ONLY be rendered.
* _platform-exclude::_ one or more target platforms for which this term should NEVER be rendered.
* _policer::_ specify which policer to apply to matching packets.
* _port::_ Matches on source or destination ports. Takes a service token.
* _port-mirror::_ Sends copies of the packets to a remote port, boolean value is used to render this config.
* _precedence::_ specify precedence in the range 0-7. May be a single integer, or a space separated list.
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* _protocol\_except::_ allow all protocols "except" those specified.
* _qos::_ apply quality of service classification to matching packets (e.g. qos:: af4)
* _routing-instance::_ specify routing instance for matching packets.
* _source-address::_ one or more source address tokens.
* _source-exclude::_ exclude one or more address tokens from the specified source-address.
* _source-port::_ one or more service definition tokens.
* _source-prefix::_ specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only).
* _source-prefix-except::_ specify source-prefix exception (TODO:cmas Fill in more).
* _traffic-class-count::_
* _traffic-type::_ specify traffic-type
* _ttl::_ Matches on TTL.
* _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added.

## Sub Tokens

### Actions

* _accept_
* _deny_
* _next_
* _reject_
* _reject-with-tcp-rst_

### Option

* _.*::_ wat
* _established::_ Only match established connections; implements tcp-established for tcp and sets destination port to 1024-65535 for udp if destination port is not defined.
* _first-fragment::_ Only match on the first fragment of a fragmented packet.
* _sample::_ Samples traffic for netflow.
* _tcp-established::_ Only match established tcp connections, based on stateful match or TCP flags. Not supported for other protocols.
* _tcp-initial::_ Only match initial packet for TCP protocol.
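Tying the header and term tokens above together, a minimal juniperevo policy might look like the following sketch. The filter name, the `BGP_PEERS` and `BGP` tokens, and the use of `ingress` as the direction keyword are illustrative assumptions rather than values taken from this document.

```
header {
  comment:: "Example IPv6 edge filter for Juniper EVO."
  target:: juniperevo edge-filter-v6 inet6 ingress
}

term allow-bgp {
  comment:: "Permit BGP sessions from configured peers."
  source-address:: BGP_PEERS
  protocol:: tcp
  destination-port:: BGP
  action:: accept
}

term default-deny {
  action:: deny
}
```

Because this header declares an ingress filter on a physical (non-loopback) interface, IPv6 extension-header matches would be emitted using the `next-header` syntax described in the next section.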
## IPv6 Protocol Match

For Juniper EVO, the direction of the filter on an interface and the interface type determine the syntax to use: either `next-header` or `payload-protocol`. The syntax usage is summarized below for the extension headers as well as the payload header.

* _Ingress (Physical)::_ `next-header hop-by-hop` | `next-header fragment` | `next-header routing` | `payload-protocol tcp|udp|ah|esp|icmpv6`
* _Ingress (Loopback)::_ `payload-protocol 0` | `payload-protocol 44` | `payload-protocol 43` | `payload-protocol tcp|udp|ah|esp|icmpv6`
* _Egress (Physical)::_ `payload-protocol 0` | `payload-protocol 44` | `payload-protocol 43` | `payload-protocol tcp|udp|ah|esp|icmpv6`
* _Egress (Loopback)::_ `payload-protocol 0` | `payload-protocol 44` | `payload-protocol 43` | `payload-protocol tcp|udp|ah|esp|icmpv6`
capirca-2.0.9/doc/generators/junipermsmpc.md000066400000000000000000000014271437377527500211300ustar00rootroot00000000000000# Juniper MSMPC

The Juniper MSMPC header designation has the following format:

```
target:: juniper [filter name] {inet|inet6|mixed} {noverbose} {ingress|egress}

filter name: defines the name of the juniper msmpc filter.
inet6: specifies the output should be for IPv6 only filters.
mixed: specifies the output should be for IPv4 and IPv6 filters. This is the default format.
noverbose: omit additional term and address comments.
ingress: filter will be applied in the input direction.
egress: filter will be applied in the output direction.
```

When inet or inet6 is specified, naming tokens with both IPv4 and IPv6 filters will be rendered using only the specified addresses.

When neither ingress nor egress is specified, the filter will be applied in both (input-output) directions. This is the default.
capirca-2.0.9/doc/generators/junipersrx.md000066400000000000000000000060361437377527500206260ustar00rootroot00000000000000
## JuniperSRX

Note: The Juniper SRX generator is currently in beta testing.

```
target:: srx from-zone [zone name] to-zone [zone name] {inet}
```

* _from-zone_: static keyword, followed by the user-specified source zone
* _to-zone_: static keyword, followed by the user-specified destination zone
* _inet_: Address family (only IPv4 tested at this time)

### Term Format

* _action::_ The action to take when matched. See Actions section for valid options.
* _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered.
* _destination-address::_ One or more destination address tokens
* _destination-exclude::_ Exclude one or more address tokens from the specified destination-address
* _destination-port::_ One or more service definition tokens
* _destination-zone::_ one or more destination zone tokens. Only supported by global policy
* _dscp_except::_ Do not match the DSCP number.
* _dscp_match::_ Match a DSCP number.
* _dscp_set::_ Match a DSCP set.
* _expiration::_ stop rendering this term after the specified date, in YYYY-MM-DD format.
* _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments
* _logging::_ Specify that these packets should be logged.
* Based on the input value the resulting logging actions will follow this logic: * _action_ is 'accept': * _logging_ is 'true': resulting SRX output will be 'log { session-close; }' * _logging_ is 'log-both': resulting SRX output will be 'log { session-init; session-close; }' * _action_ is 'deny': * _logging_ is 'true': resulting SRX output will be 'log { session-init; }' * _logging_ is 'log-both': resulting SRX output will be 'log { session-init; session-close; }' * See [here](https://kb.juniper.net/InfoCenter/index?page=content&id=KB16506) for explanation. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _source-zone::_ one or more source zones tokens. Only supported by global policy * _timeout::_ specify application timeout. (default 60) * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. * _vpn::_ Encapsulate outgoing IP packets and decapsulate incomfing IP packets. ### Sub Tokens #### Actions * _accept_ * _count_ * _deny_ * _dscp_ * _log_ * _reject_ capirca-2.0.9/doc/generators/k8s.md000066400000000000000000000022341437377527500171160ustar00rootroot00000000000000# K8s The K8s header designation has the following format: ``` target:: k8s [direction] ``` * _direction_: defines the direction, valid inputs are INGRESS and EGRESS (default:INGRESS) ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _name::_ Name of the term. * _owner::_ Owner of the term, used for organizational purposes. * _protocol::_ the network protocols this term will match, such as tcp, udp, or sctp. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. ## Sub Tokens ### Actions * _accept_ * _deny_: Only permitted for a default deny capirca-2.0.9/doc/generators/nftables.md000066400000000000000000000214541437377527500202140ustar00rootroot00000000000000# Nftables The NFTables header designation has the following format: ``` target:: newnftables [nf_address_family] [nf_hook] {default_policy_override} {int: base chain priority} {noverbose} ``` Unless otherwise stated, all fields are required unless they're marked optional. - nf_address_family: defines the IP address family for the policies. 
(inet, inet6, mixed) - nf_hook: defines the traffic direction and the nftables hook for the rules. (input, output) - default_policy_override: **OPTIONAL** defines the default action (ACCEPT, DROP) for non-matching packets. Default behavior is DROP. - priority: **OPTIONAL** By default, this generator creates base chains with a starting priority of 0. Defining an integer value will override this behavior. - noverbose: **OPTIONAL** Disable header and term comments in final ACL output. Default behavior is verbose. #### Important: stateful firewall only This NFTables ACL generator generates stateful policies via [conntrack](https://wiki.nftables.org/wiki-nftables/index.php/Matching_connection_tracking_stateful_metainformation). Each NFTables base chain will accept valid return packets via (`ct state established,related accept`). When a non-deny term is processed for ACL generation, the `ct state new` is added to the resulting policy to ensure only valid incoming connections for that term is accepted. This means invalid state packets are dropped by default. An implementation design for this generator is that terms with options 'established', 'tcp-established' will not rendered in the final NFT configuration. #### Reporting bugs When reporting bugs about this generator ensure to include: 1. Example policy (.pol file) 1. Observed output (.nft file) 1. Expected (correct) output in Nftables syntax (.nft syntax) ## Term Format - _action::_ The action to take when matched. Refer to Sub-tokens -> Actions for valid options. - _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. - _destination-address::_ One or more destination address tokens. - _destination-port::_ One or more service definition tokens. - _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) - _icmp-type::_ Specify icmp-type code to match. - _source-interface::_ input direction interface name (renders as: [iifname](https://wiki.nftables.org/wiki-nftables/index.php/Matching_packet_metainformation)) - _source-address::_ One or more source address tokens. - _source-port::_ One or more service definition tokens. - _destination-interface::_ output direction interface name (renders as: [oifname](https://wiki.nftables.org/wiki-nftables/index.php/Matching_packet_metainformation)) - _protocol::_ The network protocol(s) this term will match. - _logging::_ NFTables system logging (host-based). - _counter::_ NFTables counter for specific term. Note: combining source-interface and destination-interface tokens within a term is not supported. ## Sub-tokens ### Actions - _accept_ - _drop_ ### Logging - _disable_ no packets will be logged on syslog. All of the below values are accepted, but outcome is exactly the same. - _true_ - _syslog_ - _local_ ### Counter Any string sub-token in `counter` is accepted. Do note this generator _does not_ implement NFTables `named counters` - this is primarily due to original design decisions to keep each Term into its own chain structure, any support of named counters would simply make the configuration .nft file longer without any additional benefit with the possible exception of the ability to use a single counter-name for multiple terms. ### ICMP Types This generator normalizes certain capirca policy.py string types to NFTables semantically correct values. 
The below tables summarize the supported ICMP type codes, the policy.py parent class definition and the NFtables specific value for the same type. #### IPv4 | ICMPv4 type code | Capirca (policy.py) | NFtables manual | |------------------|----------------------|-------------------------| | 0 | echo-reply | echo-reply | | 3 | unreachable | destination-unreachable | | 4 | source-quench | source-quench | | 5 | redirect | redirect | | 6 | alternate-address | | | 8 | echo-request | echo-request | | 9 | router-advertisement | router-advertisement | | 10 | router-solicitation | router-solicitation | | 11 | time-exceeded | time-exceeded | | 12 | parameter-problem | parameter-problem | | 13 | timestamp-request | timestamp-request | | 14 | timestamp-reply | timestamp-reply | | 15 | information-request | info-request | | 16 | information-reply | info-reply | | 17 | mask-request | address-mask-request | | 18 | mask-reply | address-mask-reply | | 31 | conversion-error | | | 32 | mobile-redirect | | #### IPv6 | ICMPv6 type code | Capirca (policy.py) | NFtables manual | |------------------|------------------------------------------|---------------------------------------------| | 1 | destination-unreachable | destination-unreachable | | 2 | packet-too-big | packet-too-big | | 3 | time-exceeded | time-exceeded | | 4 | parameter-problem | parameter-problem | | 128 | echo-request | echo-request | | 129 | echo-reply | echo-reply | | 130 | multicast-listener-query | mld-listener-query | | 131 | multicast-listener-report | mld-listener-report | | 132 | multicast-listener-done | mld-listener-done OR mld-listener-reduction | | 133 | router-solicit | nd-router-solicit | | 134 | router-advertisement | nd-router-advert | | 135 | neighbor-solicit | nd-neighbor-solicit | | 136 | neighbor-advertisement | nd-neighbor-advert | | 137 | redirect-message | nd-redirect | | 138 | router-renumbering | router-renumbering | | 139 | icmp-node-information-query | | | 140 | icmp-node-information-response | | | 141 | inverse-neighbor-discovery-solicitation | ind-neighbor-solicit | | 142 | inverse-neighbor-discovery-advertisement | ind-neighbor-advert | | 143 | version-2-multicast-listener-report | mld2-listener-report | | 144 | home-agent-address-discovery-request | | | 145 | home-agent-address-discovery-reply | | | 146 | mobile-prefix-solicitation | | | 147 | mobile-prefix-advertisement | | | 148 | certification-path-solicitation | | | 149 | certification-path-advertisement | | | 151 | multicast-router-advertisement | | | 152 | multicast-router-solicitation | | | 153 | multicast-router-termination | | source: https://www.netfilter.org/projects/nftables/manpage.html ### Option - _tcp-established_ and _established_ will cause the term to not be rendered in the final NFT configuration. See 'Important' section above. capirca-2.0.9/doc/generators/nsxv.md000066400000000000000000000063141437377527500174120ustar00rootroot00000000000000# NSX The nsx header designation has the following format: ``` target:: nsxv {section_name} {inet|inet6|mixed} section-id securitygroup securitygroupId section_name: specifies the name of the section all terms in this header apply to. inet: specifies that the resulting filter should only render IPv4 addresses. inet6: specifies that the resulting filter should only render IPv6 addresses. mixed: specifies that the resulting filter should render both IPv4 and IPv6 addresses. 
sectionId: specifies the Id for the section [optional] securitygroup: specifies that the appliedTo should be security group [optional] securitygroupId: specifies the Id of the security group [mandatory if securitygroup is given] (Required keywords option and verbatim are not supported in NSX) ``` ## Nsxv The nsxv header designation has the following format: ``` target:: nsxv {section_name} {inet|inet6|mixed} section-id securitygroup securitygroupId ``` * _section_name_: specifies the name of the section all terms in this header apply to. [mandatory field] * _inet_: specifies the output should be for IPv4 only filters. This is the default format. * _inet6_: specifies the output be for IPv6 only filters. * _mixed_: specifies that the resulting filter should render both IPv4 and IPv6 addresses. * _sectionId_: specifies the Id for the section [optional] * _securitygroup_: specifies that the appliedTo should be security group [optional] * _securitygroupId_: specifies the Id of the security group [mandatory if securitygroup is given] (Required keywords option and verbatim are not supported in NSX) ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _option::_ See platforms supported Options section. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-port::_ one or more service definition tokens. * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _reject_ * _reject-with-tcp-rst_ capirca-2.0.9/doc/generators/packetfilter.md000066400000000000000000000065041437377527500210720ustar00rootroot00000000000000# PacketFilter Note: The PF generator is currently in alpha testing. The output should be compatible with OpenBSD v4.7 PF and later. ``` target:: packetfilter filter-name {inet|inet6|mixed} {in|out} {nostate} ``` * _filter-name_: a short, descriptive policy identifier * _inet_: specifies that the resulting filter should only render IPv4 addresses. * _inet6_: specifies that the resulting filter should only render IPv6 addresses. * _mixed_: specifies that the resulting filter should only render IPv4 and IPv6 addresses (default). * _in_: match ingoing packets (default: both directions). * _out_: match outgoing packets (default: both directions). 
* _nostate_: do not keep state on connections (default: keep state).

## Term Format

* _action::_ The action to take when matched. See Actions section for valid options.
* _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered.
* _destination-address::_ One or more destination address tokens
* _destination-exclude::_ Exclude one or more address tokens from the specified destination-address
* _destination-interface::_ Specify the destination interface. Implicitly changes the term direction to *out* for this term. Mutually exclusive with _source-interface::_.
* _source-interface::_ Specify the source interface. Implicitly changes the term direction to *in* for this term. Mutually exclusive with _destination-interface::_.
* _destination-port::_ One or more service definition tokens
* _expiration::_ stop rendering this term after the specified date, in YYYY-MM-DD format.
* _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments
* _logging::_ Specify that this packet should be logged via syslog.
* _name::_ Name of the term.
* _option::_ See the platform's supported Options section.
* _platform::_ one or more target platforms for which this term should ONLY be rendered.
* _platform-exclude::_ one or more target platforms for which this term should NEVER be rendered.
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* _source-address::_ one or more source address tokens.
* _source-exclude::_ exclude one or more address tokens from the specified source-address.
* _source-port::_ one or more service definition tokens.
* _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added.

## Sub Tokens

### Actions

* _accept_
* _deny_
* _next_
* _reject_

### Option

* _ack::_ Match on ACK flag being present.
* _all::_ Matches all protocols.
* _established::_ Only match established connections; implements tcp-established for tcp and sets destination port to 1024-65535 for udp if destination port is not defined.
* _fin::_ Match on FIN flag being present.
* _is-fragment::_ Matches if a packet is a fragment.
* _psh::_ Match on PSH flag being present.
* _rst::_ Match on RST flag being present.
* _syn::_ Match on SYN flag being present.
* _tcp-established::_ Only match established tcp connections, based on stateful match or TCP flags. Not supported for other protocols.
* _urg::_ Match on URG flag being present.
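For reference, a small policy targeting this generator might look like the sketch below. The filter name and the `MAIL_SERVERS` and `SMTP` tokens are hypothetical and assumed to be defined in the def files.

```
header {
  comment:: "Inbound mail filter for OpenBSD PF."
  target:: packetfilter mail-in mixed in
}

term allow-smtp {
  comment:: "Allow SMTP to the mail servers; state is kept by default."
  destination-address:: MAIL_SERVERS
  protocol:: tcp
  destination-port:: SMTP
  action:: accept
}

term default-deny {
  action:: deny
}
```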
capirca-2.0.9/doc/generators/paloaltofw.md000066400000000000000000000063061437377527500205650ustar00rootroot00000000000000# PaloAltoFW The paloalto header designation has the following format: ``` target:: paloalto from-zone [zone name] to-zone [zone name] [address family] [address objects] ``` * _from-zone_: static keyword, followed by the source zone * _to-zone_: static keyword, followed by the destination zone * _address family_: specifies the address family for the resulting filter - _inet_: the filter should only render IPv4 addresses (default) - _inet6_: the filter should only render IPv6 addresses - _mixed_: the filter should render IPv4 and IPv6 addresses * _address objects_: specifies whether custom address objects or network/mask definitions are used in security policy source and destination fields - _addr-obj_: specifies address groups are used in the security policy source and destination fields (default) - _no-addr-obj_: specifies network/mask definitions are used in the security policy source and destination fields * _unique-term-prefixes_: specifies whether each term name should be generated with unique prefixes. The unique prefix is a hexdigest of from_zone and to_zone fields. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _destination-address::_ One or more destination address tokens. * _destination-port::_ One or more service definition tokens. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. * _name::_ Name of the term. * _owner::_ Owner of the term, used for organizational purposes. * _platform::_ one or more target platforms for which this term should ONLY be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _source-address::_ one or more source address tokens. * _source-port::_ one or more service definition tokens. * _timeout::_ specify application timeout. (default 60) ## Sub Tokens ### Actions * _accept_ * _count_ * _deny_ * _log_ * _reject_ ## Terms Section ### Optionally Supported Keywords * _pan-application_:: paloalto target only. Specify applications for the security policy which can be predefined applications (https://applipedia.paloaltonetworks.com/) and custom application objects. - _Security Policy Service Setting_ When no _protocol_ is specified in the term, the service will be _application-default_. When _protocol_ is tcp or udp, and no _source-port_ or _destination-port_ is specified, the service will be custom service objects for the protocols and all ports (0-65535). When _protocol_ is tcp or udp, and a _source-port_ or _destination-port_ is specified, the service will be custom service objects for the protocols and ports. _pan-application_ can only be used when no _protocol_ is specified in the term, or the protocols tcp and udp. capirca-2.0.9/doc/generators/pcap.md000066400000000000000000000042201437377527500173310ustar00rootroot00000000000000# PcapFilter FILL ME IN ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. 
The comment can extend over multiple lines if desired, until a closing quote is encountered.
* _destination-address::_ One or more destination address tokens
* _destination-exclude::_ Exclude one or more address tokens from the specified destination-address
* _destination-port::_ One or more service definition tokens
* _expiration::_ stop rendering this term after the specified date, in YYYY-MM-DD format.
* _icmp-code::_ Specifies the ICMP code to filter on.
* _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments
* _logging::_ Specify that this packet should be logged via syslog.
* _name::_ Name of the term.
* _option::_ See the platform's supported Options section.
* _platform::_ one or more target platforms for which this term should ONLY be rendered.
* _platform-exclude::_ one or more target platforms for which this term should NEVER be rendered.
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* _source-address::_ one or more source address tokens.
* _source-exclude::_ exclude one or more address tokens from the specified source-address.
* _source-port::_ one or more service definition tokens.

## Sub Tokens

### Actions

* _accept_
* _deny_
* _next_
* _reject_

### Option

* _ack::_ Match on ACK flag being present.
* _all::_ Matches all protocols.
* _established::_ Only match established connections; implements tcp-established for tcp and sets destination port to 1024-65535 for udp if destination port is not defined.
* _fin::_ Match on FIN flag being present.
* _is-fragment::_ Matches if a packet is a fragment.
* _none::_ Matches none.
* _psh::_ Match on PSH flag being present.
* _rst::_ Match on RST flag being present.
* _syn::_ Match on SYN flag being present.
* _tcp-established::_ Only match established tcp connections, based on stateful match or TCP flags. Not supported for other protocols.
* _urg::_ Match on URG flag being present.
capirca-2.0.9/doc/generators/sonic.md000066400000000000000000000023441437377527500175260ustar00rootroot00000000000000# SONiC

The SONiC header designation has the following format:

```
target:: sonic filter-name {inet|inet6|mixed}
```

* _filter-name_: defines the name of the filter. This is a required field. Note that the filter name will be present as a key of every ACE (i.e. rule) in the generated policy. For example, if the filter-name is 'MyPolicy', each ACE will come out like:

```
{
  'ACL_RULE': {
    'MyPolicy|RULE_10': {...},
    'MyPolicy|RULE_20': {...},
    ...
  }
}
```

## Term Format

* _action::_ The action to take when matched. See Actions section for valid options.
* _destination-address::_ One or more destination address tokens.
* _destination-port::_ One or more service definition tokens.
* _expiration::_ Stop rendering this term after the specified date. Date format: YYYY-MM-DD.
* _protocol::_ The network protocols this term will match, such as tcp, udp, or sctp.
* _source-address::_ One or more source address tokens.
* _source-port::_ One or more service definition tokens.

## Sub Tokens

### Actions

* _accept_
* _deny_

### Option

* _tcp-established::_ Only match "established" connections. It is not stateful - any TCP packet with ACK and/or RST TCP flag set will match.
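Building on the `MyPolicy` example above, a small policy for this target might look like the following sketch; the `INTERNAL_DNS` and `DNS` tokens are hypothetical definitions-file entries.

```
header {
  comment:: "Example SONiC ACL."
  target:: sonic MyPolicy mixed
}

term allow-dns {
  comment:: "Permit DNS to the internal resolvers."
  destination-address:: INTERNAL_DNS
  protocol:: udp
  destination-port:: DNS
  action:: accept
}

term deny-all {
  action:: deny
}
```

Each term would then appear under the `ACL_RULE` table keyed by the filter name, as illustrated earlier.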
capirca-2.0.9/doc/generators/speedway.md000066400000000000000000000130361437377527500202340ustar00rootroot00000000000000# Speedway NOTE: Speedway produces Iptables filtering output that is suitable for passing to the 'iptables-restore' command. The Speedway header designation has the following format: ``` target:: speedway [INPUT|OUTPUT|FORWARD|custom] {ACCEPT|DROP} {truncatenames} {nostate} {inet|inet6} INPUT: apply the terms to the input filter. OUTPUT: apply the terms to the output filter. FORWARD: apply the terms to the forwarding filter. custom: create the terms under a custom filter name, which must then be linked/jumped to from one of the default filters (e.g. iptables -A input -j custom) ACCEPT: specifies that the default policy on the filter should be 'accept'. DROP: specifies that the default policy on the filter should be to 'drop'. inet: specifies that the resulting filter should only render IPv4 addresses. inet6: specifies that the resulting filter should only render IPv6 addresses. truncatenames: specifies to abbreviate term names if necessary (see lib/iptables.py: CheckTermLength? for abbreviation table) nostate: specifies to produce 'stateless' filter output (e.g. no connection tracking) ``` # Speedway NOTE: Speedway produces Iptables filtering output that is suitable for passing to the 'iptables-restore' command. The Speedway header designation has the following format: ``` target:: speedway [INPUT|OUTPUT|FORWARD|custom] {ACCEPT|DROP} {truncatenames} {nostate} {inet|inet6} ``` * _INPUT_: apply the terms to the input filter. * _OUTPUT_: apply the terms to the output filter. * _FORWARD_: apply the terms to the forwarding filter. * _custom_: create the terms under a custom filter name, which must then be linked/jumped to from one of the default filters (e.g. iptables -A input -j custom) * _ACCEPT_: specifies that the default policy on the filter should be 'accept'. * _DROP_: specifies that the default policy on the filter should be to 'drop'. * _inet_: specifies that the resulting filter should only render IPv4 addresses. * _inet6_: specifies that the resulting filter should only render IPv6 addresses. * _truncatenames_: specifies to abbreviate term names if necessary (see lib/iptables.py: CheckTermLength for abbreviation table) * _nostate_: specifies to produce 'stateless' filter output (e.g. no connection tracking) ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _counter::_ Update a counter for matching packets * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-interface::_ Specify specific interface a term should apply to (e.g. destination-interface:: eth3) * _destination-port::_ One or more service definition tokens * _destination-prefix::_ Specify destination-prefix matching (e.g. source-prefix:: configured-neighbors-only) * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _fragement-offset::_ specify a fragment offset of a fragmented packet * _icmp-code::_ Specifies the ICMP code to filter on. * _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments * _logging::_ Specify that this packet should be logged via syslog. 
* _name::_ Name of the term. * _option::_ See platforms supported Options section. * _owner::_ Owner of the term, used for organizational purposes. * _packet-length::_ specify packet length. * _platform::_ one or more target platforms for which this term should ONLY be rendered. *_platform-exclude:: one or more target platforms for which this term should NEVER be rendered. * _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value. * _routing-instance::_ specify routing instance for matching packets. * _source-address::_ one or more source address tokens. * _source-exclude::_ exclude one or more address tokens from the specified source-address. * _source-interface::_ specify specific interface a term should apply to (e.g. source-interface:: eth3). * _source-port::_ one or more service definition tokens. * _source-prefix::_ specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only). * _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added. ## Sub Tokens ### Actions * _accept_ * _deny_ * _next_ * _reject_ * _reject-with-tcp-rst_ ### Option * _ack::_ Match on ACK flag being present. * _all::_ Matches all protocols. * _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024- 65535 for udp if destination port is not defined. * _fin::_ Match on FIN flag being present. * _first-fragment::_ Only match on first fragment of a fragmented pakcet. * _initial::_ Only matches on initial packet. * _is-fragment::_ Matches on if a packet is a fragment. * _none::_ Matches none. * _psh::_ Match on PSH flag being present. * _rst::_ Match on RST flag being present. * _sample::_ Samples traffic for netflow. * _syn::_ Match on SYN flag being present. * _tcp-established::_ Only match established tcp connections, based on statefull match or TCP flags. Not supported for other protocols. * _tcp-initial::_ Only match initial packet for TCP protocol. * _urg::_ Match on URG flag being present. capirca-2.0.9/doc/generators/srxlo.md000066400000000000000000000077571437377527500175770ustar00rootroot00000000000000# SRXlo SRX Loopback is a stateless Juniper ACL with minor changes. Please see code for changes. ## Term Format * _action::_ The action to take when matched. See Actions section for valid options. * _address::_ One or more network address tokens, matches source or destination. * _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered. * _counter::_ Update a counter for matching packets * _destination-address::_ One or more destination address tokens * _destination-exclude::_ Exclude one or more address tokens from the specified destination-address * _destination-port::_ One or more service definition tokens * _destination-prefix::_ Specify destination-prefix matching (e.g. source-prefix:: configured-neighbors-only) * _destination-prefix_except::_ Specify destination-prefix exception(TODO:cmas Fill in more). * _dscp_except::_ Do not match the DSCP number. * _dscp_match::_ Match a DSCP number. * _dscp_set::_ Match a DSCP set. * _ether_type::_ Match EtherType field. * _expiration::_ stop rendering this term after specified date. [YYYY](YYYY.md)-[MM](MM.md)-[DD](DD.md) * _forwarding-class::_ Specify the forwarding class to match. 
* _forwarding-class-except::_ Do not match the specified forwarding classes.
* _fragment-offset::_ specify a fragment offset of a fragmented packet
* _hop-limit::_ Match the hop limit to the specified hop limit or set of hop limits.
* _icmp-code::_ Specifies the ICMP code to filter on.
* _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments
* _logging::_ Specify that this packet should be logged via syslog.
* _loss-priority::_ Specify loss priority.
* _name::_ Name of the term.
* _next-ip::_ Used in filter based forwarding.
* _option::_ See the platform's supported Options section.
* _owner::_ Owner of the term, used for organizational purposes.
* _packet-length::_ specify packet length.
* _platform::_ one or more target platforms for which this term should ONLY be rendered.
* _platform-exclude::_ one or more target platforms for which this term should NEVER be rendered.
* _policer::_ specify which policer to apply to matching packets.
* _port::_ Matches on source or destination ports. Takes a service token.
* _precedence::_ specify precedence of range 0-7. May be a single integer, or a space separated list.
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* _protocol-except::_ match any protocol except those specified.
* _qos::_ apply quality of service classification to matching packets (e.g. qos:: af4)
* _routing-instance::_ specify routing instance for matching packets.
* _source-address::_ one or more source address tokens.
* _source-exclude::_ exclude one or more address tokens from the specified source-address.
* _source-port::_ one or more service definition tokens.
* _source-prefix::_ specify source-prefix matching (e.g. source-prefix:: configured-neighbors-only).
* _source-prefix-except::_ specify source-prefix exception (TODO: cmas, fill in more).
* _traffic-class-count::_
* _traffic-type::_ specify traffic-type
* _ttl::_ Matches on TTL.
* _verbatim::_ this specifies that the text enclosed within quotes should be rendered into the output without interpretation or modification. This is sometimes used as a temporary workaround while new required features are being added.

## Sub Tokens

### Actions

* _accept_
* _deny_
* _next_
* _reject_
* _reject-with-tcp-rst_

### Option

* _.*::_ wat
* _established::_ Only match established connections, implements tcp-established for tcp and sets destination port to 1024-65535 for udp if destination port is not defined.
* _first-fragment::_ Only match on first fragment of a fragmented packet.
* _sample::_ Samples traffic for netflow.
* _tcp-established::_ Only match established tcp connections, based on stateful match or TCP flags. Not supported for other protocols.
* _tcp-initial::_ Only match initial packet for TCP protocol.

capirca-2.0.9/doc/generators/windows_advfirewall.md000066400000000000000000000061661437377527500224710ustar00rootroot00000000000000# WindowsAdvFirewall

The Windows Advanced Firewall header designation has the following format:

```
target:: windows_advfirewall {out|in} {inet|inet6|mixed}
```

* _out_: Specifies that the direction of packet flow is out. (default)
* _in_: Specifies that the direction of packet flow is in.
* _inet_: specifies that the resulting filter should only render IPv4 addresses.
* _inet6_: specifies that the resulting filter should only render IPv6 addresses.

## Term Format

* _action::_ The action to take when matched. See Actions section for valid options.
* _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered.
* _destination-address::_ One or more destination address tokens
* _destination-exclude::_ Exclude one or more address tokens from the specified destination-address
* _destination-port::_ One or more service definition tokens
* _expiration::_ stop rendering this term after the specified date, in YYYY-MM-DD format.
* _icmp-type::_ Specify icmp-type code to match, see section [ICMP TYPES](PolicyFormat#ICMP_TYPES.md) for list of valid arguments
* _name::_ Name of the term.
* _option::_ See the platform's supported Options section.
* _platform::_ one or more target platforms for which this term should ONLY be rendered.
* _platform-exclude::_ one or more target platforms for which this term should NEVER be rendered.
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* _source-address::_ one or more source address tokens.
* _source-exclude::_ exclude one or more address tokens from the specified source-address.
* _source-port::_ one or more service definition tokens.

## Sub Tokens

### Actions

* _accept_
* _deny_

## WindowsIPSec

The Windows IPSec header designation has the following format:

```
target:: windows_ipsec [filter_name]
```

* _filter name_: defines the name of the Windows IPSec filter.

## Term Format

* _action::_ The action to take when matched. See Actions section for valid options.
* _comment::_ A text comment enclosed in double-quotes. The comment can extend over multiple lines if desired, until a closing quote is encountered.
* _destination-address::_ One or more destination address tokens
* _destination-exclude::_ Exclude one or more address tokens from the specified destination-address
* _destination-port::_ One or more service definition tokens
* _expiration::_ stop rendering this term after the specified date, in YYYY-MM-DD format.
* _name::_ Name of the term.
* _option::_ See the platform's supported Options section.
* _platform::_ one or more target platforms for which this term should ONLY be rendered.
* _platform-exclude::_ one or more target platforms for which this term should NEVER be rendered.
* _protocol::_ the network protocols this term will match, such as tcp, udp, icmp, or a numeric value.
* _source-address::_ one or more source address tokens.
* _source-exclude::_ exclude one or more address tokens from the specified source-address.
* _source-port::_ one or more service definition tokens.

## Sub Tokens

### Actions

* _accept_
* _deny_

capirca-2.0.9/doc/wiki/000077500000000000000000000000001437377527500146605ustar00rootroot00000000000000capirca-2.0.9/doc/wiki/AclCheck-library.md000066400000000000000000000130051437377527500203000ustar00rootroot00000000000000# aclcheck library

The `aclcheck` library (see `aclcheck.py`) allows simple and easy checks on how a particular network session will be handled when it passes through a policy file.

## Goals

* Create an ACL verification library that permits easy integration into future tools
* Provide the ability to ensure that critical services are not blocked (causing service outages) when ACL changes occur
* Allow secops engineers and customers to easily verify how specific connections may be handled by network filters
* Include command-line functionality for standalone usage

## Methods

An `AclCheck` object has the following methods available:

* `Matches()`: Return a list of aclcheck.Match objects.
* `ExactMatches()`: Do not return matches that are conditional, such as requiring an established TCP connection, or that would continue on to the next term with an action of 'next'.
* `ActionMatch(action='foo')`: Only return matches where the action taken would be one of the following: _'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'_.
* `DescribeMatches()`: Returns a text blob describing the matches that would occur.

## Match Object Methods

Most `AclCheck` methods return "match" objects which have the following properties:

* `action`: The action that will be taken in this matching term
* `filter`: The name of the filter containing this term
* `possibles`: A list of strings containing reasons why this may or may not match
* `qos`: The quality of service level applied to this term
* `term`: The name of the matching term

## Usage

The `AclCheck` code is designed to be used as a library. However, for ease of use, a command-line interface is provided in the top directory of the installation.

```
./aclcheck_cmdline.py:
  --definitions-directory: Directory where the definitions can be found. (default: './def')
  -p,--policy-file: The NAC policy file to check (default='./policies/sample.pol')
  -d,--destination: Destination IP address (default='200.1.1.1')
  -s,--source: Source IP address (default='11.1.1.1')
  --protocol: Protocol (default='tcp')
  --destination-port: Destination port number (default=80)
  --source-port: Source port number (default=1025)
```

e.g.:

```
./aclcheck_cmdline.py --source-port 4096 --destination-port 80 -s 64.142.101.1 \
  -d 200.1.1.0/24 --protocol tcp -p ./policies/sample.pol
```

## Initialization

The `AclCheck` library must be initialized with the following arguments:

* [policy filename](PolicyFormat.md) (filename, text-blob of a policy, or policy object)
* source address (ip address)
* destination address (ip address)
* source port (numeric port)
* destination port (numeric port)
* protocol (tcp, udp, icmp, etc.)

The initialization process immediately processes the information to generate a list of possible matches. These matches are objects of type aclcheck.Match. A list of aclcheck.Match objects can be retrieved by calling the Matches() method.

The Match objects have the following properties:

* filter (the specific filter within the policy where this match occurred)
* term (the specific term within the filter where this match occurred)
* action (the action specified by the matched term)
* possibles (a list of characteristics that may cause this term to match or not match, such as fragmentation or tcp flags)

## Primer

The following code snippet generates a prediction about a particular network flow.
```py
from lib import naming
from lib import policy
from lib import aclcheck

defs = naming.Naming('./def')
pol = policy.ParsePolicy(open('./policies/sample.pol').read(), defs)

src = '64.142.101.126'
dst = '200.1.1.1'
sport = '4096'
dport = '25'
proto = 'tcp'

check = aclcheck.AclCheck(pol, src, dst, sport, dport, proto)
print(check)
```

The output follows:

```
filter: edge-inbound
  term: permit-tcp-established (possible match)
    accept if ['tcp-est']
  term: default-deny
    deny
filter: edge-outbound
  term: default-accept
    accept
```

Alternatively, the individual details of each match can be used as follows:

```py
for match in check.Matches():
  print(match.filter)
  print(match.term)
  print(match.action)
  for possible in match.possibles:
    print(possible)
  print('---')
```

The output follows:

```
edge-inbound
permit-tcp-established
accept
tcp-est
---
edge-inbound
default-deny
deny
---
edge-outbound
default-accept
accept
---
```

When exact matches are desired (e.g. not tcp-established, action "next", etc.), you can use the ExactMatches() method:

```py
for match in check.ExactMatches():
  print(match.filter)
  print(match.term)
  print(match.action)
  for possible in match.possibles:
    print(possible)
  print('---')
```

The output follows:

```
edge-inbound
default-deny
deny
---
edge-outbound
default-accept
accept
---
```

Notice that the `ExactMatches()` output differs from `Matches()` in that the term "permit-tcp-established" no longer appears, since that term has an option requiring the session to be an established TCP session.

## Future Development

The `AclCheck` library was written to provide a common library for the development of network access control assurance and investigative tools.

The `AclCheck` class supports taking a policy argument that consists of either a filename, a text-blob, or a policy object. If a policy object is passed to `AclCheck`, the [naming](NamingLibrary.md) definitions_directory argument is ignored and may be set to `None`. By passing an already existing policy object to `AclCheck`, the run-time is greatly reduced for making multiple calls compared to re-reading the policy and definitions for individual checks.

capirca-2.0.9/doc/wiki/Capirca-design.md000066400000000000000000000357361437377527500200270ustar00rootroot00000000000000# Capirca Design Doc

Status: Final
Author: Tony Watson
Created: Nov 2, 2007
Last Updated: May 5, 2010

## Objective

Define a common meta-language to describe security policies, and a standardized interconnect between meta-language and actual policy rules. The meta-language should be flexible enough to support most common network access control (NAC) devices, but simple enough to be clear and easy to understand. The interconnect should provide a common understanding of how and where the meta-language and actual policy rules are stored.

## Goals

* Provide a standard meta-language to describe NAC policies
* Avoid the proliferation of differing ACL meta-language formats
* Provide a common framework for maintaining both meta-language policies and the actual applied NAC policy
* Provide a foundation for expanding automation of NAC processes
* Eliminate confusion and guesswork when implementing a new output format generator

## Background

Currently, the security group utilizes a variety of tools to automate the generation of ACL, F10, JCL, and Iptables policies. Historically, many automation tools have been built using Ruby around the naming.rb library.
As these tools have been developed, they have usually had unique limitations or requirements that resulted in slightly differing input and output formats. The problem is not serious today, but it must be resolved soon in order to avoid serious headaches in the future.

## Problems

A standardized model is needed to bring existing tools into a happy co-existence, as well as to prevent continued deviations in future tools. The following is a list of some of the existing concerns:

* JCL meta-policy is embedded within comments inside the actual JCL policy files. The resulting output simply replaces in-line terms and replaces the original input file with the generated output file. Meta-policies are maintained in comments immediately after the policy's own 'term' statement, and any non-replaced lines in the policy are appended verbatim to the output.
* Speedway uses separate meta-policy and generated iptables policy files. Meta-policies are parsed and the output is sent to the policy module files in another directory. Speedway meta-policy defines new policies using the 'policy' keyword, and all other content in the meta is appended verbatim to the output.
* F10 meta-policy uses separate meta-policy and generated ACL policy files. Meta-policies are parsed and the output is sent to the policy module files in the same directory. The F10 generator meta-policy defines new policies using the 'term' keyword, and all non-term content is ignored.

| Generator Type | Meta-Policy Definition Location | New Policy Keyword | File Naming Standards | Comments and Non-Meta-Policy Lines |
|----------------|---------------------------------|--------------------|-----------------------|------------------------------------|
| Juniper | inline | uses existing policy 'term' statement | inline with .jcl files | non-replaced term lines are appended verbatim to output |
| Speedway | separate files, different directories | policy xyz { | meta-policy filename mirrors generated policy in different directories | non meta-policy lines are appended verbatim to output |
| Cisco | separate files, same directory | term xyz { | meta-policy and generated policy have .pol and .acl extension in same directory | non meta-policy lines are ignored |
| others | ... | | | |

## Meta-Policy Integration

**Files:** The policy file will consist of terms, comments, and other directives. Terms are the specific rules relating a variety of properties such as source/destination addresses, protocols, ports, actions, etc. Directives may be used to specify that a particular policy should only be generated for a specific platform target, such as cisco.

The policy file has the following properties:

* Comments are ignored. They are not accessible outside the policy class.
* Directives are acted on by the compiler. They may be accessible through the policy class.
* Terms are interpreted and used to populate the policy object.

**File Names and Locations:** The policy files shall be named appropriately to describe their functionality and purpose. Policy files will use a .pol file extension. The network ACL perforce repository will maintain a separate 'pol' sub-directory beneath each 'acl' directory, to contain the policy description files and the generated filters respectively. The following diagram illustrates the suggested directory structure:

```
./network/acl
      |
 -+------------+------------+------------+-
  |            |            |            |
 Def          Corp         Prod        Sysops
  |            |            |            |
 -+------------+------------+------------+-
               |            |            |
             Policy       Policy       Policy
```

Generated output will be stored in files with identical filenames, but lacking the .pol extension.
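For example, under this layout a hypothetical policy in the Corp directory (the file names below are purely illustrative) would keep its policy description in the 'pol' sub-directory, and its generated filter would carry the same name without the .pol extension:

```
./network/acl/Corp/pol/corp-edge-inbound.pol   # policy description (source)
./network/acl/Corp/corp-edge-inbound           # generated filter output
```
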
## Policy Description Language Definition The NAC team needs a standardized meta-policy that can support a wide variety of platforms such as Cisco, Juniper, F10, Netscreen, and Iptables. The language needs to be flexible enough to support diverse platforms, but rigid enough in its definition to ensure consistency between policy definitions. Each policy description file consists of one or more sections. Each section must begin with a 'header' block, followed by one or more 'term' blocks. ## Header Description Format The header section is used to specify options that apply to all terms blocks within the policy, such as the target output platform and any arguments needed by output platform generator. ``` comment:: [doublequoted text]3 target:: [platform] [arguments] ``` The arguments for each platform are passed directly to the output generator as a list, and vary depending on the needs of the generator. Below is a list of currently supported generators and their argument lists. Arguments in [.md](.md)'s are required, arguments in {}'s are optional. ``` target:: cisco [named-access-list] {extended|standard|object-group|inet6|mixed} target:: juniper [filter-name] {inet | inet6 | bridge}[1] target:: iptables [INPUT | OUTPUT | FORWARD] {ACCEPT | DROP}[2] ```` * `[1]` The juniper generator defaults to inet (ipv4) output, but ipv6 and bridge filters can also be specified in the optional filter\_type argument. * `[2]` The iptables generator target must specify a filter which the terms will apply to. The optional 'default action' of ACCEPT or DROP may be used to include output to set the default action of the named filter. Example: ``` header { comment:: "This is an example header " comment:: "used in policy definition files..." target:: juniper inbound-edge-filter inet6 target:: iptables INPUT DROP } ``` ## Term Definition Format Tokens / keywords that must be supported. * source-address:: [token](token.md) * source-exclude:: [token](token.md) * destination-address:: [token](token.md) * destination-exclude:: [token](token.md) * source-port:: [token](token.md) * destination-port:: [token](token.md) * protocol:: \[tcp,udp,icmp, or protocol #\] * action:: \[accept/reject/deny/next\] * option:: \[established, sample, rst, initial, other arbitrary user supplied\] * verbatim:: [platform](target.md) [text field](doublequoted.md) Tokens / keywords that may be supported. * packet-length:: \[text,None (default None)\] * fragment-offset:: \[text,None (default None)\] * counter:: \[text,None (default None)\] * policer:: \[text,None (default None)\] * logging:: \[text,None(default)\] * direction:: \[inbound, outbound, both(default)\] * qos:: (text,None (default None) for juniper = forwarding-class) * target:: \[juniper, cisco, iptables\] [type; inet, inet6, bridge (juniper specific)](filter.md) \[options; default filter action in iptables\] * comment:: "doublequoted text field" * source-prefix:: "text" (prefix lists are used in juniper and are comparable to address directives except that they're defined on the router itself) * destination-prefix:: "text" * Policy files should render equivalent output for any given renderer/target. * Generators may not support all keywords, they can ignore keywords as desired but should produce warnings. * Generators must produce equivalent access lists from the same policy file. * Documentation comments consist of any hash mark (#) through EOL and should be passed to generators in the order they appear in the meta policy. * Generators should ignore comments. 
* Per term comments in meta-policy can be included in sections such as header and terms, using the following notation: comment:: "[text](text.md)". All text between double quotes, including newlines, becomes the comment * Terms in meta-policy will be indicated by opening and closing identifiers: term x { .... } * A header section shall begin each meta-policy. The header section shall be denoted by the following notation: header { ... } * A header must contain at least one target:: section, which specifies the platform or platforms for which the following terms will be rendered * A header section may span multiple lines. * A header may contain a comment section, denoted as comment:: "[text](text.md)" * The option 'established' shall imply adding high-ports to terms with TCP or UDP only protocols, tcp-flag checking on TCP only terms, and may imply stateful checking for generators that support it. * The option 'tcp-established' shall imply tcp-flag checking for terms where only the TCP protocol is specified. It may imply stateful checking for generators that support it. * other? ### Policy Object A policy object is collection of sections, such as header and terms, as well as their associated properties. Each section includes a variety of properties such as source/destination addresses, protocols, ports, actions, etc. The policy.py module generates policy objects from policy files. #### ParsePolicy A policy object can be created by passing a string containing a policy to the ParsePolicy() class. ``` policy = policy.ParsePolicy(policy_text) ``` #### Headers ``` for header, terms in policy.filters: header.target header.target.filter_name ``` #### Terms ``` for header, terms in policy.filters: terms[x].action[] # addresses - lists of google3.ops.security.lib.nacaddr objects terms[x].address[] terms[x].destination_address[] terms[x].destination_address_exclude[] terms[x].source_address[] terms[x].source_address_exclude[] # ports - list of tuples. terms[x].port[] terms[x].destination_port[] terms[x].source_port[] # list of strings terms[x].comment[] terms[x].protocol[] terms[x].option[] terms[x].verbatim[x].value # string terms[x].counter terms[x].name ``` #### Example a contrived example follows: ``` header { comment:: "this is an example filter" target:: junniper example-filter target:: cisco example-filter inet } term term-1 { source-address:: BIG_NETWORK destination-address:: BIG_NETWORK protocol:: tcp action:: accept } ``` this would output a juniper filter of: ``` family inet { replace: filter example-filter { interface-specific; term term-1 { from { source-address { 10.0.0.0/8; } destination-address { 10.0.0.0/8; } protocol tcp; } then { accept; } } } } ``` and a cisco filter of: ``` no ip access-list extended example-filter ip access-list extended example-filter permit tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.255.255.255 ``` #### IPv6 IPv6 support has been added to the policy language. Currently only Cisco, Juniper and Iptables can render ipv6 filters. The syntax for an ipv6 filter is exactly the same as ipv4 filters except for the inet6 keyword on the target line. Making an ipv6 filter is as easy as ``` header { target:: juniper some-v6-filter inet6 } ``` Be sure that the addresses you reference in your subsequent terms have ipv6 definitions. 
ie, if you have ``` term my-v6-term { source-address:: PRODUCTION_NETWORK destination-address:: CORPORATE_NETWORK protocol:: tcp action:: accept } ``` When PRODUCTION\_NETWORK or CORPORATE\_NETWORK tokens are only defined with ipv4 addresses, this will error out. Tokens can include both IPv4 and IPv6 addresses, and rendering IPv6 output will include only IPv6 addresses associated with a given token. ### Definitions The following are words that we have defined for the purposes of NAC discourse and this project. Some of these words may be defined somewhat differently than you are used to. Generator: A program that utilizes the data contained in a Policy to create an output rulebase suitable for applying to a specific target platform. Generators will be specific for each target platform, such as juniper, cisco, f10, iptables, etc. Global Directive: Keywords contained outside of a term or comment within a policy, that define a default value for a particular term property. Global directives can be overwritten within an individual term by specific redefinition within the term. Global directives are limited to only those keywords allowed within a term definition. NAC: Network Access Control. Concerning issues related to security at layer 3 and 4 in the OSI model. Flow: A network flow, given as a tuple of the following form: (src(s), dst(s), src-port(s), dst-port(s), protocol) Service: A set of tuples of the form ((server/network), port, protocol, [application](application.md) ) that share a common logical function. Term: A flow to/from a service, the action applied to this flow, and where this action is enforced (e.g., what PEP(s)). A term is expressed in the form of a tuple: (src(s), dst(s), src-port(s), dst-port(s), protocol, action = {permit/drop/deny, etc.}, modifier(s) = {QOS, negation, counters, etc.}, PEP(s) ) Policy: A policy is the set of all terms which apply to a particular service. Rule: A rule is the device-specific implementation of a term. Rulebase: A rulebase is the set of all rules on a device. Logical Rule: The set of all device-specific implementations of a term. Logical Rulebase: The set of all device-specific implementations of terms pertaining to a particular policy. Narrative: The narrative is the English language description of a given service's policy, along with the justification for these policies and meta-information about the service. (Who is authorized to make changes, what the procedure is for making changes, etc.). One requirement is that the English language description is sufficiently unique that we have a mapping between section of narrative -> terms of policy -> rules of rulebase. PEP: Policy enforcement point. A PEP is any location on the network where the terms of a policy can be enforced as rules. LPEP: Logical PEP. The set of all devices and/or interfaces that enforce a logical security boundary. capirca-2.0.9/doc/wiki/Naming-library.md000066400000000000000000000067151437377527500200660ustar00rootroot00000000000000# Introduction The naming library is used by the capirca system to parse definitions of network and service data. These definitions are based on 'tokens' that are used in the high-level [policy language](Policy-format.md). 
## Basic Usage **Create a directory to hold the definitions files** ``` mkdir /path/to/definitions/directory ``` **Create network definitions files** _(network defintions files must end in '.net')_ ``` cat > /path/to/definitions/directory/NETWORKS.net INTERNAL = 10.0.0.0/8 # RFC1918 172.16.0.0/12 # RFC1918 192.168.0.0/16 # RFC1918 WEBSERVERS = 200.3.2.1/32 # webserver-1 200.3.2.4/32 # webserver-2 MAILSERVER = 200.3.2.5/32 # mailserver-1 ^D ``` **Create service definitions files** _(service defintions files must end in '.svc')_ ``` cat > /path/to/definitions/directory/SERVICES.svc HTTP = 80/tcp # web traffic MAIL = 25/tcp # smtp port 465/tcp # smtp over ssl DNS = 53/tcp 53/udp ^D ``` **Create a naming object** ``` from capirca import naming defs = naming.Naming('/path/to/definitions/directory') ``` **Access Definitions From the Naming Object** ``` defs.GetNet('INTERNAL') defs.GetService('MAIL') defs.GetServiceByProto('DNS','udp') ``` ## Methods ``` **GetIpParents(self, query)** > Return network tokens that contain IP in query. > Args: > > query: an ip string ('10.1.1.1') or nacaddr.IP object > Returns: > > rval2: a list of tokens containing this IP **GetNet(self, query)** > Expand a network token into a list of nacaddr.IP objects. > Args: > > query: Network definition token which may include comment text > Raises: > > BadNetmaskTypeError: Results when an unknown netmask\_type is > > specified. Acceptable values are 'cidr', 'netmask', and 'hostmask'. > Returns: > > List of nacaddr.IP objects > Raises: > > UndefinedAddressError: for an undefined token value **GetNetAddr(self, token)** > Given a network token, return a list of nacaddr.IP objects. > Args: > > token: A name of a network definition, such as 'INTERNAL' > Returns: > > A list of nacaddr.IP objects. > Raises: > > UndefinedAddressError: if the network name isn't defined. **GetService(self, query)** > Given a service name, return a list of associated ports and protocols. > Args: > > query: Service name symbol or token. > Returns: > > A list of service values such as ['80/tcp', '443/tcp', '161/udp', ...] **GetServiceByProto(self, query, proto)** > Given a service name, return list of ports in the service by protocol. > Args: > > query: Service name to lookup. > > proto: A particular protocol to restrict results by, such as 'tcp'. > Returns: > > A list of service values of type 'proto', such as ['80', '443', ...] **GetServiceParents(self, query)** > Given a service, return any tokens containing the value. > Args: > > query: a service or token name, such as 53/tcp or DNS > Returns: > > rval2: a list of tokens that contain query or parents of query **ParseNetworkList(self, data)** > Take an array of network data and import into class. > This method allows us to pass an array of data that contains network > definitions that are appended to any definitions read from files. > Args: > > data: array of text lines containing net definitions. **ParseServiceList(self, data)** > Take an array of service data and import into class. > This method allows us to pass an array of data that contains service > definitions that are appended to any definitions read from files. > Args: > > data: array of text lines containing service definitions. ``` capirca-2.0.9/doc/wiki/Policy-library.md000066400000000000000000000055021437377527500201050ustar00rootroot00000000000000# policy library The policy library (see `policy.py`) is intended for parsing the generic high-level policy files and returning a policy object for acl rendering. 
The policy library depends on a [naming library](Naming-library.md) object to be able to interpret network and service tokens. ## Basic Usage A policy object is created based on a policy text file. For information on how to define policy text files, please read the [Policy Format](PolicyFormat.md) documentation. For testing, you can use the policies provided in [policies/pol/](../../../policies/pol/). directory ## Using Policy Objects in Generators The following section is intended to help developers who would like to create new output generators, or to modify existing generators. ### Policy Object A policy object is collection of sections, such as header and terms, as well as their associated properties. Each section includes a variety of properties such as source/destination addresses, protocols, ports, actions, etc. The `policy.py` module generates policy objects from policy files. The `ParsePolicy()` creates a policy object by passing a string containing a policy to the `ParsePolicy()` class. ### Creating a Policy Object The steps are: 1. Create a [naming object](Naming-library.md) 1. Read the policy definition data in 1. Generate the policy object ```py from capirca import naming from capirca import policy definitions = naming.Naming('./def/') policy_text = open('./policies/sample.pol').read() policy_object = policy.ParsePolicy(policy_text, definitions) ``` The policy object is now available for use. Typically, this policy object will next be passed to one of the output generators for rendering an access control filter. ```py from capirca import juniper print juniper.Juniper(policy_object) # Headers for header, terms in policy.filters: > header.target > header.target.filter\_name # Terms for header, terms in policy.filters: # addresses - lists of nacaddr objects terms[x].address[] terms[x].destination_address[] terms[x].destination_address_exclude[] terms[x].source_address[] terms[x].source_address_exclude[] # ports - list of tuples. e.g. [(80, 80), (1024, 65535)] terms[x].port[] terms[x].destination_port[] terms[x].source_port[] # list of strings terms[x].action[] terms[x].comment[] terms[x].destination_prefix[] terms[x].protocol[] terms[x].protocol_except[] terms[x].option[] terms[x].source_prefix[] terms[x].traffic_type[] terms[x].verbatim[x].value[] # string terms[x].name terms[x].counter terms[x].ether_type terms[x].logging terms[x].loss_priority terms[x].packet_length terms[x].policer terms[x].precedence terms[x].qos terms[x].routing_instance terms[x].source_interface # integer terms[x].fragment_offset ``` capirca-2.0.9/doc/wiki/PolicyReader-library.md000066400000000000000000000054521437377527500212340ustar00rootroot00000000000000# Introduction The policy reader library is library that allows other code to easily examine policy source files. The policy library only reads policies for the purpose of rendering objects for passing to generators. For some tools, we needed to be able to easily examine the various filters and terms for programmatically. Policy reader renders simple objects that allow us to do this handy for a variety of tools, such as rendering policies in a Web UI for example ## Overview Import the policyreader library from the top Capirca directory. 
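For example, the import looks like the following (mirroring the Example Usage at the end of this page; the exact import path may differ depending on how capirca is installed):

```py
from lib import policyreader
```
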
Load a policy and set of definitions: ```py p = policyreader.Policy('policy_path', 'definitions_path') ``` Print out the policy: ``` print(p) ``` Search for terms matching specific criteria: ``` >>> p.Matches(src='1.1.1.1', dport='53/udp') [[0, 1]] ``` The result tuple indicates that a matching rule was found in Filter 0 at Term 1. You can print out the name of this term with: ``` print p.filter[0].term[1].name accept-to-honestdns ``` You can also display this entire specific term using: ``` print p.filter[0].term[1] Term: accept-to-honestdns Source-address:: Destination-address:: GOOGLE_DNS Source-port:: Destination-port:: DNS Protocol:: udp Option:: Action:: accept ``` You can examine the values of addresses or services as follows: ``` print p.defs.GetNet('GOOGLE_DNS') [IPv4('8.8.4.4/32'), IPv4('8.8.8.8/32'), IPv6('2001:4860:4860::8844/128'), IPv6('2001:4860:4860::8888/128')] >>> print p.defs.GetService('DNS') ['53/tcp', '53/udp'] ``` ## Example Usage ``` $ python >>> from lib import policyreader >>> p=policyreader.Policy('./policies/sample_cisco_lab.pol', './def/') >>> print p Filter: allowtointernet ----------------------- Term: accept-dhcp Source-address:: Destination-address:: Source-port:: Destination-port:: DHCP Protocol:: udp Option:: Action:: accept Term: accept-to-honestdns Source-address:: Destination-address:: GOOGLE_DNS Source-port:: Destination-port:: DNS Protocol:: udp Option:: Action:: accept Term: accept-tcp-replies Source-address:: Destination-address:: INTERNAL Source-port:: Destination-port:: Protocol:: tcp Option:: tcp-established Action:: accept Term: deny-to-internal Source-address:: Destination-address:: INTERNAL Source-port:: Destination-port:: Protocol:: Option:: Action:: deny Term: deny-to-specific_hosts Source-address:: Destination-address:: WEB_SERVERS MAIL_SERVERS Source-port:: Destination-port:: Protocol:: Option:: Action:: deny Term: default-permit Source-address:: Destination-address:: Source-port:: Destination-port:: Protocol:: Option:: Action:: accept >>> >>> p.defs.GetNet('INTERNAL') [IPv4('10.0.0.0/8'), IPv4('172.16.0.0/12'), IPv4('192.168.0.0/16')] >>> >>> p.defs.GetService('DNS') ['53/tcp', '53/udp'] ``` capirca-2.0.9/make_dist.sh000077500000000000000000000016151437377527500154520ustar00rootroot00000000000000#!/bin/bash # # Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Author: watson@google.com (Tony Watson) rev=`svn up|awk '{print $3}'` archive="capirca-r"$rev"tgz" filedir='./capirca' echo "Building: $archive" find . -name \*.pyc -exec rm {} \; pushd . > /dev/null cd .. tar -czf $archive --exclude-vcs $filedir mv $archive $filedir popd > /dev/null ls -al $archive echo "Done." 
capirca-2.0.9/policies/000077500000000000000000000000001437377527500147575ustar00rootroot00000000000000capirca-2.0.9/policies/includes/000077500000000000000000000000001437377527500165655ustar00rootroot00000000000000capirca-2.0.9/policies/includes/untrusted-networks-blocking.inc000066400000000000000000000005601437377527500247560ustar00rootroot00000000000000term deny-from-bogons { comment:: "this is a sample edge input filter with a very very very long and multi-line comment that" comment:: "also has multiple entries." source-address:: BOGON action:: deny } term deny-from-reserved { source-address:: RESERVED action:: deny } term deny-to-rfc1918 { destination-address:: RFC1918 action:: deny } capirca-2.0.9/policies/pol/000077500000000000000000000000001437377527500155515ustar00rootroot00000000000000capirca-2.0.9/policies/pol/sample_arista_tp.pol000066400000000000000000000134131437377527500216160ustar00rootroot00000000000000header { comment:: "sample arista traffic policy" target:: arista_tp MIXED-TRAFFIC-POLICY mixed } term accept-icmp { protocol:: icmp counter:: icmp-loopback icmp-type:: echo-request echo-reply action:: accept } term wonky-prots { protocol:: igmp egp rdp counter:: wonky-prots-loopback action:: accept owner:: foo@arista.com } term wonky-prots-except { protocol-except:: igmp egp rdp hopopt counter:: wonky-prots-loopback action:: accept } term accept-traceroute { comment:: "allow inbound traceroute from any source." destination-port:: TRACEROUTE protocol:: udp counter:: inbound-traceroute action:: accept expiration:: 2001-12-31 } term accept-bgp-requests { comment:: "Allow BGP requests from peers." source-prefix:: configured-neighbors-only destination-port:: BGP protocol:: tcp counter:: bgp-requests action:: accept } term accept-bgp-replies { comment:: "Allow inbound replies to BGP requests." source-prefix:: configured-neighbors-only source-port:: BGP protocol:: tcp option:: tcp-established counter:: bgp-replies action:: accept } term accept-ospf { comment:: "Allow outbound OSPF traffic from other RFC1918 routers." source-address:: INTERNAL protocol:: ospf counter:: ospf action:: accept } term LONG_MULTI_LINE_COMMENTS { comment:: "this is a sample inet6 edge input filter that has very long comments" comment:: "10 this term has several lines in its output." comment:: "20 this term has several lines in its output." comment:: "30 this term has several lines in its output." 
protocol:: icmp action:: accept } term allow-vrrp { protocol:: vrrp counter:: vrrp action:: accept } term accept-ike { source-port:: IKE destination-port:: IKE protocol:: udp counter:: ipsec-ike action:: accept } term accept-ipsec { protocol:: esp counter:: ipsec-esp action:: accept } term accept-pim { source-address:: INTERNAL protocol:: pim action:: accept } term accept-igmp { source-address:: INTERNAL protocol:: igmp action:: accept } term accept-ssh-requests { source-address:: INTERNAL destination-port:: SSH protocol:: tcp counter:: ssh action:: accept } term accept-ssh-replies { source-port:: SSH protocol:: tcp option:: tcp-established counter:: ssh-replies action:: accept } term accept-snmp-requests { source-address:: INTERNAL destination-address:: INTERNAL destination-port:: SNMP protocol:: udp action:: accept } term accept-dns-replies { source-address:: INTERNAL destination-address:: INTERNAL source-port:: DNS protocol:: udp option:: established counter:: dns-replies action:: accept } term allow-ntp-request { source-address:: NTP_SERVERS destination-address:: INTERNAL destination-port:: NTP protocol:: udp counter:: ntp-request action:: accept } term allow-ntp-replies { source-address:: INTERNAL destination-address:: NTP_SERVERS source-port:: NTP protocol:: udp option:: established counter:: ntp-replies action:: accept } term allow-radius-replies { source-address:: INTERNAL destination-address:: INTERNAL source-port:: RADIUS protocol:: udp counter:: radius-replies action:: accept } term allow-tacacs-requests { source-address:: INTERNAL destination-address:: TACACS_SERVERS destination-port:: TACACS protocol:: tcp counter:: tacacs-requests action:: accept } term allow-tacacs-replies { source-address:: TACACS_SERVERS destination-address:: INTERNAL source-port:: TACACS protocol:: tcp option:: tcp-established counter:: tacacs-replies action:: accept } # feature conflict, will not be rendered term allow-dns-fragments { source-address:: ANY source-exclude:: PUBLIC_NAT destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: tcp udp option:: is-fragment action:: accept } # will be rendered term allow-dns-foo-exc-test { source-address:: ANY source-exclude:: GOOGLE_DNS destination-port:: DNS protocol:: tcp udp action:: accept } term ratelimit-large-dns { destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp packet-length:: 500-5000 counter:: large-dns-counter action:: accept } term invalid-action-next { destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp packet-length:: 500-5000 counter:: large-dns-counter action:: next } term reject-large-dns { destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp packet-length:: 500-5000 action:: reject } term reject-imap-requests { destination-address:: MAIL_SERVERS destination-port:: IMAP protocol:: tcp counter:: reject.imap.requests action:: reject-with-tcp-rst } term MIXED_INET { source-address:: GOOGLE_DNS destination-address:: INTERNAL protocol:: tcp udp action:: accept } term INET_MIXED { source-address:: INTERNAL destination-address:: GOOGLE_DNS protocol:: tcp udp action:: accept } term MIXED_INET6 { source-address:: GOOGLE_DNS destination-address:: SITELOCAL action:: accept } term INET6_MIXED { source-address:: SITELOCAL destination-address:: GOOGLE_DNS action:: accept } term MIXED_MIXED { source-address:: GOOGLE_DNS destination-address:: GOOGLE_DNS action:: accept } term MIXED_ANY { source-address:: GOOGLE_DNS action:: accept } term ANY_MIXED { destination-address:: GOOGLE_DNS action:: 
accept } term INET_INET { source-address:: NTP_SERVERS destination-address:: INTERNAL action:: accept } term INET6_INET6 { source-address:: SITELOCAL destination-address:: SITELOCAL action:: accept } term INET_INET6 { source-address:: INTERNAL destination-address:: SITELOCAL action:: accept } term INET6_INET { source-address:: SITELOCAL destination-address:: INTERNAL action:: accept } term default-discard { counter:: default-discard action:: deny } capirca-2.0.9/policies/pol/sample_cisco_lab.pol000066400000000000000000000023501437377527500215440ustar00rootroot00000000000000header { comment:: "Denies all traffic to internal IPs except established tcp replies." comment:: "Also denies access to certain public allocations." comment:: "Ideal for some internal lab/testing types of subnets that are" comment:: "not well trusted, but allowing internal users to access." comment:: "Apply to ingress interface (to filter traffic coming from lab)" target:: cisco allowtointernet } term accept-dhcp { comment:: "Optional - allow forwarding of DHCP requests." destination-port:: DHCP protocol:: udp action:: accept } term accept-to-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } term accept-tcp-replies { comment:: "Allow tcp replies to internal hosts." destination-address:: INTERNAL protocol:: tcp option:: tcp-established action:: accept } term deny-to-internal { comment:: "Deny access to rfc1918/internal." destination-address:: INTERNAL action:: deny } term deny-to-specific_hosts { comment:: "Deny access to specified public." destination-address:: WEB_SERVERS MAIL_SERVERS action:: deny } term default-permit { comment:: "Allow what's left." action:: accept } capirca-2.0.9/policies/pol/sample_cisconx.pol000066400000000000000000000023521437377527500212760ustar00rootroot00000000000000header { comment:: "Denies all traffic to internal IPs except established tcp replies." comment:: "Also denies access to certain public allocations." comment:: "Ideal for some internal lab/testing types of subnets that are" comment:: "not well trusted, but allowing internal users to access." comment:: "Apply to ingress interface (to filter traffic coming from lab)" target:: cisconx allowtointernet } term accept-dhcp { comment:: "Optional - allow forwarding of DHCP requests." destination-port:: DHCP protocol:: udp action:: accept } term accept-to-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } term accept-tcp-replies { comment:: "Allow tcp replies to internal hosts." destination-address:: INTERNAL protocol:: tcp option:: tcp-established action:: accept } term deny-to-internal { comment:: "Deny access to rfc1918/internal." destination-address:: INTERNAL action:: deny } term deny-to-specific_hosts { comment:: "Deny access to specified public." destination-address:: WEB_SERVERS MAIL_SERVERS action:: deny } term default-permit { comment:: "Allow what's left." 
action:: accept } capirca-2.0.9/policies/pol/sample_cloudarmor.pol000066400000000000000000000011541437377527500217760ustar00rootroot00000000000000# # This is an sample policy for capirca # Note: Address family 'mixed' includes both IPv4 and IPv6; use 'inet' for IPv4 # and 'inet6' for IPv6 # header { comment:: "This is a sample policy to generate a CloudArmor filter" target:: cloudarmor mixed } term allow-trusted-sources { comment:: "Allow access from company's trusted IP space" source-address:: PUBLIC_NAT action:: accept } term deny-bogon-sources { comment:: "Deny access to sources with bogon IP addresses" source-address:: BOGON action:: deny } term default-deny { comment:: "Default deny rule" source-address:: ANY action:: deny } capirca-2.0.9/policies/pol/sample_gce.pol000066400000000000000000000047571437377527500204010ustar00rootroot00000000000000# # This is an example policy for capirca # Target defaults to INGRESS is not specified in the header # header { comment:: "this is a sample policy to generate GCE filter" target:: gce global/networks/default inet } term test-ssh { comment:: "Allow SSH access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: accept } term test-web { comment:: "Allow HTTP/S to instances with webserver tag." source-address:: ANY protocol:: tcp destination-port:: HTTP destination-tag:: webserver action:: accept } term test-icmp { comment:: "Allow ICMP from company." source-address:: PUBLIC_NAT protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from company. This should not be rendered." source-address:: PUBLIC_NAT protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from company." source-address:: PUBLIC_NAT protocol:: igmp action:: accept } term test-multiple-protocols { comment:: "Allow TCP/UDP access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6. This should only render tcp." source-address:: PUBLIC_NAT protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp." source-address:: PUBLIC_NAT protocol:: tcp icmp action:: accept } term test-internal { comment:: "Allow all GCE network internal traffic." source-address:: RFC1918 protocol:: tcp udp action:: accept } term default-deny { action:: deny } # # Sample EGRESS policy # If source-tag is included, it maps to targetTags in the GCP Egress rule # header { comment:: "this is a sample policy to generate EGRESS GCE filter" target:: gce EGRESS global/networks/default inet } term test-egress-address { comment:: "Outbound to Mail Server" protocol:: tcp destination-port:: SMTP destination-address:: MAIL_SERVERS action:: accept } term test-egress-tag { comment:: "Outbound to RFC1918" protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_NAT source-tag:: webserver action:: accept } term test-egress-address-v6-only { comment:: "Outbound to IPv6 Server. This should not be rendered." 
protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS action:: accept } term default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_gce_vpc_tf.pol000066400000000000000000000053231437377527500217300ustar00rootroot00000000000000# # This is an example policy for capirca # Target defaults to INGRESS is not specified in the header # header { comment:: "this is a sample policy to generate gce_vpc_tf config" target:: gce_vpc_tf global/networks/default inet } term test-ssh { comment:: "Allow SSH access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: accept } term test-web { comment:: "Allow HTTP/S to instances with webserver tag." source-address:: ANY protocol:: tcp destination-port:: HTTP destination-tag:: webserver action:: accept } term test-icmp { comment:: "Allow ICMP from company." source-address:: PUBLIC_NAT protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from company. This should not be rendered." source-address:: PUBLIC_NAT protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from company." source-address:: PUBLIC_NAT protocol:: igmp action:: accept } term test-multiple-protocols { comment:: "Allow TCP/UDP access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6. This should only render tcp." source-address:: PUBLIC_NAT protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp." source-address:: PUBLIC_NAT protocol:: tcp icmp action:: accept } term test-internal { comment:: "Allow all GCE network internal traffic." source-address:: RFC1918 protocol:: tcp udp action:: accept } term test-service-account { comment:: "Allow Service account access to SSH." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH target-service-accounts:: acct@blah.com action:: accept } term default-deny { action:: deny } # # Sample EGRESS policy # If source-tag is included, it maps to targetTags in the GCP Egress rule # header { comment:: "this is a sample policy to generate EGRESS GCE filter" target:: gce EGRESS global/networks/default inet } term test-egress-address { comment:: "Outbound to Mail Server" protocol:: tcp destination-port:: SMTP destination-address:: MAIL_SERVERS action:: accept } term test-egress-tag { comment:: "Outbound to RFC1918" protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_NAT source-tag:: webserver action:: accept } term test-egress-address-v6-only { comment:: "Outbound to IPv6 Server. This should not be rendered." protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS action:: accept } term default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_gcp_hf.pol000066400000000000000000000041751437377527500210630ustar00rootroot00000000000000header { comment:: "This is sample policy for GCP HF" target:: gcp_hf testhf INGRESS inet beta 200 } term folder-ssh{ comment:: "Allow SSH into folder-test" source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: next } term folder-rdp { comment:: "Allow RDP to targetResources. Making long comment to show truncation." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: RDP target-resources:: (proj-1,vpc1) action:: next } term test-icmp { comment:: "Allow ICMP from company." 
source-address:: PUBLIC_NAT protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from company. This should not be rendered." source-address:: PUBLIC_NAT protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from company." source-address:: PUBLIC_NAT protocol:: igmp action:: accept } term test-multiple-protocols { comment:: "Allow TCP/UDP access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6. This should only render tcp." source-address:: PUBLIC_NAT protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp." source-address:: PUBLIC_NAT protocol:: tcp icmp action:: accept } term default-deny { action:: deny } header { comment:: "This is sample policy for GCP HF" target:: gcp_hf testhf EGRESS inet 200 } term folder-smtp{ comment:: "Allow egress to mail servers" protocol:: tcp destination-port:: SMTP destination-address:: MAIL_SERVERS action:: next } term folder-ssh-nat{ comment:: "Allow egress ssh to RFC1918" protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_NAT target-resources:: (proj-2,vpc2) action:: next } term test-egress-address-v6-only { comment:: "Outbound to IPv6 Server. This should not be rendered." protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS action:: accept } term default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_inet6_gce.pol000066400000000000000000000063441437377527500215000ustar00rootroot00000000000000# # This is an example inet6 (i.e IPv6) policy for capirca # Target defaults to INGRESS is not specified in the header # header { comment:: "this is a sample policy to generate GCE filter" target:: gce global/networks/default inet6 } term test-ssh-v6 { comment:: "Allow SSH access from IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp destination-port:: SSH action:: accept } term test-ssh-v4 { comment:: "This term should not be rendered since it is IPv4 only." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: accept } term test-multiple-protocols { comment:: "Allow high port access from a public IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6 from IPv6 Server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp from IPv6 server. This should only render" comment:: "tcp." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp icmp action:: accept } term test-web { comment:: "Allow HTTP/S to instances with webserver tag and any IPs." source-tag:: webserver source-address:: ANY_V6 protocol:: tcp destination-port:: HTTP destination-tag:: other-webserver action:: accept } term test-web-tag-only { comment:: "Allow HTTP/S to instances with webserver tag only." source-tag:: webserver protocol:: tcp destination-port:: HTTP destination-tag:: other-webserver action:: accept } term test-web-tag-v4-only { comment:: "Allow HTTP/S to instances with webserver tag. This should not" comment:: "be rendered." source-address:: ANY source-tag:: webserver protocol:: tcp destination-port:: HTTP destination-tag:: other-webserver action:: accept } term test-icmp { comment:: "Allow ICMP from company. 
This should not be rendered." source-address:: PUBLIC_NAT protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from IPv6 server. This should not be rendered." source-address:: PUBLIC_IPV6_SERVERS protocol:: igmp action:: accept } term default-deny { action:: deny } # # Sample EGRESS policy # If source-tag is included, it maps to targetTags in the GCP Egress rule # header { comment:: "this is a sample policy to generate EGRESS GCE filter" target:: gce EGRESS global/networks/default inet6 } term test-egress-address { comment:: "Outbound to IPv6 Server." protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS action:: accept } term test-egress-tag { comment:: "Outbound to IPv6 Server." protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_IPV6_SERVERS source-tag:: webserver action:: accept } term test-egress-tag-v4-only { comment:: "Outbound to RFC1918. This should not be rendered." protocol:: tcp destination-port:: SSH destination-address:: RFC1918 source-tag:: webserver action:: accept } term egress-default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_inet6_gcp_hf.pol000066400000000000000000000047371437377527500221740ustar00rootroot00000000000000header { comment:: "This is sample IPv6 policy for GCP HF" target:: gcp_hf testhf INGRESS inet6 ga 200 } term folder-ssh-v6 { comment:: "Allow SSH into folder-test" source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp destination-port:: SSH action:: next } term test-folder-ssh-v4 { comment:: "This term should not be rendered since it is IPv4 only." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: next } term folder-rdp { comment:: "Allow RDP to targetResources. Making long comment to show truncation." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp destination-port:: RDP target-resources:: (proj-1,vpc1) action:: next } term test-multiple-protocols { comment:: "Allow high port access from a public IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6 from IPv6 Server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp from IPv6 server. This should only render" comment:: "tcp." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp icmp action:: accept } term test-icmp { comment:: "Allow ICMP from company. This should not be rendered." source-address:: PUBLIC_NAT protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from IPv6 server. This should not be rendered." source-address:: PUBLIC_IPV6_SERVERS protocol:: igmp action:: accept } term default-deny { action:: deny } header { comment:: "This is sample IPv6 policy for GCP HF" target:: gcp_hf testhf EGRESS inet6 ga 200 } term folder-smtp{ comment:: "Outbound SMTP to IPv6 Server." protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS action:: next } term folder-ssh-nat{ comment:: "Outbound to IPv6 Server." 
  protocol:: tcp
  destination-port:: SSH
  destination-address:: PUBLIC_IPV6_SERVERS
  target-resources:: (proj-2,vpc2)
  action:: next
}

term test-egress-address-v4-only {
  comment:: "Outbound to RFC1918. This should not be rendered."
  protocol:: tcp
  destination-port:: SSH
  destination-address:: RFC1918
  action:: accept
}

term egress-default-deny {
  action:: deny
}

capirca-2.0.9/policies/pol/sample_ipset.pol

header {
  target:: ipset OUTPUT DROP
}

term deny-to-reserved {
  destination-address:: RESERVED
  action:: deny
}

term deny-to-bogons {
  destination-address:: BOGON
  action:: deny
}

term allow-web-to-mail {
  source-address:: WEB_SERVERS
  destination-address:: MAIL_SERVERS
  action:: accept
}

capirca-2.0.9/policies/pol/sample_juniper_loopback.pol

header {
  comment:: "Sample Juniper loopback filter"
  target:: juniper LOOPBACK
}

term accept-icmp {
  protocol:: icmp
  counter:: icmp-loopback
  policer:: rate-limit-icmp
  action:: accept
}

term accept-traceroute {
  comment:: "Allow inbound traceroute from any source."
  destination-port:: TRACEROUTE
  protocol:: udp
  counter:: inbound-traceroute
  policer:: rate-limit-to-router
  action:: accept
  expiration:: 2001-12-31
  owner:: jeff
}

term accept-bgp-requests {
  comment:: "Allow BGP requests from peers."
  source-prefix:: configured-neighbors-only
  destination-port:: BGP
  protocol:: tcp
  counter:: bgp-requests
  action:: accept
}

term accept-bgp-replies {
  comment:: "Allow inbound replies to BGP requests."
  source-prefix:: configured-neighbors-only
  source-port:: BGP
  protocol:: tcp
  option:: tcp-established
  counter:: bgp-replies
  action:: accept
}

term accept-ospf {
  comment:: "Allow outbound OSPF traffic from other RFC1918 routers."
  source-address:: INTERNAL
  protocol:: ospf
  counter:: ospf
  action:: accept
}

term allow-vrrp {
  protocol:: vrrp
  counter:: vrrp
  action:: accept
}

term accept-ike {
  source-port:: IKE
  destination-port:: IKE
  protocol:: udp
  counter:: ipsec-ike
  action:: accept
}

term accept-ipsec {
  protocol:: esp
  counter:: ipsec-esp
  action:: accept
}

term accept-pim {
  source-address:: INTERNAL
  protocol:: pim
  action:: accept
}

term accept-igmp {
  source-address:: INTERNAL
  protocol:: igmp
  action:: accept
}

term accept-ssh-requests {
  source-address:: INTERNAL
  destination-port:: SSH
  protocol:: tcp
  counter:: ssh
  action:: accept
}

term accept-ssh-replies {
  source-port:: SSH
  protocol:: tcp
  option:: tcp-established
  counter:: ssh-replies
  action:: accept
}

term accept-snmp-requests {
  source-address:: INTERNAL
  destination-address:: INTERNAL
  destination-port:: SNMP
  protocol:: udp
  action:: accept
}

term accept-dns-replies {
  source-address:: INTERNAL
  destination-address:: INTERNAL
  source-port:: DNS
  protocol:: udp
  option:: established
  counter:: dns-replies
  action:: accept
}

term allow-ntp-request {
  source-address:: NTP_SERVERS
  destination-address:: INTERNAL
  destination-port:: NTP
  protocol:: udp
  counter:: ntp-request
  action:: accept
}

term allow-ntp-replies {
  source-address:: INTERNAL
  destination-address:: NTP_SERVERS
  source-port:: NTP
  protocol:: udp
  option:: established
  counter:: ntp-replies
  action:: accept
}

term allow-radius-replies {
  source-address:: INTERNAL
  destination-address:: INTERNAL
  source-port:: RADIUS
  protocol:: udp
  counter:: radius-replies
  action:: accept
}

term allow-tacacs-requests {
  source-address:: INTERNAL
  destination-address:: TACACS_SERVERS
  destination-port:: TACACS
  protocol:: tcp
  counter:: tacacs-requests
  action:: accept
}

term allow-tacacs-replies {
  source-address:: TACACS_SERVERS
  destination-address:: INTERNAL
  source-port:: TACACS
  protocol:: tcp
  option:: tcp-established
  counter:: tacacs-replies
  action:: accept
}

term allow-dns-fragments {
  source-address:: ANY
  source-exclude:: PUBLIC_NAT
  destination-address:: GOOGLE_DNS
  destination-port:: DNS
  protocol:: tcp udp
  option:: is-fragment
  action:: accept
}

term ratelimit-large-dns {
  destination-address:: GOOGLE_DNS
  destination-port:: DNS
  protocol:: udp
  packet-length:: 500-5000
  counter:: large-dns-counter
  policer:: large-dns-policer
  option:: sample
  action:: next
}

term reject-large-dns {
  destination-address:: GOOGLE_DNS
  destination-port:: DNS
  protocol:: udp
  packet-length:: 500-5000
  action:: reject
}

term reject-imap-requests {
  destination-address:: MAIL_SERVERS
  destination-port:: IMAP
  protocol:: tcp
  action:: reject-with-tcp-rst
}

term next-filter {
  filter-term:: my-next-filter
}

term af-mismatch {
  comment:: "Will not be generated as target is inet"
  comment:: "but address_family is inet6"
  destination-address:: INTERNAL
  restrict-address-family:: inet6
  action:: reject
}

term discard-default {
  counter:: discard-default
  action:: deny
}

capirca-2.0.9/policies/pol/sample_juniperevo_loopback.pol

header {
  comment:: "Sample Juniper EVO loopback filter"
  target:: juniperevo LOOPBACK inet ingress loopback
}

term accept-icmp {
  protocol:: icmp
  counter:: icmp-loopback
  policer:: rate-limit-icmp
  action:: accept
}

term accept-traceroute {
  comment:: "Allow inbound traceroute from any source."
destination-port:: TRACEROUTE protocol:: udp counter:: inbound-traceroute policer:: rate-limit-to-router action:: accept expiration:: 2001-12-31 owner:: jeff } term accept-bgp-requests { comment:: "Allow BGP requests from peers." source-prefix:: configured-neighbors-only destination-port:: BGP protocol:: tcp counter:: bgp-requests action:: accept } term accept-bgp-replies { comment:: "Allow inbound replies to BGP requests." source-prefix:: configured-neighbors-only source-port:: BGP protocol:: tcp option:: tcp-established counter:: bgp-replies action:: accept } term accept-ospf { comment:: "Allow outbound OSPF traffic from other RFC1918 routers." source-address:: INTERNAL protocol:: ospf counter:: ospf action:: accept } term allow-vrrp { protocol:: vrrp counter:: vrrp action:: accept } term accept-ike { source-port:: IKE destination-port:: IKE protocol:: udp counter:: ipsec-ike action:: accept } term accept-ipsec { protocol:: esp counter:: ipsec-esp action:: accept } term accept-pim { source-address:: INTERNAL protocol:: pim action:: accept } term accept-igmp { source-address:: INTERNAL protocol:: igmp action:: accept } term accept-ssh-requests { source-address:: INTERNAL destination-port:: SSH protocol:: tcp counter:: ssh action:: accept } term accept-ssh-replies { source-port:: SSH protocol:: tcp option:: tcp-established counter:: ssh-replies action:: accept } term accept-snmp-requests { source-address:: INTERNAL destination-address:: INTERNAL destination-port:: SNMP protocol:: udp action:: accept } term accept-dns-replies { source-address:: INTERNAL destination-address:: INTERNAL source-port:: DNS protocol:: udp option:: established counter:: dns-replies action:: accept } term allow-ntp-request { source-address:: NTP_SERVERS destination-address:: INTERNAL destination-port:: NTP protocol:: udp counter:: ntp-request action:: accept } term allow-ntp-replies { source-address:: INTERNAL destination-address:: NTP_SERVERS source-port:: NTP protocol:: udp option:: established counter:: ntp-replies action:: accept } term allow-radius-replies { source-address:: INTERNAL destination-address:: INTERNAL source-port:: RADIUS protocol:: udp counter:: radius-replies action:: accept } term allow-tacacs-requests { source-address:: INTERNAL destination-address:: TACACS_SERVERS destination-port:: TACACS protocol:: tcp counter:: tacacs-requests action:: accept } term allow-tacacs-replies { source-address:: TACACS_SERVERS destination-address:: INTERNAL source-port:: TACACS protocol:: tcp option:: tcp-established counter:: tacacs-replies action:: accept } term allow-dns-fragments { source-address:: ANY source-exclude:: PUBLIC_NAT destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: tcp udp option:: is-fragment action:: accept } term ratelimit-large-dns { destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp packet-length:: 500-5000 counter:: large-dns-counter policer:: large-dns-policer option:: sample action:: next } term reject-large-dns { destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp packet-length:: 500-5000 action:: reject } term reject-imap-requests { destination-address:: MAIL_SERVERS destination-port:: IMAP protocol:: tcp action:: reject-with-tcp-rst } term next-filter { filter-term:: my-next-filter } term af-mismatch { comment:: "Will not be generated as target is inet" comment:: "but address_family is inet6" destination-address:: INTERNAL restrict-address-family:: inet6 action:: reject } term discard-default { counter:: discard-default action:: 
deny } capirca-2.0.9/policies/pol/sample_k8s.pol000066400000000000000000000034431437377527500203370ustar00rootroot00000000000000# # This is an example policy for capirca # Target defaults to INGRESS is not specified in the header # header { comment:: "this is a sample policy to generate Kubernetes NetworkPolicy filter" target:: k8s } term test-ssh { comment:: "Allow SSH access to all pods from company." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: accept } term test-web { comment:: "Allow HTTP to pods" source-address:: ANY protocol:: tcp destination-port:: HTTP action:: accept } term test-multiple-protocols { comment:: "Allow TCP/UDP access to all pods from company." source-address:: PUBLIC_NAT protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-sctp { comment:: "Allow all tcp and sctp." source-address:: PUBLIC_NAT protocol:: tcp sctp action:: accept } term test-internal { comment:: "Allow all network internal traffic." source-address:: RFC1918 protocol:: tcp udp action:: accept } term default-deny { action:: deny } # # Sample EGRESS policy # If source-tag is included, it maps to targetTags in the Kubernetes NetworkPolicy Egress rule # header { comment:: "this is a sample policy to generate EGRESS Kubernetes NetworkPolicy filter" target:: k8s EGRESS } term test-egress-address { comment:: "Outbound to Mail Server" protocol:: tcp destination-port:: SMTP destination-address:: MAIL_SERVERS action:: accept } term test-egress-tag { comment:: "Outbound to RFC1918" protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_NAT action:: accept } term test-egress-address-v6-only { comment:: "Outbound to IPv6 Server" protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS action:: accept } term default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_mixed_gce.pol000066400000000000000000000075631437377527500215650ustar00rootroot00000000000000# # This is an example inet6 (i.e IPv6) policy for capirca # Target defaults to INGRESS is not specified in the header # header { comment:: "this is a sample policy to generate GCE filter" target:: gce global/networks/default mixed } term test-ssh-mixed { comment:: "Allow SSH access from Server and Company with mixed addresses." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: accept } term test-ssh-v6 { comment:: "Allow SSH access from IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp destination-port:: SSH action:: accept } term test-ssh-v4 { comment:: "Allow SSH access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: accept } term test-multiple-protocols { comment:: "Allow high port access from a public IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6 from IPv6 Server." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp icmp action:: accept } term test-multiple-protocols-tcp-icmpv6-v6-only { comment:: "Allow all tcp and icmpv6." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp-v4-only { comment:: "Allow all tcp and icmp." 
source-address:: PUBLIC_NAT protocol:: tcp icmp action:: accept } term test-web { comment:: "Allow HTTP/S to instances with webserver tag and any IPs." source-tag:: webserver source-address:: ANY_MIXED protocol:: tcp destination-port:: HTTP destination-tag:: other-webserver action:: accept } term test-web-tag-only { comment:: "Allow HTTP/S to instances with webserver tag only." source-tag:: webserver protocol:: tcp destination-port:: HTTP destination-tag:: other-webserver action:: accept } term test-web-tag-v4-only { comment:: "Allow HTTP/S to instances with webserver tag." source-address:: ANY source-tag:: webserver protocol:: tcp destination-port:: HTTP destination-tag:: other-webserver action:: accept } term test-icmp { comment:: "Allow ICMP from company." source-address:: PUBLIC_NAT protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from server and company with mixed addresses." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: igmp action:: accept } term default-deny { action:: deny } # # Sample EGRESS policy # If source-tag is included, it maps to targetTags in the GCP Egress rule # header { comment:: "this is a sample policy to generate EGRESS GCE filter" target:: gce EGRESS global/networks/default mixed } term test-egress-address { comment:: "Outbound to Server with mixed addresses." protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT action:: accept } term test-egress-tag { comment:: "Outbound to Server with tag." protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT source-tag:: webserver action:: accept } term test-egress-tag-v4-only { comment:: "Outbound to RFC1918." protocol:: tcp destination-port:: SSH destination-address:: RFC1918 source-tag:: webserver action:: accept } term test-egress-tag-v6-only { comment:: "Outbound to IPv6 Server." protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_IPV6_SERVERS source-tag:: webserver action:: accept } term egress-default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_mixed_gcp_hf.pol000066400000000000000000000064051437377527500222470ustar00rootroot00000000000000header { comment:: "This is sample IPv6 policy for GCP HF" target:: gcp_hf testhf INGRESS mixed ga 200 } term test-folder-ssh-mixed { comment:: "Allow SSH from Server and Company with mixed addresses." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: next } term test-folder-ssh-v6 { comment:: "Allow SSH access from IPv6 server." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp destination-port:: SSH action:: next } term test-folder-ssh-v4 { comment:: "Allow SSH access to all instances from company." source-address:: PUBLIC_NAT protocol:: tcp destination-port:: SSH action:: next } term folder-rdp { comment:: "Allow RDP to targetResources. Making long comment to show truncation." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp destination-port:: RDP target-resources:: (proj-1,vpc1) action:: next } term test-multiple-protocols { comment:: "Allow high port access from mixed address servers." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } term test-multiple-protocols-tcp-icmpv6 { comment:: "Allow all tcp and icmpv6 from mixed address servers." 
source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp { comment:: "Allow all tcp and icmp from mixed address servers." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: tcp icmp action:: accept } term test-multiple-protocols-tcp-icmpv6-v6-only { comment:: "Allow all tcp and icmpv6." source-address:: PUBLIC_IPV6_SERVERS protocol:: tcp icmpv6 action:: accept } term test-multiple-protocols-tcp-icmp-v4-only { comment:: "Allow all tcp and icmp." source-address:: PUBLIC_NAT protocol:: tcp icmp action:: accept } term test-icmp { comment:: "Allow ICMP from mixed server." source-address:: PUBLIC_NAT PUBLIC_IPV6_SERVERS protocol:: icmp action:: accept } term test-icmpv6 { comment:: "Allow ICMPv6 from mixed server." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: icmpv6 action:: accept } term test-igmp { comment:: "Allow IGMP from mixed server. This should rendered for IPv4 only." source-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT protocol:: igmp action:: accept } term default-deny { action:: deny } header { comment:: "This is sample IPv6 policy for GCP HF" target:: gcp_hf testhf EGRESS mixed ga 200 } term folder-smtp{ comment:: "Outbound SMTP to mixed address servers." protocol:: tcp destination-port:: SMTP destination-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT action:: next } term folder-ssh-nat{ comment:: "Outbound to mixed address servers." protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_IPV6_SERVERS PUBLIC_NAT target-resources:: (proj-2,vpc2) action:: next } term test-egress-address-v4-only { comment:: "Outbound SSH to RFC1918." protocol:: tcp destination-port:: SSH destination-address:: RFC1918 action:: accept } term test-egress-address-v6-only { comment:: "Outbound SSH to public IPv6 server." protocol:: tcp destination-port:: SSH destination-address:: PUBLIC_IPV6_SERVERS action:: accept } term egress-default-deny { action:: deny } capirca-2.0.9/policies/pol/sample_msmpc.pol000066400000000000000000000066151437377527500207550ustar00rootroot00000000000000header { comment:: "this is a sample edge input filter that generates" comment:: "msmpc output formats." target:: msmpc edge-inbound mixed ingress } #include 'includes/untrusted-networks-blocking.inc' term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } term permit-web-services { destination-address:: WEB_SERVERS protocol:: tcp destination-port:: WEB_SERVICES action:: accept } term permit-tcp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp option:: tcp-established action:: accept } term permit-udp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: udp source-port:: HIGH_PORTS action:: accept } term default-deny { action:: deny logging:: true } header { comment:: "this is a sample inet edge input filter that generates" comment:: "msmpc output formats." 
target:: msmpc edge-inbound-ipv4 inet ingress } #include 'includes/untrusted-networks-blocking.inc' term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } term permit-web-services { destination-address:: WEB_SERVERS protocol:: tcp destination-port:: WEB_SERVICES action:: accept } term permit-tcp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp option:: tcp-established action:: accept } term permit-udp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: udp source-port:: HIGH_PORTS action:: accept } term default-deny { action:: deny logging:: true } header { comment:: "this is a sample inet6 edge input filter that generates" comment:: "msmpc output formats." target:: msmpc edge-inbound-ipv6 inet6 ingress } #include 'includes/untrusted-networks-blocking.inc' term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } term permit-web-services { destination-address:: WEB_SERVERS protocol:: tcp destination-port:: WEB_SERVICES action:: accept } term permit-tcp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp option:: tcp-established action:: accept } term permit-udp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: udp source-port:: HIGH_PORTS action:: accept } term default-deny { action:: deny logging:: true } header { comment:: "this is a sample msmpc output filter" target:: msmpc edge-outbound mixed egress } term deny-to-bad-destinations { destination-address:: RFC1918 BOGON RESERVED action:: deny } term default-accept { action:: accept } header { comment:: "this is a sample msmpc input-output filter" target:: msmpc edge-outbound mixed } term deny-to-bad-destinations { destination-address:: RFC1918 BOGON RESERVED action:: deny } term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } term permit-web-services { destination-address:: WEB_SERVERS protocol:: tcp destination-port:: WEB_SERVICES action:: accept } term permit-mail-outbound { source-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp udp action:: accept } term default-deny { action:: deny logging:: true } capirca-2.0.9/policies/pol/sample_multitarget.pol000066400000000000000000000035111437377527500221670ustar00rootroot00000000000000# # This is an example policy for capirca # header { comment:: "this is a sample edge input filter that generates" comment:: "multiple output formats." 
# NOTES: iptables produces filter 'lines' that must be used as args to the # '$ iptables' cmd, while Speedway produces stateful iptables filters # compatible with iptables-restore (most people will prefer speedway) target:: juniper edge-inbound inet target:: juniperevo edge-inbound inet6 ingress target:: msmpc edge-inbound mixed ingress target:: cisco edge-inbound mixed target:: speedway INPUT target:: ciscoasa asa_in target:: demo edge-inbound target:: arista edge-inbound target:: arista_tp edge-inbound target:: brocade edge-inbound target:: cisconx edge-inbound target:: ciscoxr edge-inbound } #include 'includes/untrusted-networks-blocking.inc' term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } term permit-web-services { destination-address:: WEB_SERVERS protocol:: tcp destination-port:: WEB_SERVICES action:: accept } term permit-tcp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp option:: tcp-established action:: accept } term permit-udp-established { destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: udp source-port:: HIGH_PORTS action:: accept } term default-deny { action:: deny } header { comment:: "this is a sample output filter" target:: juniper edge-outbound target:: msmpc edge-outbound mixed egress target:: arista_tp edge-outbound target:: cisco edge-outbound mixed target:: speedway OUTPUT target:: ciscoasa asa_out } term deny-to-bad-destinations { destination-address:: RFC1918 BOGON RESERVED action:: deny } term default-accept { action:: accept } capirca-2.0.9/policies/pol/sample_nftables-dev.pol000066400000000000000000000017501437377527500222030ustar00rootroot00000000000000# # NFTables generator policy example. # Intended to render ICMP terms for both IPv4 and IPv6 families. # header { comment:: "This policy validates handling of term.option tcp-established." comment:: "and UDP 'established'" target:: nftables inet6 INPUT } term accept-webserver-traffic { comment:: "Allow webserver inbound traffic." destination-address:: WEB_SERVERS destination-port:: WEB_SERVICES protocol:: tcp action:: accept } term test-tcp-established { comment:: "Allow tcp-established traffic." destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp udp action:: accept } term permit-tcp-replies { option:: tcp-established action:: accept } term test-dns-replies { comment:: "Allow DNS replies, and test udp established option." comment:: "This should not be generated since this is a stateful policy." source-port:: DNS destination-address:: INTERNAL protocol:: udp logging:: syslog counter:: dns-counter action:: accept } capirca-2.0.9/policies/pol/sample_nftables-mixed-icmp.pol000066400000000000000000000027311437377527500234610ustar00rootroot00000000000000# # NFTables generator policy example. # Intended to render ICMP terms for both IPv4 and IPv6 families. # header { comment:: "This policy expected to test every combination of REQUIRED keywords." target:: nftables mixed INPUT } term multi-all-families { comment:: "Validate multiple protocols." protocol:: udp tcp icmp icmpv6 action:: accept } term multi-proto-term { comment:: "Validate multiple protocols. IPv6 icmp should not render." protocol:: udp tcp icmp action:: accept } term test-tcp-icmp { comment:: "Allow ICMP from company." source-address:: PUBLIC_NAT protocol:: udp tcp icmp action:: accept } term test-icmp { comment:: "Allow ICMP from source-address." 
  source-address:: PUBLIC_NAT
  protocol:: icmp
  action:: accept
}

term test-icmp-type-ip4 {
  comment:: "IPv4 icmp-type test"
  icmp-type:: echo-request echo-reply
  protocol:: icmp
  action:: accept
}

term test-icmp-type-ip6 {
  comment:: "IPv6 icmp-type test"
  icmp-type:: multicast-listener-done router-solicit router-advertisement
  protocol:: icmpv6
  action:: accept
}

term full-combo-term {
  comment:: "Test src/dest 80 - No addresses"
  source-address:: CLOUDFLARE_PUBLIC_DNS
  destination-address:: GOOGLE_PUBLIC_DNS_ANYCAST
  protocol:: tcp
  source-port:: HTTP
  destination-port:: HTTP
  action:: accept
}

term base-traceroute-in {
  comment:: "Ensure we can output a port-range."
  protocol:: udp
  source-port:: TRACEROUTE
  destination-port:: HIGH_PORTS
  action:: accept
}

capirca-2.0.9/policies/pol/sample_nftables-mixed-multiple-headers-combo.pol

# Test rendering for multiple nftables policies.
header {
  comment:: "Noverbose + custom priority policy example"
  target:: nftables mixed INPUT 300
}

term multi-all-families {
  comment:: "Validate multiple protocols."
  protocol:: udp tcp icmp icmpv6
  action:: accept
}

term multi-proto-term {
  comment:: "Validate multiple protocols. IPv6 icmp should not render."
  protocol:: udp tcp icmp
  action:: accept
}

header {
  comment:: "Noverbose + custom priority policy example"
  target:: nftables mixed INPUT 400
}

term test-tcp-icmp {
  comment:: "Allow ICMP from company."
  source-address:: PUBLIC_NAT
  protocol:: udp tcp icmp
  action:: accept
}

header {
  comment:: "Noverbose + custom priority policy example"
  target:: nftables mixed OUTPUT
}

term test-icmp {
  comment:: "Allow ICMP from company."
  source-address:: PUBLIC_NAT
  protocol:: icmp
  action:: accept
}

term test-icmp-type-ip4 {
  comment:: "IPv4 icmp-type test"
  icmp-type:: echo-request echo-reply
  protocol:: icmp
  action:: accept
}

term test-icmp-type-ip6 {
  comment:: "IPv6 icmp-type test"
  icmp-type:: multicast-listener-done router-solicit router-advertisement
  protocol:: icmpv6
  action:: accept
}

term full-combo-term {
  comment:: "Test src/dest 80 - with addresses"
  source-address:: CLOUDFLARE_PUBLIC_DNS
  destination-address:: GOOGLE_PUBLIC_DNS_ANYCAST
  protocol:: tcp
  source-port:: HTTP
  destination-port:: HTTP
  action:: accept
}

term multi-all-families {
  comment:: "Validate multiple protocols."
  protocol:: udp tcp icmp icmpv6
  action:: accept
}

term base-traceroute-in {
  comment:: "Ensure we can output a port-range."
  protocol:: udp
  source-port:: TRACEROUTE
  destination-port:: HIGH_PORTS
  action:: accept
}

capirca-2.0.9/policies/pol/sample_nftables.pol

#
# This is a WIP example policy for capirca/nftables
# Policy terms will be added as code evolves to handle them.
#
header {
  comment:: "Noverbose + custom priority policy example"
  target:: nftables mixed OUTPUT 300 noverbose
}

term default-accept {
  comment:: "non-protocol specific allow anything test"
  action:: accept
}

header {
  comment:: "Inbound traffic nftables policy example"
  target:: nftables inet INPUT
}

term allow-anything {
  action:: accept
}

header {
  comment:: "2 Inbound traffic nftables policy example"
  target:: nftables inet INPUT ACCEPT
}

term allow-anything {
  action:: accept
}

header {
  comment:: "Outbound dual-stack traffic nftables policy example"
  target:: nftables mixed OUTPUT
}

term default-deny {
  action:: deny
}

term full-tuple-term {
  source-address:: WEB_SERVERS
  source-port:: DNS
  destination-address:: GOOGLE_DNS
  destination-port:: DNS
  protocol:: tcp
  action:: accept
}

term ssh-deny {
  protocol:: tcp
  destination-port:: SSH
  destination-port:: DNS
  action:: deny
}

term source-address-term {
  source-address:: RFC1918 BOGON RESERVED
  action:: deny
}

header {
  comment:: "Outbound IPv6 traffic nftables policy example"
  target:: nftables inet6 OUTPUT
}

term default-deny {
  action:: deny
}

header {
  comment:: "Priority outbound IPv6"
  target:: nftables inet6 OUTPUT 100
}

term awesome-term {
  comment:: "Awesomeness."
  action:: accept
}

term multiline-comment-term {
  comment:: "First line of comment."
  comment:: "Second line of defense."
  comment:: "Third base."
  action:: accept
}

term awesome-term3 {
  comment:: "Awesomeness."
  action:: accept
}

header {
  comment:: "This policy expected to test every combination of REQUIRED keywords."
  target:: nftables inet INPUT
}

term test-icmp {
  comment:: "Allow ICMP from company."
  source-address:: PUBLIC_NAT
  protocol:: icmp
  action:: accept
}

term test-icmp-type-ip4 {
  comment:: "IPv4 icmp-type test"
  icmp-type:: echo-request echo-reply
  protocol:: icmp
  action:: accept
}

term test-icmp-type-ip6 {
  comment:: "IPv6 icmp-type test"
  icmp-type:: multicast-listener-done router-solicit router-advertisement
  protocol:: icmpv6
  action:: accept
}

term test-protocol-udp {
  comment:: "All UDP traffic for both IPv4 and IPv6."
  protocol:: udp
  action:: accept
}

term test-protocol-tcp {
  comment:: "All TCP traffic for both IPv4 and IPv6."
  protocol:: tcp
  action:: accept
}

term test-conntrack-established {
  comment:: "only permit established connections"
  comment:: "implements tcp-established flag if protocol is tcp only"
  comment:: "otherwise adds 1024-65535 to required destination-ports"
  destination-address:: INTERNAL
  protocol:: udp
  option:: established
  action:: accept
}

term test-conntrack-tcp-replies {
  comment:: "only permit established tcp connections, usually checked based on TCP flag settings."
  comment:: "If protocol UDP is included in term"
  comment:: "only adds 1024-65535 to required destination-ports."
destination-address:: INTERNAL protocol:: tcp option:: tcp-established action:: accept } term test-port-snmp { comment:: "Test SNMP port 161 UDP - No addresses" protocol:: udp destination-port:: SNMP action:: accept } term test-src-rdp { comment:: "Test source port RDP 3389/tcp - No addresses" protocol:: tcp source-port:: RDP logging:: syslog counter:: somecountername action:: accept } term test-combined-port { comment:: "Test src/dest 80 - No addresses" protocol:: tcp source-port:: HTTP destination-port:: HTTP action:: accept } term high-ports { comment:: "Test tcp + udp high ports - No addresses" protocol:: tcp udp destination-port:: HIGH_PORTS action:: accept } capirca-2.0.9/policies/pol/sample_nsxv.pol000066400000000000000000000055721437377527500206350ustar00rootroot00000000000000header { comment:: "Sample NSXV filter" target:: nsxv sample_nsxv_filter mixed 1234 securitygroup securitygroupId } term accept-icmp { protocol:: icmp action:: accept } term accept-traceroute { comment:: "Allow inbound traceroute from any source." destination-port:: TRACEROUTE protocol:: udp action:: accept expiration:: 2001-12-31 owner:: jeff } term accept-bgp-requests { comment:: "Allow BGP requests from peers." destination-port:: BGP protocol:: tcp action:: accept } term accept-bgp-replies { comment:: "Allow inbound replies to BGP requests." source-port:: BGP protocol:: tcp action:: accept } term accept-ospf { comment:: "Allow outbound OSPF traffic from other RFC1918 routers." source-address:: INTERNAL protocol:: ospf action:: accept } term allow-vrrp { protocol:: vrrp action:: accept } term accept-ike { source-port:: IKE destination-port:: IKE protocol:: udp action:: accept } term accept-ipsec { protocol:: esp action:: accept } term accept-pim { source-address:: INTERNAL protocol:: pim action:: accept } term accept-igmp { source-address:: INTERNAL protocol:: igmp action:: accept } term accept-ssh-requests { source-address:: INTERNAL destination-port:: SSH protocol:: tcp action:: accept } term accept-ssh-replies { source-port:: SSH protocol:: tcp action:: accept } term accept-snmp-requests { source-address:: INTERNAL destination-address:: INTERNAL destination-port:: SNMP protocol:: udp action:: accept } term accept-dns-replies { source-address:: INTERNAL destination-address:: INTERNAL source-port:: DNS protocol:: udp action:: accept } term allow-ntp-request { source-address:: NTP_SERVERS destination-address:: INTERNAL destination-port:: NTP protocol:: udp action:: accept } term allow-ntp-replies { source-address:: INTERNAL destination-address:: NTP_SERVERS source-port:: NTP protocol:: udp action:: accept } term allow-radius-replies { source-address:: INTERNAL destination-address:: INTERNAL source-port:: RADIUS protocol:: udp action:: accept } term allow-tacacs-requests { source-address:: INTERNAL destination-address:: TACACS_SERVERS destination-port:: TACACS protocol:: tcp action:: accept } term allow-tacacs-replies { source-address:: TACACS_SERVERS destination-address:: INTERNAL source-port:: TACACS protocol:: tcp action:: accept } term allow-dns-fragments { source-address:: ANY source-exclude:: PUBLIC_NAT destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: tcp udp action:: accept } term reject-large-dns { destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: reject } term reject-imap-requests { destination-address:: MAIL_SERVERS destination-port:: IMAP protocol:: tcp action:: reject-with-tcp-rst } term discard-default { action:: deny } 
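#
# Illustrative usage sketch (not part of the original sample set): a single
# sample policy such as the one above can be rendered on its own with aclgen.
# The exact flag names are an assumption inferred from aclgen_test.py further
# down in this archive, which renders one policy file at a time via aclgen.Run.
#
#   aclgen --policy_file policies/pol/sample_nsxv.pol \
#     --output_directory ./output --logtostderr
#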
capirca-2.0.9/policies/pol/sample_openconfig.pol000066400000000000000000000020231437377527500217520ustar00rootroot00000000000000header { comment:: "Accepts DHCP, DNS, HTTPS, SMTP." comment:: "Denies access to internal addressing." target:: openconfig inet } term accept-dhcp { comment:: "Optional - allow DHCP requests." destination-port:: DHCP protocol:: udp action:: accept } term accept-from-googledns { comment:: "Allow name resolution responses from Google DNS." source-address:: GOOGLE_DNS destination-address:: INTERNAL source-port:: DNS protocol:: tcp udp action:: accept } term sport-dport { comment:: "DENY SMTP." destination-address:: INTERNAL source-address:: BOGON source-port:: SMTP destination-port:: SMTP protocol:: tcp action:: deny } term deny-saddr { comment:: "Deny everything from bogons." source-address:: BOGON action:: deny } term ssh-to-internal { comment:: "Allow SSH to Internal from Internal." source-address:: INTERNAL destination-address:: INTERNAL destination-port:: SSH protocol:: tcp action:: accept } term default-deny { comment:: "Deny what's left." action:: deny } capirca-2.0.9/policies/pol/sample_packetfilter.pol000066400000000000000000000016571437377527500223140ustar00rootroot00000000000000header { comment:: "Denies all traffic to internal IPs except established tcp replies." target:: packetfilter allowtointernet } term accept-dhcp { comment:: "Optional - allow forwarding of DHCP requests." destination-port:: DHCP protocol:: udp action:: accept } term accept-to-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } term deny-to-internal { comment:: "Deny access to rfc1918/internal." destination-address:: INTERNAL logging:: true action:: reject } term test-icmp { destination-address:: RFC1918 protocol:: icmp icmp-type:: echo-request echo-reply action:: accept } term deny-to-specific_hosts { comment:: "Deny access to specified public." destination-address:: WEB_SERVERS MAIL_SERVERS action:: deny } term default-permit { comment:: "Allow what's left." action:: accept } capirca-2.0.9/policies/pol/sample_paloalto.pol000066400000000000000000000022061437377527500214410ustar00rootroot00000000000000################ ### RULEBASE ### ################ # NOTE: makes sure zone names correspond to those specified in your firewall setup header { target:: paloalto from-zone internal to-zone external } term ping-gdns { source-address:: INTERNAL destination-address:: GOOGLE_DNS protocol:: icmp action:: accept } term dns-gdns { source-address:: INTERNAL destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: tcp action:: accept } term allow-web-outbound { source-address:: INTERNAL destination-port:: WEB_SERVICES protocol:: tcp action:: accept } header { target:: paloalto from-zone external to-zone internal } term allow-icmp { protocol:: icmp action:: accept } # pan-application only: service application-default term allow-pan-app-01 { pan-application:: web-browsing action:: accept } # pan-application + tcp: service any-tcp term allow-pan-app-02 { pan-application:: web-browsing protocol:: tcp action:: accept } # pan-application + ports: service custom service objects term allow-pan-app-03 { pan-application:: ssl protocol:: tcp destination-port:: HTTPS IMAPS action:: accept } capirca-2.0.9/policies/pol/sample_sonic.pol000066400000000000000000000026771437377527500207550ustar00rootroot00000000000000header { comment:: "Denies all traffic to internal IPs except established tcp replies." 
comment:: "Also denies access to certain public allocations." comment:: "Ideal for some internal lab/testing types of subnets that are" comment:: "not well trusted, but allowing internal users to access." comment:: "Apply to ingress interface (to filter traffic coming from lab)" comment:: "MirACL version: KJ3RvLwBOkAvcGxhY2VyL3Byb2QvaG9tZS9zZWNvcHMtbmV0LWFjbC1wbGFjZXIvc25hcHNob3RzL3BhcnRpdGlvbj1zc2QvQgYIhpbdiQZKBgitkN2JBlIGCP2P3YkGWgYIgI_diQZqBgiTk92JBnIGCJKU3YkGegYIgpXdiQaCAQYI3ZHdiQY=" target:: sonic allowtointernet } term accept-dhcp { comment:: "Optional - allow forwarding of DHCP requests." destination-port:: DHCP protocol:: udp action:: accept } term accept-to-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } term accept-tcp-replies { comment:: "Allow tcp replies to internal hosts." destination-address:: INTERNAL protocol:: tcp option:: tcp-established action:: accept } term deny-to-internal { comment:: "Deny access to rfc1918/internal." destination-address:: INTERNAL action:: deny } term deny-to-specific_hosts { comment:: "Deny access to specified public." destination-address:: WEB_SERVERS MAIL_SERVERS action:: deny } term default-permit { comment:: "Allow what's left." action:: accept } capirca-2.0.9/policies/pol/sample_speedway.pol000066400000000000000000000027071437377527500214550ustar00rootroot00000000000000header { comment:: "Sample policy for Speedway Iptables." comment:: "Speedway generates iptables output suitable for loading" comment:: "using the iptables-restore command" target:: speedway INPUT DROP } term base-allow-est-in { option:: established action:: accept } term base-allow-icmp-in { protocol:: icmp icmp-type:: echo-request action:: accept } term base-traceroute-in { protocol:: udp source-port:: TRACEROUTE destination-port:: HIGH_PORTS action:: accept } term base-allow-ssh-in { source-address:: INTERNAL protocol:: tcp destination-port:: SSH action:: accept } header { comment:: "Sample output filter policy for Speedway Iptables." target:: speedway OUTPUT DROP } term base-allow-lo0-out { comment:: "Allow all loopback communications" verbatim:: speedway "-A OUTPUT -o lo -j ACCEPT" } term base-allow-est-out { option:: established action:: accept } term base-allow-dns-query-out { protocol:: udp destination-port:: DNS action:: accept } term base-allow-icmp-out { protocol:: icmp action:: accept } term base-traceroute-out { protocol:: udp destination-port:: TRACEROUTE source-port:: HIGH_PORTS action:: accept } term base-allow-ssh-out { destination-address:: INTERNAL protocol:: tcp destination-port:: SSH action:: accept } header { comment:: "Sample forwarding filter policy for Speedway Iptables." target:: speedway FORWARD DROP } term base-forwarding-deny { action:: reject } capirca-2.0.9/policies/pol/sample_srx.pol000066400000000000000000000010411437377527500204360ustar00rootroot00000000000000header { comment:: "this is a sample policy to generate Juniper SRX filter" comment:: "from zone Untrust to zone DMZ." 
target:: srx from-zone Untrust to-zone DMZ } term test-tcp { destination-address:: RFC1918 protocol:: tcp udp logging:: log-both action:: accept } term test-icmp { destination-address:: RFC1918 protocol:: icmp icmp-type:: echo-request echo-reply action:: accept logging:: true } term good-term-6 { counter:: good-counter action:: accept } term default-deny { action:: deny logging:: true } capirca-2.0.9/policies/pol/sample_stateful_multitarget_complex.pol000066400000000000000000000033551437377527500256330ustar00rootroot00000000000000################ ### RULEBASE ### ################ # NOTE: makes sure zone names correspond to those specified in your firewall setup header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks tcp and udp rules are correctly" comment:: "generated for a large number of IPv4 IPs." target:: paloalto from-zone internal to-zone external target:: srx from-zone internal to-zone external } term test-tcp-udp-many-ipv4 { comment:: "Testing large IPv4 IPs to test address books." source-address:: MANY_IPV4 destination-address:: MANY_IPV4 protocol:: tcp udp action:: accept } header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks tcp and udp rules are correctly" comment:: "generated for a large number of IPv6 IPs." target:: paloalto from-zone internal to-zone external inet6 target:: srx from-zone internal to-zone external inet6 } term test-tcp-udp-many-ipv6 { comment:: "Testing large IPv6 IPs to test address books." source-address:: MANY_IPV6 destination-address:: MANY_IPV6 protocol:: tcp udp action:: accept } header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks tcp and udp rules are correctly" comment:: "generated for a large number of mixed IPs." target:: paloalto from-zone untrust to-zone external mixed target:: srx from-zone internal to-zone external mixed } term test-tcp-udp-many-mixed { comment:: "Testing mixed IPv4 and IPv6 IPs to test address books." source-address:: MANY_IPV4 MANY_IPV6 destination-address:: MANY_IPV4 MANY_IPV6 protocol:: tcp udp action:: accept } capirca-2.0.9/policies/pol/sample_stateful_multitarget_simple.pol000066400000000000000000000146631437377527500254610ustar00rootroot00000000000000################ ### RULEBASE ### ################ # NOTE: makes sure zone names correspond to those specified in your firewall setup header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks logging options, tcp, udp" comment:: "and icmp type options." target:: paloalto from-zone internal to-zone external target:: srx from-zone internal to-zone external } term test-tcp-log-both { comment:: "Testing log-both for tcp." protocol:: tcp logging:: log-both action:: accept } term test-udp-log { comment:: "Testing logging for udp." protocol:: udp logging:: true action:: accept } term test-tcp-udp-any { comment:: "Testing any port for tcp, udp," comment:: "`when destination-port is unspecified." protocol:: tcp udp action:: accept } term accept-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } term accept-icmp-specific-types { comment:: "Allow ICMPv4, with icmp types, and test counters." 
source-address:: INTERNAL protocol:: icmp icmp-type:: echo-request echo-reply counter:: icmp-ping action:: accept } term accept-igmp { comment:: "Allow IGMP, and test syslog logging." source-address:: INTERNAL protocol:: igmp action:: accept logging:: syslog } term accept-traceroute-source-ports { comment:: "Allow traceroute with source port range for udp." protocol:: udp source-port:: TRACEROUTE action:: accept } term deny-to-bad-destinations { comment:: "Deny to bad destinations." destination-address:: RFC1918 BOGON RESERVED action:: deny } header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks tcp-established and udp " comment:: "established options." target:: paloalto from-zone external to-zone internal target:: srx from-zone external to-zone internal } term accept-webserver-traffic { comment:: "Allow webserver inbound traffic." destination-address:: WEB_SERVERS destination-port:: WEB_SERVICES protocol:: tcp action:: accept } term test-tcp-established { comment:: "Allow tcp-established traffic." comment:: "This should not be generated since this is a stateful policy." destination-address:: MAIL_SERVERS WEB_SERVERS PUBLIC_NAT protocol:: tcp option:: tcp-established action:: accept } term test-dns-replies { comment:: "Allow DNS replies, and test udp established option." comment:: "This should not be generated since this is a stateful policy." source-port:: DNS destination-address:: INTERNAL protocol:: udp option:: established counter:: dns-replies action:: accept } header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks inet6 options and icmpv6." target:: paloalto from-zone internal to-zone external inet6 target:: srx from-zone internal to-zone external inet6 } term ipv6-accept-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } term accept-icmpv6-specific-types { comment:: "Allow ICMPv6, with icmpv6 types." protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } term ipv6-deny-to-bad-destinations { comment:: "Deny to bad destinations." destination-address:: RFC1918 BOGON RESERVED action:: deny } header { comment:: "this is a sample policy for a zone based filter that generates" comment:: "multiple output formats. It checks inet6 options." target:: paloalto from-zone external to-zone internal inet6 target:: srx from-zone external to-zone internal inet6 } term ipv6-accept-webserver-traffic { comment:: "Allow webserver inbound traffic." destination-address:: WEB_IPV6_SERVERS destination-port:: WEB_SERVICES protocol:: tcp action:: accept } term test-tcp-established { comment:: "Allow tcp-established traffic." comment:: "This should not be generated since this is a stateful policy." destination-address:: WEB_IPV6_SERVERS protocol:: tcp option:: tcp-established action:: accept } term test-dns-replies { comment:: "Allow DNS replies, and test udp established option." comment:: "This should not be generated since this is a stateful policy." source-port:: DNS destination-address:: INTERNAL protocol:: udp option:: established counter:: dns-replies action:: accept } header { comment:: "this is a test policy for a zone based filter that generates" comment:: "multiple output formats. It checks inet6 options for IPv4 rules." 
target:: paloalto from-zone unknown to-zone internal inet6 target:: srx from-zone unknown to-zone internal inet6 } term test-inet6-webserver-traffic { comment:: "Allow webserver inbound traffic." comment:: "This should not be generated since this only has IPv4 addresses." source-address:: RFC1918 destination-address:: WEB_SERVERS protocol:: tcp action:: accept } term test-inet6-tcp-icmp-traffic { comment:: "Allow all tcp and icmp traffic." comment:: "The icmp term should not be generated since this is inet6." comment:: "The tcp term should be generated." protocol:: tcp icmp action:: accept } term test-inet6-tcp-icmpv6-traffic { comment:: "Allow all tcp and icmpv6 traffic." comment:: "This should be generated since this has no ports specified." protocol:: tcp icmpv6 action:: accept } header { comment:: "this is a test policy for a zone based filter that generates" comment:: "multiple output formats. It checks inet options for IPv6 rules." target:: paloalto from-zone unknown to-zone internal inet target:: srx from-zone unknown to-zone internal inet } term test-inet-webserver-traffic { comment:: "Allow webserver inbound IPv6 traffic." comment:: "This should not be generated since this only has IPv6 addresses." source-address:: PUBLIC_IPV6_SERVERS destination-address:: WEB_IPV6_SERVERS protocol:: tcp action:: accept } term test-inet-tcp-icmp-traffic { comment:: "Allow all tcp and icmp traffic." comment:: "This should be generated since this has no ports specified." protocol:: tcp icmp action:: accept } term test-inet-tcp-icmpv6-traffic { comment:: "Allow all tcp and icmpv6 traffic." comment:: "The icmpv6 term should not be generated since this is inet." comment:: "The tcp term should be generated." protocol:: icmpv6 tcp action:: accept } capirca-2.0.9/requirements.txt000066400000000000000000000001361437377527500164340ustar00rootroot00000000000000# Dependencies # Usage: # $ pip install -r requirements.txt absl-py ply PyYAML six>=1.12.0 capirca-2.0.9/setup.cfg000066400000000000000000000006541437377527500147760ustar00rootroot00000000000000[flake8] exclude = .git, .github, __pycache__, dist, build, debian, *.egg, *.egg-info, *.venv, *.archive, def, policies, doc max-line-length = 100 max-complexity = 10 filename = *.py ignore = E111, E114, E121, E731, C901, F821, W504, E501, [tool:pytest] markers = unit: Marks a unit test sanity: Marks a sanity test testpaths = tests capirca-2.0.9/setup.py000066400000000000000000000036061437377527500146670ustar00rootroot00000000000000# # Copyright 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""capirca installation module.""" from os import path import setuptools root_dir = path.abspath(path.dirname(__file__)) with open(path.join(root_dir, 'VERSION'), encoding='utf-8') as f: version = f.readline().strip() with open(path.join(root_dir, 'README.md'), encoding='utf-8') as f: long_description = f.read() setuptools.setup( name='capirca', version=version, description='Capirca', long_description=long_description, long_description_content_type='text/markdown', license='Apache License, Version 2.0', url='https://github.com/google/capirca/', maintainer='Capirca Team', maintainer_email='capirca-dev@google.com', packages=['capirca', 'capirca.lib', 'capirca.utils'], zip_safe=False, entry_points={ 'console_scripts': ['aclgen = capirca.aclgen:EntryPoint'], }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Topic :: Security', 'Topic :: System :: Networking :: Firewalls', ], install_requires=[ 'absl-py', 'ply', 'mock', 'six', 'PyYAML', ], python_requires='>=3.6', ) capirca-2.0.9/test-requirements.txt000066400000000000000000000002561437377527500174140ustar00rootroot00000000000000attrs==19.3.0 importlib-metadata==1.6.1 mock==4.0.2 more-itertools==8.3.0 packaging==20.4 pluggy==0.13.1 py==1.10.0 pyparsing==2.4.7 pytest==5.4.3 wcwidth==0.2.3 zipp==3.1.0 capirca-2.0.9/tests/000077500000000000000000000000001437377527500143125ustar00rootroot00000000000000capirca-2.0.9/tests/README000066400000000000000000000002301437377527500151650ustar00rootroot00000000000000To run all tests from trunk: python -m unittest discover -s . -p '*_test.py' Specific tests: python -m unittest discover -s . -p 'junipersrx_test.py' capirca-2.0.9/tests/__init__.py000066400000000000000000000000001437377527500164110ustar00rootroot00000000000000capirca-2.0.9/tests/integration/000077500000000000000000000000001437377527500166355ustar00rootroot00000000000000capirca-2.0.9/tests/integration/__init__.py000066400000000000000000000000001437377527500207340ustar00rootroot00000000000000capirca-2.0.9/tests/integration/aclgen_test.py000066400000000000000000000104101437377527500214730ustar00rootroot00000000000000# Copyright 2015 The Capirca Project Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import multiprocessing import os import pathlib import shutil import sys import tempfile from unittest import mock from absl import app from absl import flags from absl.testing import absltest from capirca import aclgen FLAGS = flags.FLAGS aclgen.SetupFlags() # Ensure flags are set up only once # Pass only the program name into absl so it uses the default flags FLAGS(sys.argv[0:1]) class TestAclGenDemo(absltest.TestCase): """Ensure Capirca demo runs successfully out-of-the-box.""" def setUp(self): super().setUp() self.test_subdirectory = tempfile.mkdtemp() self.def_dir = os.path.join(self.test_subdirectory, 'def') self.pol_dir = os.path.join(self.test_subdirectory, 'policies') shutil.rmtree(self.test_subdirectory, ignore_errors=True) os.mkdir(self.test_subdirectory) shutil.copytree('def', self.def_dir) shutil.copytree('policies', self.pol_dir) self.context = multiprocessing.get_context() self.max_renderers = 10 self.exp_info = 2 self.ignore_directories = ['DEPRECATED', 'def'] @mock.patch.object(aclgen, '_WriteFile', autospec=True) def test_smoke_test_generates_successfully(self, mock_writer): aclgen.Run( self.pol_dir, self.def_dir, None, self.test_subdirectory, self.exp_info, self.max_renderers, self.ignore_directories, None, None, self.context, ) files = [ 'sample_cisco_lab.acl', 'sample_cloudarmor.gca', 'sample_gce.gce', 'sample_ipset.ips', 'sample_juniper_loopback.jcl', 'sample_juniperevo_loopback.evojcl', 'sample_multitarget.acl', 'sample_multitarget.asa', 'sample_multitarget.bacl', 'sample_multitarget.eacl', 'sample_multitarget.ipt', 'sample_multitarget.jcl', 'sample_multitarget.evojcl', 'sample_multitarget.msmpc', 'sample_multitarget.xacl', 'sample_multitarget.nxacl', 'sample_nsxv.nsx', 'sample_packetfilter.pf', 'sample_speedway.ipt', 'sample_srx.srx', 'sample_paloalto.xml', 'sample_nftables-mixed-icmp.nft', 'sample_nftables-mixed-multiple-headers-combo.nft', 'sample_nftables.nft', 'sample_nftables-dev.nft', 'sample_stateful_multitarget_simple.xml', 'sample_stateful_multitarget_simple.srx', 'sample_stateful_multitarget_complex.xml', 'sample_stateful_multitarget_complex.srx', 'sample_k8s.yml', ] expected = [ mock.call(pathlib.Path(self.test_subdirectory, f), mock.ANY) for f in files ] mock_writer.assert_has_calls(expected, any_order=True) @mock.patch.object(aclgen, '_WriteFile', autospec=True) def test_generate_single_policy(self, mock_writer): policy_file = os.path.join(self.test_subdirectory, 'policies/pol/sample_cisco_lab.pol') aclgen.Run( self.pol_dir, self.def_dir, policy_file, self.test_subdirectory, self.exp_info, self.max_renderers, self.ignore_directories, None, None, self.context, ) mock_writer.assert_called_with( pathlib.Path(self.test_subdirectory, 'sample_cisco_lab.acl'), mock.ANY) # Test to ensure existence of the entry point function for installed script. @mock.patch.object(aclgen, 'SetupFlags', autospec=True) @mock.patch.object(app, 'run', autospec=True) def test_entry_point(self, mock_run, mock_flags): aclgen.EntryPoint() mock_flags.assert_called_with() mock_run.assert_called_with(aclgen.main) def main(unused_argv): absltest.main() if __name__ == '__main__': app.run(main) capirca-2.0.9/tests/lib/000077500000000000000000000000001437377527500150605ustar00rootroot00000000000000capirca-2.0.9/tests/lib/__init__.py000066400000000000000000000000001437377527500171570ustar00rootroot00000000000000capirca-2.0.9/tests/lib/aclcheck_test.py000066400000000000000000000104121437377527500202240ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for AclCheck.""" from absl.testing import absltest from capirca.lib import aclcheck from capirca.lib import naming from capirca.lib import policy from capirca.lib import port POLICYTEXT = """ header { comment:: "this is a test acl" target:: juniper test-filter } term term-1 { protocol:: tcp action:: next } term term-2 { source-address:: NET172 destination-address:: NET10 protocol:: tcp destination-port:: SSH option:: first-fragment tcp-established fragment-offset:: 1-6 packet-length:: 1-119 action:: accept } term term-3 { source-address:: NET172 destination-address:: NET10 protocol:: tcp destination-port:: SSH action:: accept } term term-4 { protocol:: udp action:: accept } term term-5 { action:: reject } """ class AclCheckTest(absltest.TestCase): def setUp(self): super().setUp() self.defs = naming.Naming(None) servicedata = [] servicedata.append('SSH = 22/tcp') networkdata = [] networkdata.append('NET172 = 172.16.0.0/12') networkdata.append('NET10 = 10.0.0.0/8') self.defs.ParseServiceList(servicedata) self.defs.ParseNetworkList(networkdata) self.pol = policy.ParsePolicy(POLICYTEXT, self.defs) def testExactMatches(self): check = aclcheck.AclCheck(self.pol, '172.16.1.1', '10.1.1.1', '1025', '22', 'tcp') matches = check.ExactMatches() self.assertEqual(len(matches), 1) def testAclCheck(self): srcip = '172.16.1.1' dstip = '10.2.2.10' sport = '10000' dport = '22' proto = 'tcp' check = aclcheck.AclCheck(self.pol, src=srcip, dst=dstip, sport=sport, dport=dport, proto=proto) matches = check.Matches() # Check correct number of matches self.assertEqual(len(matches), 3) # Check correct actions self.assertEqual(matches[0].action, 'next') # term-1 self.assertEqual(matches[1].action, 'accept') # term-2 self.assertEqual(matches[2].action, 'accept') # term-3 # Check for correct 'possibles' self.assertEqual(matches[0].possibles, []) # term-1 self.assertEqual(matches[1].possibles, ['first-frag', 'frag-offset', 'packet-length', 'tcp-est'] ) # term-2 self.assertEqual(matches[2].possibles, []) # term-3 # Check which term names match self.assertEqual(matches[0].term, 'term-1') self.assertEqual(matches[1].term, 'term-2') self.assertEqual(matches[2].term, 'term-3') # term-4 should never match self.assertNotIn('term-4', str(matches)) self.assertNotIn('term-5', str(matches)) def testExceptions(self): srcip = '172.16.1.1' dstip = '10.2.2.10' sport = '10000' dport = '22' proto = 'tcp' bad_portrange = '99999' bad_portvalue = 'port_99' self.assertRaises(port.BadPortValue, aclcheck.AclCheck, self.pol, srcip, dstip, bad_portvalue, dport, proto, ) self.assertRaises(port.BadPortRange, aclcheck.AclCheck, self.pol, srcip, dstip, sport, bad_portrange, proto, ) self.assertRaises(aclcheck.AddressError, aclcheck.AclCheck, self.pol, '300.400.500.600', dstip, sport, dport, proto, ) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/aclgenerator_test.py000066400000000000000000000204101437377527500211340ustar00rootroot00000000000000# Copyright 2010 Google Inc. 
All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for ACL rendering module.""" from absl.testing import absltest from absl.testing import parameterized from unittest import mock from capirca.lib import aclgenerator from capirca.lib import naming from capirca.lib import policy GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: mock } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } """ STATEFUL_ONLY_TERM = """ term stateful-only { option:: established action:: accept } """ ICMPV6_TERM = """ term icmpv6-term { protocol:: icmpv6 action:: accept } """ SHORT_TERM_NAME = """ term short-term-name { protocol:: tcp action:: accept } """ GOOD_LONG_TERM_NAME = """ term google-experiment-abbreviations { protocol:: tcp action:: accept } """ BAD_LONG_TERM_NAME = """ term this-term-name-is-really-far-too-long { protocol:: tcp action:: accept } """ # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class ACLMock(aclgenerator.ACLGenerator): _PLATFORM = 'mock' _TERM_MAX_LENGTH = 24 def _TranslatePolicy(self, pol, exp_info): pass class ACLGeneratorTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) # pylint: disable=line-too-long,g-inconsistent-quotes @parameterized.parameters(([ 'There is something very long about this comment that', 'will require it to be truncated in order for nftables', 'binary to be able to load the rulesets.'], '"There is something very long about this comment that will require it to be truncated in order for nftables binary to be able t"'), (['some comment description', 'second comment item \nNewline'], '"some comment description second comment item Newline"'), ('a string comment', '"a string comment"')) def testTruncateWords(self, input_data, expected_output): result = aclgenerator.TruncateWords( input_data, 126) self.assertEqual(result, expected_output) # pylint: disable=line-too-long def testEstablishedNostate(self): # When using "nostate" filter and a term with "option:: established" # have any protocol other than TCP and/or UDP should raise error. pol = policy.ParsePolicy(GOOD_HEADER_1 + STATEFUL_ONLY_TERM, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: self.assertRaises(aclgenerator.EstablishedError, acl.FixHighPorts, term, 'inet', False) def testSupportedAF(self): # Unsupported address families should raise an error. pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: self.assertRaises(aclgenerator.UnsupportedAFError, acl.FixHighPorts, term, 'unsupported', False) def testTermNameBelowLimit(self): # Term name that is below specified limit should come out unchanged, # regardless of abbreviation and truncation settings. 
pol = policy.ParsePolicy(GOOD_HEADER_1 + SHORT_TERM_NAME, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: result = acl.FixTermLength(term.name, True, True) self.assertEqual(term.name, result) result = acl.FixTermLength(term.name, True, False) self.assertEqual(term.name, result) result = acl.FixTermLength(term.name, False, True) self.assertEqual(term.name, result) result = acl.FixTermLength(term.name, False, False) self.assertEqual(term.name, result) result = acl.FixTermLength(term.name, False, False, 30) self.assertEqual(term.name, result) def testLongTermAbbreviation(self): # Term name that is above specified limit should come out abbreviated # when abbreviation is enabled. pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_LONG_TERM_NAME, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: result = acl.FixTermLength(term.name, True, False) self.assertIn('-abbreviations', result, 'Our strings disappeared during abbreviation.') # override the term max length and ensure there are no abbreviations. result = acl.FixTermLength(term.name, True, False, 4 * acl._TERM_MAX_LENGTH) self.assertNotIn( 'GOOG', result, 'Strings incorrect in abbreviation and length overriding.') def testTermNameTruncation(self): # Term name that is above specified limit should come out truncated # when truncation is enabled. pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_LONG_TERM_NAME, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: result = acl.FixTermLength(term.name, False, True) self.assertEqual('google-experiment-abbrev', result) result = acl.FixTermLength(term.name, True, False, 4 * acl._TERM_MAX_LENGTH) self.assertIn( 'google-experiment-abbreviations', result, 'Strings incorrectly disappeared during abbreviation ' 'and length overriding.') def testHexDigest(self): # Term name that is above specified limit should come out truncated # when truncation is enabled. pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_LONG_TERM_NAME, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: result = acl.HexDigest(term.name) self.assertEqual( '070582f8b50d3cb01aa432c26a55b5f378d281c98647f59dd7f3b0d8b1c9d0d5', result) result = acl.HexDigest(term.name, 32) self.assertEqual('070582f8b50d3cb01aa432c26a55b5f3', result) def testLongTermName(self): # Term name that is above specified limit and is impossible to abbreviate # should raise an exception. pol = policy.ParsePolicy(GOOD_HEADER_1 + BAD_LONG_TERM_NAME, self.naming) acl = ACLMock(pol, EXP_INFO) for _, terms in pol.filters: for term in terms: self.assertRaises(aclgenerator.TermNameTooLongError, acl.FixTermLength, term.name, True, False) def testProtocolNameToNumber(self): proto_map = {'icmp': 1, 'ipip': 4, 'tcp': 6, 'gre': 47, } proto_convert = ['gre', 'tcp'] protocol_list = ['icmp', 'gre', 'tcp', 'ipip'] expected_protocol_list = ['icmp', 47, 6, 'ipip'] retprotocol_list = aclgenerator.ProtocolNameToNumber(protocol_list, proto_convert, proto_map) self.assertListEqual(expected_protocol_list, retprotocol_list) def testAddRepositoryTags(self): # Format print the '$' into the RCS tags in order prevent the tags from # being interpolated here. # Include all tags. self.assertListEqual( ['%sId:%s' % ('$', '$'), '%sDate:%s' % ('$', '$'), '%sRevision:%s' % ('$', '$')], aclgenerator.AddRepositoryTags()) # Remove the revision tag. 
self.assertListEqual( ['%sId:%s' % ('$', '$'), '%sDate:%s' % ('$', '$')], aclgenerator.AddRepositoryTags(revision=False)) # Only include the Id: tag. self.assertListEqual( ['%sId:%s' % ('$', '$')], aclgenerator.AddRepositoryTags(date=False, revision=False)) # Wrap the Date: tag. self.assertListEqual( ['"%sDate:%s"' % ('$', '$')], aclgenerator.AddRepositoryTags(revision=False, rid=False, wrap=True)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/arista_test.py000066400000000000000000000212361437377527500177600ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for arista acl rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import arista from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test extended acl" target:: arista test-filter extended } """ GOOD_HEADER_2 = """ header { comment:: "this is a test acl" target:: arista test-filter } """ GOOD_HEADER_3 = """ header { comment:: "this is a test standard acl" target:: arista test-filter standard } """ GOOD_HEADER_IPV6 = """ header { comment:: "this is a test inet6 acl" target:: arista test-filter inet6 } """ GOOD_TERM = """ term good-term { protocol:: tcp option:: tcp-established action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: tcp option:: tcp-established policer:: batman action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { source-address:: SOME_HOST destination-port:: SSH protocol:: tcp action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { source-address:: SOME_HOST2 destination-port:: GOPENFLOW protocol:: tcp action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { comment:: "Accept SNMP from internal sources." address:: SOME_HOST action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { comment:: "Accept ESP from internal sources." address:: SOME_HOST protocol:: esp action:: accept } """ GOOD_TERM_6 = """ term good-term-6 { comment:: "Accept AH from internal sources." address:: SOME_HOST protocol:: ah action:: accept } """ GOOD_TERM_7 = """ term good-term-6 { comment:: "Accept AH from internal sources." 
address:: SOME_HOST protocol:: ah esp tcp action:: accept } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'dscp_match', 'expiration', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'restrict_address_family', 'source_address', 'source_address_exclude', 'source_port', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'tcp-established', 'is-fragment', 'fragments'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class AristaTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testRemark(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_4, self.naming) acl = arista.Arista(pol, EXP_INFO) expected = 'remark this is a test standard acl' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = 'remark good-term-4' self.assertIn(expected, str(acl), str(acl)) expected = 'test-filter remark' self.assertNotIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testExtendedEosSyntax(self): # Extended access-lists should not use the "extended" argument to ip # access-list. 
acl = arista.Arista( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('ip access-list test-filter', str(acl)) def testESPIsAnInteger(self): acl = arista.Arista( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5, self.naming), EXP_INFO) self.assertIn('permit 50', str(acl)) def testAHIsAnInteger(self): acl = arista.Arista( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_6, self.naming), EXP_INFO) self.assertIn('permit 51', str(acl)) def testAHAndESPAreIntegers(self): acl = arista.Arista( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_7, self.naming), EXP_INFO) self.assertIn('permit 50', str(acl)) self.assertIn('permit 51', str(acl)) def testBuildTokens(self): pol1 = arista.Arista(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = arista.Arista(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testStandardTermHost(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.0/24')] self.naming.GetServiceByProto.return_value = ['22', '6537'] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_2 + GOOD_TERM_3, self.naming) acl = arista.Arista(pol, EXP_INFO) expected = 'ip access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit tcp 10.1.1.0/24 any eq ssh' self.assertIn(expected, str(acl), str(acl)) expected = ' permit tcp 10.1.1.0/24 any eq 6537' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST'), mock.call('SOME_HOST2')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SSH', 'tcp'), mock.call('GOPENFLOW', 'tcp')]) def testStandardTermHostV6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2620:1::/64')] self.naming.GetServiceByProto.return_value = ['22'] pol = policy.ParsePolicy(GOOD_HEADER_IPV6 + GOOD_TERM_2, self.naming) acl = arista.Arista(pol, EXP_INFO) expected = 'ipv6 access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit tcp 2620:1::/64 any eq ssh' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST')]) self.naming.GetServiceByProto.assert_has_calls([mock.call('SSH', 'tcp')]) def testStandardTermV4(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.0/24')] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_4, self.naming) acl = arista.Arista(pol, EXP_INFO) expected = 'ip access-list standard test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit 10.1.1.0/24\n' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST')]) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/arista_tp_test.py000066400000000000000000001221511437377527500204610ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """unittest for arista traffic-policy rendering module.""" import datetime import re from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import arista_tp from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: arista_tp test-filter } """ # XXX check GOOD_HEADER_INET = """ header { comment:: "test inet acl" target:: arista_tp test-filter inet } """ GOOD_HEADER_INET6 = """ header { comment:: "this is a test acl" target:: arista_tp test-filter inet6 } """ GOOD_NOVERBOSE_MIXED_HEADER = """ header { target:: arista_tp test-filter mixed noverbose } """ GOOD_NOVERBOSE_V4_HEADER = """ header { target:: arista_tp test-filter inet noverbose } """ GOOD_NOVERBOSE_V6_HEADER = """ header { target:: arista_tp test-filter inet6 noverbose } """ BAD_HEADER = """ header { target:: arista_tp test-filter bridged } """ EXPIRED_TERM = """ term is_expired { expiration:: 2001-01-01 action:: accept } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ DUPLICATE_TERMS = """ term good-term-1 { protocol:: icmp action:: accept } term good-term-1 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } term good-term-2 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_1_V6 = """ term good-term-1 { protocol:: icmpv6 action:: accept } term good-term-2 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_2 = """ term good-term-3 { protocol:: tcp destination-address:: SOME_HOST source-port:: HTTP option:: established tcp-established action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: icmp icmp-type:: echo-reply information-reply information-request icmp-type:: router-solicitation timestamp-request action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { protocol:: icmp protocol:: tcp action:: accept } """ PROTO_EXC_TCP = """ term good-term-7 { protocol-except:: tcp action:: accept } """ PROTO_EXC_LIST = """ term good-term-7 { protocol-except:: igmp egp rdp hopopt action:: accept } """ GOOD_TERM_8 = """ term good-term-8 { source-prefix:: foo_prefix_list destination-prefix:: bar_prefix_list baz_prefix_list action:: accept } """ GOOD_TERM_11 = """ term good-term-11 { verbatim:: arista_tp "mary had a little lamb" verbatim:: iptables "mary had a second lamb" verbatim:: cisco "mary had a third lamb" } """ GOOD_TERM_OWNER = """ term owner-term { protocol:: tcp owner:: foo@google.com action:: accept } """ GOOD_TERM_18_SRC = """ term address-exclusions { source-address:: INTERNAL source-exclude:: SOME_HOST action:: accept } """ GOOD_TERM_18_DST = """ term address-exclusions { destination-address:: INTERNAL destination-exclude:: SOME_HOST action:: accept } """ # TODO(sulric): write a test for this term GOOD_TERM_19 = """ term minimize-prefix-list { source-address:: INCLUDES source-exclude:: EXCLUDES action:: 
accept } """ GOOD_TERM_V6_HOP_LIMIT = """ term good-term-v6-hl { hop-limit:: 25 action:: accept } """ GOOD_TERM_20_V6 = """ term good-term-20-v6 { protocol-except:: icmpv6 action:: accept } """ GOOD_TERM_21 = """ term good_term_21 { ttl:: 10 action:: accept } """ GOOD_TERM_28 = """ term good_term_28 { action:: accept } """ GOOD_TERM_35 = """ term good_term_35 { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ GOOD_TERM_COMMENT = """ term good-term-comment { protocol:: udp comment:: "This is a COMMENT" action:: accept } """ BAD_TERM_1 = """ term bad-term-1 { protocol:: tcp udp source-port:: DNS option:: tcp-established action:: accept } """ ESTABLISHED_TERM_1 = """ term established-term-1 { protocol:: tcp source-port:: DNS option:: established action:: accept } """ MISSING_MATCH = """ term missing-match { action:: accept } """ OPTION_TERM_1 = """ term option-term { protocol:: tcp option:: is-fragment action:: accept } """ BAD_ICMPTYPE_TERM_1 = """ term icmptype-mismatch { comment:: "error when icmpv6 paired with inet filter" protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ BAD_ICMPTYPE_TERM_2 = """ term icmptype-mismatch { comment:: "error when icmp paired with inet6 filter" protocol:: icmp icmp-type:: echo-request echo-reply action:: accept } """ DEFAULT_TERM_1 = """ term default-term-1 { action:: deny } """ LONG_COMMENT_TERM_1 = """ term long-comment-term-1 { comment:: "this is very very very very very very very very very very very comment:: "very very very very very very very long." action:: deny } """ LONG_COMMENT_TERM_ANET = """ term long-comment-term-1 { comment:: "0 this is very very very very very very very very very very very" comment:: "1 very very very very very very very very very very very" comment:: "2 very very very very very very very very very very very" comment:: "3 very very very very very very very very very very very" comment:: "4 very very very very very very very long comment. 
" protocol:: icmpv6 action:: deny } """ HOPOPT_TERM = """ term good-term-1 { protocol:: hopopt action:: accept } """ FRAGOFFSET_TERM = """ term good-term-1 { fragment-offset:: 1-7 action:: accept } """ COUNTER_CLEANUP_TERM = """ term good-term-1 { protocol:: tcp counter:: test.cleanup.check action:: accept } """ # test the various mixed filter_type permutations MIXED_INET = """ term MIXED_INET { source-address:: GOOGLE_DNS destination-address:: INTERNAL protocol:: tcp udp action:: accept } """ INET_MIXED = """ term INET_MIXED { source-address:: INTERNAL destination-address:: GOOGLE_DNS protocol:: tcp udp action:: accept } """ MIXED_INET6 = """ term MIXED_INET6 { source-address:: GOOGLE_DNS destination-address:: SOME_HOST action:: accept } """ INET6_MIXED = """ term INET6_MIXED { source-address:: SOME_HOST destination-address:: GOOGLE_DNS action:: accept } """ MIXED_MIXED = """ term MIXED_MIXED { source-address:: GOOGLE_DNS destination-address:: GOOGLE_DNS action:: accept } """ MIXED_ANY = """ term MIXED_ANY { source-address:: GOOGLE_DNS action:: accept } """ ANY_MIXED = """ term ANY_MIXED { destination-address:: GOOGLE_DNS action:: accept } """ INET_INET = """ term INET_INET { source-address:: NTP_SERVERS destination-address:: INTERNAL action:: accept } """ INET6_INET6 = """ term INET6_INET6 { source-address:: SOME_HOST destination-address:: SOME_HOST action:: accept } """ INET_INET6 = """ term INET_INET6 { source-address:: INTERNAL destination-address:: SOME_HOST action:: accept } """ INET6_INET = """ term INET6_INET { source-address:: SOME_HOST destination-address:: INTERNAL action:: accept } """ SRC_FIELD_SET_INET = """ term FS_INET { source-address:: INTERNAL source-exclude:: SOME_HOST action:: accept } """ SRC_FIELD_SET_INET6 = """ term FS_INET6 { source-address:: INTERNAL source-exclude:: SOME_HOST action:: accept } """ SRC_FIELD_SET_MIXED = """ term FS_MIXED { source-address:: INTERNAL source-exclude:: SOME_HOST action:: accept } """ DST_FIELD_SET_INET = """ term FS_INET { destination-address:: INTERNAL destination-exclude:: SOME_HOST action:: accept } """ DST_FIELD_SET_INET6 = """ term FS_INET6 { destination-address:: INTERNAL destination-exclude:: SOME_HOST action:: accept } """ DST_FIELD_SET_MIXED = """ term FS_MIXED { destination-address:: INTERNAL destination-exclude:: SOME_HOST action:: accept } """ # this term should not have the logging element rendered LOGGING_ACCEPT = """ term logging-term-1 { protocol:: icmp action:: accept logging:: true } """ # this term _should_ have the logging element rendered LOGGING_DENY = """ term logging-term-1 { protocol:: icmp action:: deny logging:: true } """ SUPPORTED_TOKENS = frozenset([ "action", "comment", "counter", "destination_address", "destination_address_exclude", "destination_port", "destination_prefix", "dscp_set", "expiration", "fragment_offset", "hop_limit", "icmp_code", "icmp_type", "logging", "name", "option", "owner", "packet_length", "platform", "platform_exclude", "port", "protocol", "protocol_except", "source_address", "source_address_exclude", "source_port", "source_prefix", "stateless_reply", "translated", "ttl", "verbatim", ]) SUPPORTED_SUB_TOKENS = { "action": {"accept", "deny", "reject", "next", "reject-with-tcp-rst"}, "icmp_type": { "alternate-address", "certification-path-advertisement", "certification-path-solicitation", "conversion-error", "destination-unreachable", "echo-reply", "echo-request", "mobile-redirect", "home-agent-address-discovery-reply", "home-agent-address-discovery-request", 
"icmp-node-information-query", "icmp-node-information-response", "information-request", "inverse-neighbor-discovery-advertisement", "inverse-neighbor-discovery-solicitation", "mask-reply", "mask-request", "information-reply", "mobile-prefix-advertisement", "mobile-prefix-solicitation", "multicast-listener-done", "multicast-listener-query", "multicast-listener-report", "multicast-router-advertisement", "multicast-router-solicitation", "multicast-router-termination", "neighbor-advertisement", "neighbor-solicit", "packet-too-big", "parameter-problem", "redirect", "redirect-message", "router-advertisement", "router-renumbering", "router-solicit", "router-solicitation", "source-quench", "time-exceeded", "timestamp-reply", "timestamp-request", "unreachable", "version-2-multicast-listener-report", }, "option": { "established", "is-fragment", ".*", # not actually a lex token! "tcp-established", "tcp-initial", }, } # print an info message when a term is set to expire in that many weeks. # normally passed from command line. EXP_INFO = 2 class AristaTpTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testOptions(self): self.naming.GetNetAddr.return_value = [nacaddr.IP("10.0.0.0/8")] self.naming.GetServiceByProto.return_value = ["80"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) output = str(atp) self.assertIn("destination port 1024-65535", output, output) # verify that tcp-established; doesn't get duplicated if both # 'established' and 'tcp-established' options are included in term self.assertEqual(output.count("established"), 1) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("HTTP", "tcp") def testTermAndFilterName(self): self.naming.GetNetAddr.return_value = [nacaddr.IP("10.0.0.0/8")] self.naming.GetServiceByProto.return_value = ["25"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(atp) self.assertIn("match good-term-1", output, output) self.assertIn("traffic-policy test-filter", output, output) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testBadFilterType(self): self.naming.GetNetAddr.return_value = [nacaddr.IP("10.0.0.0/8")] self.naming.GetServiceByProto.return_value = ["25"] pol = policy.ParsePolicy(BAD_HEADER + GOOD_TERM_1, self.naming) self.assertRaises( aclgenerator.UnsupportedAFError, arista_tp.AristaTrafficPolicy, pol, EXP_INFO, ) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testDuplicateTermName(self): self.naming.GetNetAddr.return_value = [nacaddr.IP("10.0.0.0/8")] self.naming.GetServiceByProto.return_value = ["25"] pol = policy.ParsePolicy(GOOD_HEADER + DUPLICATE_TERMS, self.naming) self.assertRaises( aclgenerator.DuplicateTermError, arista_tp.AristaTrafficPolicy, pol, EXP_INFO, ) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testCounterCleanup(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + COUNTER_CLEANUP_TERM, self.naming), EXP_INFO) output = str(atp) self.assertIn("counter test-cleanup-check", output, output) self.assertIn("count test-cleanup-check", output, output) def testDefaultDeny(self): atp = arista_tp.AristaTrafficPolicy( 
policy.ParsePolicy(GOOD_HEADER + DEFAULT_TERM_1, self.naming), EXP_INFO) output = str(atp) self.assertIn("match ipv4-default-all ipv4", output, output) self.assertIn("match ipv6-default-all ipv6", output, output) def testIcmpType(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) output = str(atp) # verify proper translation from policy icmp-type text to # traffic-policy self.assertIn("icmp type ", output, output) self.assertIn("0,", output, output) self.assertIn("10,", output, output) self.assertIn("13,", output, output) self.assertIn("15,", output, output) self.assertIn("16", output, output) def testIcmpCode(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_35, self.naming), EXP_INFO) output = str(atp) self.assertIn("code 3,4", output, output) def testInet6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP("2001::/33")] self.naming.GetServiceByProto.return_value = ["25"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_1_V6, self.naming), EXP_INFO) output = str(atp) self.assertTrue("protocol icmpv6" in output and "protocol tcp" in output, output) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testHopLimit(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_V6_HOP_LIMIT, self.naming), EXP_INFO, ) output = str(atp) self.assertIn("ttl 25", output, output) def testProtocol(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5, self.naming), EXP_INFO) output = str(atp) self.assertIn("protocol icmp tcp", output, output) def testProtocolExceptTcp(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + PROTO_EXC_TCP, self.naming), EXP_INFO) output = str(atp) self.assertIn("protocol 1-5,7-255", output, output) self.assertIn("protocol 0-5,7-255", output, output) def testProtocolExceptList(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + PROTO_EXC_LIST, self.naming), EXP_INFO) output = str(atp) self.assertIn("protocol 1,3-7,9-26,28-255", output, output) def testPrefixList(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_8, self.naming), EXP_INFO) spfx_re = re.compile(r"source prefix field-set\W+foo_prefix_list\W+") dpfx_re = re.compile( r"destination prefix field-set\W+bar_prefix_list\W+baz_prefix_list\W+") output = str(atp) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testVerbatimTerm(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11, self.naming), EXP_INFO) output = str(atp) self.assertIn("mary had a little lamb", output, output) # check if other platforms verbatim shows up in output self.assertNotIn("mary had a second lamb", output, output) self.assertNotIn("mary had a third lamb", output, output) def testTcpEstablished(self): self.naming.GetServiceByProto.return_value = ["53"] policy_text = GOOD_HEADER + ESTABLISHED_TERM_1 atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(atp) self.assertIn("established", output, output) self.naming.GetServiceByProto.assert_called_once_with("DNS", "tcp") def testNonTcpWithTcpEstablished(self): self.naming.GetServiceByProto.return_value = ["53"] policy_text = GOOD_HEADER + BAD_TERM_1 pol_obj = policy.ParsePolicy(policy_text, 
self.naming) atp = arista_tp.AristaTrafficPolicy(pol_obj, EXP_INFO) self.assertRaises(arista_tp.TcpEstablishedWithNonTcpError, str, atp) self.naming.GetServiceByProto.assert_has_calls( [mock.call("DNS", "tcp"), mock.call("DNS", "udp")]) def testNoVerboseMixed(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IP("192.168." + str(octet) + ".64/27") addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ["25"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy( GOOD_NOVERBOSE_MIXED_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn("192.168.0.64/27", str(atp)) self.assertNotIn("COMMENT", str(atp)) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testNoVerboseV4(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IP("192.168." + str(octet) + ".64/27") addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ["25"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy( GOOD_NOVERBOSE_V4_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn("192.168.0.64/27", str(atp)) self.assertNotIn("COMMENT", str(atp)) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testNoVerboseV6(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IPv6( "2001:db8:1010:" + str(octet) + "::64/64", strict=False) addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ["25"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy( GOOD_NOVERBOSE_V6_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn("2001:db8:1010:90::/61", str(atp)) self.assertNotIn("COMMENT", str(atp)) self.naming.GetNetAddr.assert_called_once_with("SOME_HOST") self.naming.GetServiceByProto.assert_called_once_with("SMTP", "tcp") def testTermTypeIndexKeys(self): # ensure an _INET entry for each _TERM_TYPE entry self.assertCountEqual( arista_tp.Term._TERM_TYPE.keys(), arista_tp.Term.AF_MAP.keys()) def testCommentReflow(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + LONG_COMMENT_TERM_ANET, self.naming), EXP_INFO) output = str(atp) self.assertIn("!! 0 this is v", output, output) self.assertIn("!! very", output, output) self.assertIn("!! 1 very very", output, output) self.assertIn("!! 2 very very", output, output) self.assertIn("!! 3 very very", output, output) self.assertIn("!! 
4 very very", output, output) @mock.patch.object(arista_tp.logging, "warning") def testArbitraryOptions(self, mock_warn): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + OPTION_TERM_1, self.naming), EXP_INFO) output = str(atp) self.assertIn("fragment", output, output) # since this is a mixed term, check to make sure that the fragment term # isn't rendered for inet6 mock_warn.assert_any_call( "WARNING: term %s in mixed policy %s uses fragment " "the ipv6 version of the term will not be rendered.", "ipv6-option-term", "test-filter") @mock.patch.object(arista_tp.logging, "warning") def testLoggingOptionFail(self, mock_warn): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + LOGGING_ACCEPT, self.naming), EXP_INFO) output = str(atp) self.assertIn("match logging-term-1", output) self.assertNotIn(" log\n", output) # check for bare 'log' word mock_warn.assert_any_call( "WARNING: term %s uses logging option but is not a deny " "action. logging will not be added.", "logging-term-1", ) def testLoggingOption(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + LOGGING_DENY, self.naming), EXP_INFO) output = str(atp) self.assertIn(" log\n", output) @mock.patch.object(arista_tp.logging, "debug") def testIcmpv6InetMismatch(self, mock_debug): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + BAD_ICMPTYPE_TERM_1, self.naming), EXP_INFO) str(atp) mock_debug.assert_called_once_with( "Term icmptype-mismatch will not be rendered, " "as it has icmpv6 match specified but " "the ACL is of inet address family.") @mock.patch.object(arista_tp.logging, "debug") def testIcmpInet6Mismatch(self, mock_debug): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER_INET6 + BAD_ICMPTYPE_TERM_2, self.naming), EXP_INFO, ) str(atp) mock_debug.assert_called_once_with( "Term icmptype-mismatch will not be rendered, " "as it has icmp match specified but " "the ACL is of inet6 address family.") # icmptype-mismatch test for mixed filter type @mock.patch.object(arista_tp.logging, "debug") def testIcmpMismatchMixedInet(self, mock_debug): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + BAD_ICMPTYPE_TERM_1, self.naming), EXP_INFO, ) str(atp) mock_debug.assert_called_once_with( "Term icmptype-mismatch will not be rendered, " "as it has icmpv6 match specified but " "the ACL is of inet address family.") @mock.patch.object(arista_tp.logging, "debug") def testIcmpMismatchMixedInet6(self, mock_debug): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + BAD_ICMPTYPE_TERM_2, self.naming), EXP_INFO, ) str(atp) mock_debug.assert_called_once_with( "Term ipv6-icmptype-mismatch will not be rendered, " "as it has icmp match specified but " "the ACL is of inet6 address family.") @mock.patch.object(arista_tp.logging, "warning") def testExpiredTerm(self, mock_warn): _ = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_any_call( "WARNING: term %s in policy %s is expired and will " "not be rendered.", "is_expired", "test-filter", ) mock_warn.assert_any_call( "WARNING: term %s in policy %s is expired and will " "not be rendered.", "ipv6-is_expired", "test-filter", ) @mock.patch.object(arista_tp.logging, "info") def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) _ = arista_tp.AristaTrafficPolicy( policy.ParsePolicy( GOOD_HEADER + EXPIRING_TERM % exp_date.strftime("%Y-%m-%d"), self.naming), 
EXP_INFO, ) mock_info.assert_any_call( "INFO: term %s in policy %s expires in " "less than two weeks.", "is_expiring", "test-filter") def testOwnerTerm(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_OWNER, self.naming), EXP_INFO) output = str(atp) self.assertIn("!! owner: foo@google.com", output, output) # confirm that we don't generate a term for non-default @mock.patch.object(arista_tp.logging, "warning") def testMissingMatchCriteria(self, mock_warn): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + MISSING_MATCH, self.naming), EXP_INFO) output = str(atp) self.assertNotIn("match", output, output) mock_warn.has_calls( "WARNING: term %s has no valid match criteria and " "will not be rendered.", "missing-match", ) def testAddressExclude(self): big = nacaddr.IPv4("0.0.0.0/1") ip1 = nacaddr.IPv4("10.0.0.0/8") ip2 = nacaddr.IPv4("172.16.0.0/12") terms = (GOOD_TERM_18_SRC, GOOD_TERM_18_DST) self.naming.GetNetAddr.side_effect = [[big, ip1, ip2], [ip1]] * len(terms) mock_calls = [] for term in terms: atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + term, self.naming), EXP_INFO) output = str(atp) self.assertIn("except 10.0.0.0/8", output, output) # note that the additional spaces are in the following assert to insure # that it's not being rendered w/o the "except" self.assertNotIn(" 10.0.0.0/8", output, output) self.assertIn("172.16.0.0/12", output, output) self.assertNotIn("except 172.16.0.0/12", output, output) mock_calls.append(mock.call("INTERNAL")) mock_calls.append(mock.call("SOME_HOST")) self.naming.GetNetAddr.assert_has_calls(mock_calls) def testMixedInet(self): self.naming.GetNetAddr.side_effect = [[ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], [ nacaddr.IP("10.0.0.0/8"), nacaddr.IP("172.16.0.0/12"), nacaddr.IP("192.168.0.0/16") ]] pol = policy.ParsePolicy(GOOD_HEADER + MIXED_INET, self.naming) atp = arista_tp.AristaTrafficPolicy(pol, EXP_INFO) output = str(atp) self.assertIn("match MIXED_INET ipv4", output, output) self.assertIn("source prefix 8.8.4.4/32", output, output) self.assertIn("destination prefix 10.0.0.0/8", output, output) self.assertNotIn("match ipv6-MIXED_INET ipv6", output, output) self.assertNotIn("source prefix 2001:4860:4860::8844/128", output, output) def testInetMixed(self): self.naming.GetNetAddr.side_effect = [ [ nacaddr.IP("10.0.0.0/8"), nacaddr.IP("172.16.0.0/12"), nacaddr.IP("192.168.0.0/16") ], [ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + INET_MIXED, self.naming), EXP_INFO) output = str(atp) self.assertIn("match INET_MIXED ipv4", output, output) self.assertIn("source prefix 10.0.0.0/8", output, output) self.assertIn("destination prefix 8.8.4.4/32", output, output) self.assertNotIn("match ipv6-INET_MIXED ipv6", output, output) self.assertNotIn("destination prefix 2001:4860:4860::8844/128", output, output) def testMixedInet6(self): self.naming.GetNetAddr.side_effect = [[ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], [nacaddr.IP("2001:4860:4860::8844")]] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + MIXED_INET6, self.naming), EXP_INFO) output = str(atp) # note that the term name will contain the 'ipv6-' prefix self.assertIn("match ipv6-MIXED_INET6 ipv6", output, output) 
self.assertIn("source prefix 2001:4860:4860::8844/128", output, output) self.assertIn("destination prefix 2001:4860:4860::8844/128", output, output) # check to make sure that the IPv4 elements are not rendered self.assertNotIn("match MIXED_INET6 ipv4", output, output) self.assertNotIn("source prefix 8.8.8.8", output, output) def testInet6Mixed(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("2001:4860:4860::8844")], [ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ] ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + INET6_MIXED, self.naming), EXP_INFO) output = str(atp) self.assertIn("match ipv6-INET6_MIXED ipv6", output, output) self.assertIn("source prefix 2001:4860:4860::8844/128", output, output) self.assertIn("destination prefix 2001:4860:4860::8844/128", output, output) self.assertNotIn("match INET6_MIXED ipv4", output, output) self.assertNotIn("destination prefix 8.8.8.8", output, output) def testMixedMixed(self): self.naming.GetNetAddr.side_effect = [ [ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], [ nacaddr.IP("4.4.2.2"), nacaddr.IP("4.4.4.4"), nacaddr.IP("2001:4860:1337::8844"), nacaddr.IP("2001:4860:1337::8888") ] ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + MIXED_MIXED, self.naming), EXP_INFO) output = str(atp) self.assertIn("match MIXED_MIXED ipv4", output, output) self.assertIn("source prefix 8.8.4.4/32", output, output) self.assertIn("destination prefix 4.4.2.2", output, output) self.assertIn("match ipv6-MIXED_MIXED ipv6", output, output) self.assertIn("source prefix 2001:4860:4860::8844/128", output, output) self.assertIn("destination prefix 2001:4860:1337::8844/128", output, output) def testMixedAny(self): self.naming.GetNetAddr.side_effect = [[ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ]] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + MIXED_ANY, self.naming), EXP_INFO) output = str(atp) self.assertIn("match MIXED_ANY ipv4", output, output) self.assertIn("source prefix 8.8.4.4/32", output, output) self.assertIn("match ipv6-MIXED_ANY ipv6", output, output) self.assertIn("source prefix 2001:4860:4860::8844/128", output, output) def testAnyMixed(self): self.naming.GetNetAddr.side_effect = [[ nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ]] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + ANY_MIXED, self.naming), EXP_INFO) output = str(atp) self.assertIn("match ANY_MIXED ipv4", output, output) self.assertIn("destination prefix 8.8.4.4/32", output, output) self.assertIn("match ipv6-ANY_MIXED ipv6", output, output) self.assertIn("destination prefix 2001:4860:4860::8844/128", output, output) def testInetInet(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8")], [nacaddr.IP("4.4.2.2"), nacaddr.IP("4.4.4.4")], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + INET_INET, self.naming), EXP_INFO) output = str(atp) self.assertIn("match INET_INET ipv4", output, output) self.assertIn("source prefix 8.8.4.4/32", output, output) self.assertIn("destination prefix 4.4.2.2/32", output, output) def testInet6Inet6(self): self.naming.GetNetAddr.side_effect = [[ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], 
[nacaddr.IP("2001:4860:1337::8844"), nacaddr.IP("2001:4860:1337::8888")]] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + INET6_INET6, self.naming), EXP_INFO) output = str(atp) self.assertIn("match ipv6-INET6_INET6 ipv6", output, output) self.assertIn("source prefix 2001:4860:4860::8844/128", output, output) self.assertIn("destination prefix 2001:4860:1337::8844/128", output, output) def testInetInet6(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8")], [ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + INET_INET6, self.naming), EXP_INFO) output = str(atp) # we should not generate this term # TODO(sulrich): we should, however, throw a warning self.assertNotIn("match INET_INET6 ipv4", output, output) self.assertNotIn("match ipv6-INET_INET6 ipv6", output, output) def testInet6Inet(self): self.naming.GetNetAddr.side_effect = [ [ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4860::8888") ], [nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8")], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + INET6_INET, self.naming), EXP_INFO) output = str(atp) self.assertNotIn("match INET6_INET ipv4", output, output) self.assertNotIn("match ipv6-INET6_INET ipv6", output, output) def testSrcFsInet(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("8.8.4.0/24"), nacaddr.IP("8.8.8.0/24")], [nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8")], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + SRC_FIELD_SET_INET, self.naming), EXP_INFO) output = str(atp) self.assertIn("field-set ipv4 prefix src-FS_INET", output, output) self.assertIn("source prefix field-set src-FS_INET", output, output) def testSrcFsInet6(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("2001:4860:4860::/64"), nacaddr.IP("2001:4860:4861::/64")], [ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4861::8888") ], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + SRC_FIELD_SET_INET6, self.naming), EXP_INFO) output = str(atp) self.assertIn("field-set ipv6 prefix src-ipv6-FS_INET6", output, output) self.assertIn("source prefix field-set src-ipv6-FS_INET6", output, output) def testSrcFsMixed(self): self.naming.GetNetAddr.side_effect = [ [ nacaddr.IP("8.8.4.0/24"), nacaddr.IP("8.8.8.0/24"), nacaddr.IP("2001:4860:4860::/64"), nacaddr.IP("2001:4860:4860::/64"), nacaddr.IP("2001:4860:4861::/64") ], [ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4861::8888"), nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), ], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + SRC_FIELD_SET_MIXED, self.naming), EXP_INFO) output = str(atp) self.assertIn("field-set ipv4 prefix src-FS_MIXED", output, output) self.assertIn("field-set ipv6 prefix src-ipv6-FS_MIXED", output, output) self.assertIn("source prefix field-set src-FS_MIXED", output, output) self.assertIn("source prefix field-set src-ipv6-FS_MIXED", output, output) def testDstFsInet(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("8.8.4.0/24"), nacaddr.IP("8.8.8.0/24")], [nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8")], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + DST_FIELD_SET_INET, self.naming), EXP_INFO) output = str(atp) self.assertIn("field-set ipv4 prefix dst-FS_INET", output, output) self.assertIn("destination prefix field-set dst-FS_INET", output, output) def testDstFsInet6(self): 
self.naming.GetNetAddr.side_effect = [ [nacaddr.IP("2001:4860:4860::/64"), nacaddr.IP("2001:4860:4861::/64")], [ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4861::8888") ], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + DST_FIELD_SET_INET6, self.naming), EXP_INFO) output = str(atp) self.assertIn("field-set ipv6 prefix dst-ipv6-FS_INET6", output, output) self.assertIn("destination prefix field-set dst-ipv6-FS_INET6", output, output) def testDstFsMixed(self): self.naming.GetNetAddr.side_effect = [ [ nacaddr.IP("8.8.4.0/24"), nacaddr.IP("8.8.8.0/24"), nacaddr.IP("2001:4860:4860::/64"), nacaddr.IP("2001:4860:4860::/64"), nacaddr.IP("2001:4860:4861::/64") ], [ nacaddr.IP("2001:4860:4860::8844"), nacaddr.IP("2001:4860:4861::8888"), nacaddr.IP("8.8.4.4"), nacaddr.IP("8.8.8.8"), ], ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + DST_FIELD_SET_MIXED, self.naming), EXP_INFO) output = str(atp) self.assertIn("field-set ipv4 prefix dst-FS_MIXED", output, output) self.assertIn("field-set ipv6 prefix dst-ipv6-FS_MIXED", output, output) self.assertIn("destination prefix field-set dst-FS_MIXED", output, output) self.assertIn("destination prefix field-set dst-ipv6-FS_MIXED", output, output) def testConfigHelper(self): match_indent = " " * 6 config = arista_tp.Config() config.Append(match_indent, "test") config.Append(match_indent, "blah") config.Append(match_indent, "foo") config.Append(match_indent, "bar") config.Append(match_indent, "Mr. T Pities the fool!", verbatim=True) self.assertMultiLineEqual( str(config), " test\n" " blah\n" " foo\n" " bar\n" "Mr. T Pities the fool!") def testFragmentOffset(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + FRAGOFFSET_TERM, self.naming), EXP_INFO) output = str(atp) self.assertIn("fragment offset 1-7", output, output) def testTTL(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_21, self.naming), EXP_INFO) output = str(atp) self.assertIn("ttl 10", output) def testBuildTokens(self): self.naming.GetNetAddr.return_value = [ nacaddr.IP("10.1.1.1/26", strict=False) ] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) st, sst = atp._BuildTokens() # print(ppr.pprint(st)) # print(ppr.pprint(SUPPORTED_TOKENS)) self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) st, sst = atp._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testHopOptProtocol(self): atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + HOPOPT_TERM, self.naming), EXP_INFO) output = str(atp) self.assertIn("protocol 0", output, output) def testFailIsFragmentInV6(self): self.naming.GetServiceByProto.return_value = ["22"] pol = policy.ParsePolicy(GOOD_HEADER_INET6 + OPTION_TERM_1, self.naming) self.assertRaises( arista_tp.AristaTpFragmentInV6Error, arista_tp.AristaTrafficPolicy, pol, EXP_INFO, ) @mock.patch.object(arista_tp.logging, "warning") def testFailIsFragmentInMixed(self, mock_warn): self.naming.GetServiceByProto.return_value = ["22"] atp = arista_tp.AristaTrafficPolicy( policy.ParsePolicy(GOOD_HEADER + OPTION_TERM_1, self.naming), EXP_INFO) output = str(atp) self.assertNotIn("match ipv6-option-term ipv6", output, output) mock_warn.assert_any_call( "WARNING: term %s in mixed policy %s uses 
fragment " "the ipv6 version of the term will not be rendered.", "ipv6-option-term", "test-filter") if __name__ == "__main__": absltest.main() capirca-2.0.9/tests/lib/aruba_test.py000066400000000000000000000561641437377527500175770ustar00rootroot00000000000000# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for Aruba acl rendering module.""" import datetime import logging import textwrap from absl.testing import absltest from unittest import mock from capirca.lib import aruba from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER_V4 = """ header { target:: aruba test-filter } """ GOOD_HEADER_V6 = """ header { target:: aruba test-filter inet6 } """ GOOD_HEADER_NOVERBOSE = """ header { target:: aruba test-filter noverbose } """ EXPIRED_TERM = """ term is-expired { expiration:: 2010-01-01 action:: accept } """ EXPIRING_TERM = """ term is-expiring { expiration:: %s action:: accept } """ GOOD_TERM_SIMPLE = """ term good-term-simple { action:: accept } """ GOOD_TERM_SHORT_COMMENT = """ term good-term-short-comment { comment:: "Some short comment." owner:: someowner action:: deny } """ GOOD_TERM_LONG_COMMENT = """ term good-term-long-comment { comment:: "Two households, both alike in dignity," comment:: "In fair Verona, where we lay our scene," comment:: "From ancient grudge break to new mutiny, Where civil blood makes civil hands unclean." owner:: wshakespeare action:: accept } """ GOOD_TERM_VERBATIM = """ term much-verbatim { verbatim:: aruba "aruba uses some odd ACL format" verbatim:: aruba "which is kinda like, weird" verbatim:: aruba "" verbatim:: cisco "But Cisco's format is Ok, tho." verbatim:: juniper "And Juniper's is the best!" 
} """ GOOD_TERM_ALLOW_ANY_ANY = """ term good-term-allow-any-any { action:: accept } """ GOOD_TERM_DENY_ANY_ANY = """ term good-term-deny-any-any { action:: deny } """ GOOD_TERM_SINGLE_NETDESTINATION = """ term gt-one-netd { source-address:: SINGLE_HOST protocol:: icmp action:: accept } """ GOOD_TERM_TWO_NETDESTINATIONS = """ term gt-two-netd { source-address:: SINGLE_HOST destination-address:: SINGLE_HOST protocol:: icmp action:: accept } """ GOOD_TERM_TWO_NETWORK_NETDESTINATIONS = """ term gt-mix-netd { source-address:: SOME_NETWORK destination-address:: SOME_NETWORK protocol:: icmp action:: accept } """ GOOD_TERM_COMBINED_NETDESTINATIONS = """ term good-term-combined-netdestinations { source-address:: MIXED_HOSTS protocol:: tcp destination-port:: HTTP action:: deny } """ GOOD_TERMS_COMBINED_SINGLE_CASE = """ term good-terms-combined-1 { source-address:: SOME_HOST destination-address:: SOME_HOST protocol:: udp destination-port:: TFTP action:: accept } term good-terms-combined-2 { action:: deny } """ GOOD_TERM_SOURCE_IS_USER = """ term good-term-source-is-user { destination-address:: SOME_NETWORK protocol:: tcp destination-port:: DNS action:: accept option:: source-is-user } """ GOOD_TERM_DESTINATION_IS_USER = """ term good-term-destination-is-user { source-address:: SOME_NETWORK protocol:: tcp destination-port:: DNS action:: accept option:: destination-is-user } """ GOOD_TERM_NEGATE_1 = """ term good-term-negate { source-address:: SOME_NETWORK action:: deny option:: negate } """ GOOD_TERM_NEGATE_2 = """ term good-term-negate { action:: accept option:: negate } """ GOOD_TERM_PROTOCOL_MAP = """ term allow-icmp { protocol:: icmp action:: accept } term allow-gre { protocol:: gre action:: accept } term allow-esp { protocol:: esp action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_port', 'expiration', 'stateless_reply', 'name', 'option', 'protocol', 'source_address', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': { 'accept', 'deny', }, 'option': { 'source-is-user', 'destination-is-user', 'negate', }, } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class ArubaTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testBuildTokens(self): aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_SIMPLE, self.naming), EXP_INFO) st, sst = aru._BuildTokens() self.assertEqual(SUPPORTED_TOKENS, st) self.assertEqual(SUPPORTED_SUB_TOKENS, sst) @mock.patch.object(logging, 'warning') def testExpiredTerm(self, mock_warn): aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and will not ' 'be rendered.', 'is-expired', 'test-filter') @mock.patch.object(logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is-expiring', 'test-filter') def testSimpleTerm(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter any any any permit ! 
""" aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_SIMPLE, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testShortComment(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter # Some short comment. # Owner: someowner any any any deny ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_SHORT_COMMENT, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testNoVerbose(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter any any any deny ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_NOVERBOSE + GOOD_TERM_SHORT_COMMENT, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testLongWrappedComment(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter # Two households, both alike in dignity, # In fair Verona, where we lay our scene, # From ancient grudge break to new mutiny, Where civil blood makes civil # hands unclean. # Owner: wshakespeare any any any permit ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_LONG_COMMENT, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testVerbatim(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter aruba uses some odd ACL format which is kinda like, weird any any any permit ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_VERBATIM + GOOD_TERM_ALLOW_ANY_ANY, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testMultipleCallsSingleOwnerLine(self): expected_result = textwrap.dedent("""\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter # Two households, both alike in dignity, # In fair Verona, where we lay our scene, # From ancient grudge break to new mutiny, Where civil blood makes civil # hands unclean. # Owner: wshakespeare any any any permit ! """) aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_LONG_COMMENT, self.naming), EXP_INFO) self.assertEqual(expected_result, str(aru)) self.assertEqual(expected_result, str(aru)) def testTermAllowAnyAnyIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter any any any permit ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_ALLOW_ANY_ANY, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTermAllowAnyAnyIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter ipv6 any any any permit ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_ALLOW_ANY_ANY, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTermDenyAnyAnyIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter any any any deny ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_DENY_ANY_ANY, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTermDenyAnyAnyIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter ipv6 any any any deny ! 
""" aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_DENY_ANY_ANY, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testMultipleCallsSingleNetdestinationsBlock(self): expected_result = textwrap.dedent("""\ # $Id:$ # $Date:$ # $Revision:$ netdestination gt-one-netd_src host 10.1.1.1 ! ip access-list session test-filter alias gt-one-netd_src any 1 permit ! """) self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_SINGLE_NETDESTINATION, self.naming), EXP_INFO) self.assertEqual(expected_result, str(aru)) self.assertEqual(expected_result, str(aru)) def testSingleNetdestinationIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination gt-one-netd_src host 10.1.1.1 ! ip access-list session test-filter alias gt-one-netd_src any 1 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_SINGLE_NETDESTINATION, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testSingleNetdestinationIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination6 gt-one-netd_src host 2001:: ! ip access-list session test-filter ipv6 alias gt-one-netd_src any 1 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::/128')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_SINGLE_NETDESTINATION, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTwoNetdestinationsIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination gt-two-netd_src host 10.1.1.1 ! netdestination gt-two-netd_dst host 10.1.1.1 ! ip access-list session test-filter alias gt-two-netd_src alias gt-two-netd_dst 1 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_TWO_NETDESTINATIONS, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTwoNetdestinationsIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination6 gt-two-netd_src host 2001:: ! netdestination6 gt-two-netd_dst host 2001:: ! ip access-list session test-filter ipv6 alias gt-two-netd_src alias gt-two-netd_dst 1 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::/128')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_TWO_NETDESTINATIONS, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTwoNetworkNetdestinationsIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination gt-mix-netd_src network 10.0.0.0 255.0.0.0 ! netdestination gt-mix-netd_dst network 10.0.0.0 255.0.0.0 ! ip access-list session test-filter alias gt-mix-netd_src alias gt-mix-netd_dst 1 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_TWO_NETWORK_NETDESTINATIONS, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testTwoNetworkNetdestinationsIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination6 gt-mix-netd_src network 2001::/64 ! netdestination6 gt-mix-netd_dst network 2001::/64 ! ip access-list session test-filter ipv6 alias gt-mix-netd_src alias gt-mix-netd_dst 1 permit ! 
""" self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::/64')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_TWO_NETWORK_NETDESTINATIONS, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testCombinedNetdestinationsIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-term-combined-netdestinations_src host 10.0.0.1 network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter alias good-term-combined-netdestinations_src any tcp 80 deny ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8'), nacaddr.IP('10.0.0.1/32')] self.naming.GetServiceByProto.return_value = ['80'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_COMBINED_NETDESTINATIONS, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testCombinedNetdestinationsIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination6 good-term-combined-netdestinations_src host 2001:: network 2002::/64 ! ip access-list session test-filter ipv6 alias good-term-combined-netdestinations_src any tcp 80 deny ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('2002::/64'), nacaddr.IP('2001::/128')] self.naming.GetServiceByProto.return_value = ['80'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_COMBINED_NETDESTINATIONS, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testMultipleTermsIPv4(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-terms-combined-1_src host 10.0.0.1 network 100.0.0.0 255.0.0.0 ! netdestination good-terms-combined-1_dst host 10.0.0.1 network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter alias good-terms-combined-1_src alias good-terms-combined-1_dst udp 69 permit any any any deny ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8'), nacaddr.IP('10.0.0.1/32')] self.naming.GetServiceByProto.return_value = ['69'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERMS_COMBINED_SINGLE_CASE, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testMultipleTermsIPv6(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination6 good-terms-combined-1_src host 2001:: network 2002::/64 ! netdestination6 good-terms-combined-1_dst host 2001:: network 2002::/64 ! ip access-list session test-filter ipv6 alias good-terms-combined-1_src alias good-terms-combined-1_dst udp 69 permit ipv6 any any any deny ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('2002::/64'), nacaddr.IP('2001::/128')] self.naming.GetServiceByProto.return_value = ['69'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERMS_COMBINED_SINGLE_CASE, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testSourceIsUser(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-term-source-is-user_dst network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter user alias good-term-source-is-user_dst tcp 53 permit ! 
""" self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['53'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_SOURCE_IS_USER, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testDestinationIsUser(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-term-destination-is-user_src network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter alias good-term-destination-is-user_src user tcp 53 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['53'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_DESTINATION_IS_USER, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testProtocolIsContiguousRange(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-term-destination-is-user_src network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter alias good-term-destination-is-user_src user tcp 53 55 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['53-55', '54'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_DESTINATION_IS_USER, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testProtocolIsDiscontiguousRange(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-term-destination-is-user_src network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter alias good-term-destination-is-user_src user tcp 1 permit alias good-term-destination-is-user_src user tcp 10 20 permit alias good-term-destination-is-user_src user tcp 53 55 permit ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['53-55', '54', '10-20', '1'] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_DESTINATION_IS_USER, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testNegateWithNetwork(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ netdestination good-term-negate_src network 100.0.0.0 255.0.0.0 ! ip access-list session test-filter no alias good-term-negate_src any any deny ! """ self.naming.GetNetAddr.return_value = [nacaddr.IP('100.0.0.0/8')] aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_NEGATE_1, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testNegateAny(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter no any any any permit ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_NEGATE_2, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) def testProtocolMap(self): expected_result = """\ # $Id:$ # $Date:$ # $Revision:$ ip access-list session test-filter any any 1 permit any any 47 permit any any 50 permit ! """ aru = aruba.Aruba(policy.ParsePolicy(GOOD_HEADER_V4 + GOOD_TERM_PROTOCOL_MAP, self.naming), EXP_INFO) self.assertEqual(textwrap.dedent(expected_result), str(aru)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/brocade_test.py000066400000000000000000000110471437377527500200730ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for brocade acl rendering module.""" import re from absl.testing import absltest from unittest import mock from capirca.lib import brocade from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: brocade test-filter } """ GOOD_TERM = """ term good-term { protocol:: tcp option:: tcp-established action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: tcp policer:: batman option:: tcp-established action:: accept } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'dscp_match', 'expiration', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'restrict_address_family', 'source_address', 'source_address_exclude', 'source_port', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'tcp-established', 'is-fragment', 'fragments'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
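# EXP_INFO below is handed to each generator as its exp_info argument
# (e.g. brocade.Brocade(pol, EXP_INFO)).  The expiration tests in the other
# modules of this archive show the behaviour it controls: a term expiring
# within that many weeks only logs an INFO message, while an already-expired
# term logs a WARNING and is left out of the rendered output.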
EXP_INFO = 2


class BrocadeTest(absltest.TestCase):

  def setUp(self):
    super().setUp()
    self.naming = mock.create_autospec(naming.Naming)

  def testTcpEstablished(self):
    acl = brocade.Brocade(
        policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO)
    self.assertTrue(re.search('permit tcp any any established\n', str(acl)),
                    str(acl))

  def testNoTermRemark(self):
    acl = brocade.Brocade(
        policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO)
    self.assertNotIn('remark good-term-3', str(acl))

  def testBuildTokens(self):
    pol1 = brocade.Brocade(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM,
                                              self.naming), EXP_INFO)
    st, sst = pol1._BuildTokens()
    self.assertEqual(st, SUPPORTED_TOKENS)
    self.assertEqual(sst, SUPPORTED_SUB_TOKENS)

  def testBuildWarningTokens(self):
    pol1 = brocade.Brocade(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1,
                                              self.naming), EXP_INFO)
    st, sst = pol1._BuildTokens()
    self.assertEqual(st, SUPPORTED_TOKENS)
    self.assertEqual(sst, SUPPORTED_SUB_TOKENS)


if __name__ == '__main__':
  absltest.main()
capirca-2.0.9/tests/lib/cgrep_test.py000066400000000000000000000374001437377527500175750ustar00rootroot00000000000000
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for cgrep.

Order doesn't matter for the purposes of these tests, so many actual and
expected results are sorted/sets to prevent issues relating to the order in
which items are returned.
"""
import argparse

from absl.testing import absltest

from capirca.lib import nacaddr
from capirca.lib import naming
from tools import cgrep

_NETWORK = """
#
# Sample naming definitions for network objects
#
RFC1918 = 10.0.0.0/8      # non-public
          172.16.0.0/12   # non-public
          192.168.0.0/16  # non-public

INTERNAL = RFC1918

LOOPBACK = 127.0.0.0/8  # loopback
           ::1/128      # ipv6 loopback

RFC_3330 = 169.254.0.0/16  # special use IPv4 addresses - netdeploy

RFC_6598 = 100.64.0.0/10   # Shared Address Space

LINKLOCAL = FE80::/10  # IPv6 link-local

SITELOCAL = FEC0::/10  # IPv6 Site-local

MULTICAST = 224.0.0.0/4  # IP multicast
            FF00::/8     # IPv6 multicast

CLASS-E = 240.0.0.0/4

RESERVED = 0.0.0.0/8  # reserved
           RFC1918
           LOOPBACK
           RFC_3330
           RFC_6598
           MULTICAST
           CLASS-E
           0000::/8  # reserved by IETF
           0100::/8  # reserved by IETF
           0200::/7  # reserved by IETF
           0400::/6  # reserved by IETF
           0800::/5  # reserved by IETF
           1000::/4  # reserved by IETF
           4000::/3  # reserved by IETF
           6000::/3  # reserved by IETF
           8000::/3  # reserved by IETF
           A000::/3  # reserved by IETF
           C000::/3  # reserved by IETF
           E000::/4  # reserved by IETF
           F000::/5  # reserved by IETF
           F800::/6  # reserved by IETF
           FC00::/7  # unique local unicast
           FE00::/9  # reserved by IETF
           LINKLOCAL  # link local unicast
           SITELOCAL  # IPv6 site-local

ANY = 0.0.0.0/0

# http://www.team-cymru.org/Services/Bogons/bogon-bn-agg.txt
# 22-Apr-2011
BOGON = 0.0.0.0/8
        192.0.0.0/24
        192.0.2.0/24
        198.18.0.0/15
        198.51.100.0/24
        203.0.113.0/24
        MULTICAST
        CLASS-E
        3FFE::/16      # 6bone
        5F00::/8       # 6bone
        2001:DB8::/32  # IPv6 documentation prefix

GOOGLE_PUBLIC_DNS_ANYCAST = 8.8.4.4/32                # IPv4 Anycast
                            8.8.8.8/32                # IPv4 Anycast
                            2001:4860:4860::8844/128  # IPv6 Anycast
                            2001:4860:4860::8888/128  # IPv6 Anycast
GOOGLE_DNS = GOOGLE_PUBLIC_DNS_ANYCAST

# The following are sample entries intended for use in the included
# sample policy file. These should be removed.
WEB_SERVERS = 200.1.1.1/32 # Example web server 1 200.1.1.2/32 # Example web server 2 MAIL_SERVERS = 200.1.1.4/32 # Example mail server 1 200.1.1.5/32 # Example mail server 2 PUBLIC_NAT = 200.1.1.3/32 # Example company NAT address NTP_SERVERS = 10.0.0.1/32 # Example NTP server 10.0.0.2/32 # Example NTP server TACACS_SERVERS = 10.1.0.1/32 # Example tacacs server 10.1.0.2/32 # Example tacacs server INTERNAL_SERVER = 1.0.0.1/32 PUBLIC_SERVER = 100.0.0.1/32 INTERNAL_SERVERS = INTERNAL_SERVER PUBLIC_SERVERS = PUBLIC_SERVER SERVERS = INTERNAL_SERVERS PUBLIC_SERVERS """ _SERVICE = """ # # Sample naming service definitions # WHOIS = 43/udp SSH = 22/tcp TELNET = 23/tcp SMTP = 25/tcp MAIL_SERVICES = SMTP ESMTP SMTP_SSL POP_SSL TIME = 37/tcp 37/udp TACACS = 49/tcp DNS = 53/tcp 53/udp BOOTPS = 67/udp # BOOTP server BOOTPC = 68/udp # BOOTP client DHCP = BOOTPS BOOTPC TFTP = 69/tcp 69/udp HTTP = 80/tcp WEB_SERVICES = HTTP HTTPS POP3 = 110/tcp RPC = 111/udp IDENT = 113/tcp 113/udp NNTP = 119/tcp NTP = 123/tcp 123/udp MS_RPC_EPMAP = 135/udp 135/tcp MS_137 = 137/udp MS_138 = 138/udp MS_139 = 139/tcp IMAP = 143/tcp SNMP = 161/udp SNMP_TRAP = 162/udp BGP = 179/tcp IMAP3 = 220/tcp LDAP = 389/tcp LDAP_SERVICE = LDAP LDAPS HTTPS = 443/tcp MS_445 = 445/tcp SMTP_SSL = 465/tcp IKE = 500/udp SYSLOG = 514/udp RTSP = 554/tcp ESMTP = 587/tcp LDAPS = 636/tcp IMAPS = 993/tcp POP_SSL = 995/tcp HIGH_PORTS = 1024-65535/tcp 1024-65535/udp MSSQL = 1433/tcp MSSQL_MONITOR = 1434/tcp RADIUS = 1812/tcp 1812/udp HSRP = 1985/udp NFSD = 2049/tcp 2049/udp NETFLOW = 2056/udp SQUID_PROXY = 3128/tcp MYSQL = 3306/tcp RDP = 3389/tcp IPSEC = 4500/udp POSTGRESQL = 5432/tcp TRACEROUTE = 33434-33534/udp """ class Namespace: def __init__(self, **kwargs): for arg in kwargs: setattr(self, arg, kwargs[arg]) class CgrepTest(absltest.TestCase): def setUp(self): super().setUp() self.db = naming.Naming(None) self.db.ParseServiceList(_SERVICE.split('\n')) self.db.ParseNetworkList(_NETWORK.split('\n')) # # test ip->token resolution (-i) # # 1.1.1.1 should only be in 'ANY' def test_one_ip(self): expected_results = [('ANY', ['0.0.0.0/0'])] ip = '1.1.1.1' results = cgrep.get_ip_parents(ip, self.db) self.assertEqual(results, expected_results) # 2001:db8::1 should only be in 'BOGON' def test_one_ipv6(self): expected_results = [('BOGON', ['2001:db8::/32'])] ip = '2001:db8::1' results = cgrep.get_ip_parents(ip, self.db) self.assertEqual(results, expected_results) # 1.1.1.1 should not be in CLASS-E def test_one_ip_fail(self): expected_results = [('CLASS-E', ['240.0.0.0/4'])] ip = nacaddr.IP('1.1.1.1/32') results = cgrep.get_ip_parents(ip, self.db) self.assertNotEqual(results, expected_results) # 2001:db8::1 should not be in LINKLOCAL def test_one_ipv6_fail(self): expected_results = [('LINKLOCAL', ['FE80::/10'])] ip = '2001:db8::1' results = cgrep.get_ip_parents(ip, self.db) self.assertNotEqual(results, expected_results) # 8.8.8.8 is in GOOGLE_PUBLIC_DNS_ANYCAST which is inside GOOGLE_DNS def test_one_ip_nested(self): expected_results = sorted((('GOOGLE_DNS', ['8.8.8.8/32']), ('GOOGLE_DNS -> GOOGLE_PUBLIC_DNS_ANYCAST', ['8.8.8.8/32']), ('ANY', ['0.0.0.0/0']))) ip = '8.8.8.8' results = sorted(cgrep.get_ip_parents(ip, self.db)) self.assertEqual(results, expected_results) # 2001:4860:4860::8844/128 is in GOOGLE_PUBLIC_DNS_ANYCAST which is # inside GOOGLE_DNS def test_one_ipv6_nested(self): expected_results = sorted((('GOOGLE_DNS', ['2001:4860:4860::8844/128']), ('GOOGLE_DNS -> GOOGLE_PUBLIC_DNS_ANYCAST', ['2001:4860:4860::8844/128']))) ip = 
'2001:4860:4860::8844/128' results = sorted(cgrep.get_ip_parents(ip, self.db)) self.assertEqual(results, expected_results) # 1.0.0.1 is inside INTERNAL_SERVER, which is inside INTERNAL_SERVERS, which # is inside SERVERS def test_one_ip_multi_nested(self): expected_results = sorted((('INTERNAL_SERVERS -> INTERNAL_SERVER', ['1.0.0.1/32']), ('SERVERS -> INTERNAL_SERVER', ['1.0.0.1/32']), ('SERVERS -> INTERNAL_SERVERS', ['1.0.0.1/32']), ('SERVERS', ['1.0.0.1/32']), ('ANY', ['0.0.0.0/0']))) ip = '1.0.0.1' results = sorted(cgrep.get_ip_parents(ip, self.db)) self.assertEqual(results, expected_results) # # test 'ip in token' (-i -t) # # 8.8.8.8 is inside GOOGLE_DNS def test_ip_in_token(self): expected_results = r'8.8.8.8 is in GOOGLE_DNS' options = Namespace() options.ip = ('8.8.8.8',) options.token = ('GOOGLE_DNS') results = cgrep.compare_ip_token(options, self.db) self.assertEqual(results, expected_results) # 2001:4860:4860::8844 is inside GOOGLE_DNS def test_ipv6_in_token(self): expected_results = r'2001:4860:4860::8844 is in GOOGLE_DNS' options = Namespace() options.ip = ('2001:4860:4860::8844',) options.token = ('GOOGLE_DNS') results = cgrep.compare_ip_token(options, self.db) self.assertEqual(results, expected_results) # 69.171.239.12 is not in GOOGLE_DNS def test_ip_in_token_fail(self): expected_results = r'69.171.239.12 is _not_ in GOOGLE_DNS' options = Namespace() options.ip = ('69.171.239.12',) options.token = ('GOOGLE_DNS') results = cgrep.compare_ip_token(options, self.db) self.assertEqual(results, expected_results) # 2a03:2880:fffe:c:face:b00c:0:35 is not in GOOGLE_DNS def test_ipv6_in_token_fail(self): expected_results = r'2a03:2880:fffe:c:face:b00c:0:35 is _not_ in GOOGLE_DNS' options = Namespace() options.ip = ('2a03:2880:fffe:c:face:b00c:0:35',) options.token = ('GOOGLE_DNS') results = cgrep.compare_ip_token(options, self.db) self.assertEqual(results, expected_results) # # test network token compare (-c) # # these two tokens are identical and should contain the same nets def test_compare_same_token(self): expected_results = ( ( r'PUBLIC_NAT', r'PUBLIC_NAT', [ nacaddr.IPv4('200.1.1.3/32') ], ), [ r'200.1.1.3/32' ] ) options = Namespace() options.cmp = ('PUBLIC_NAT', 'PUBLIC_NAT') results = cgrep.compare_tokens(options, self.db) self.assertEqual(results, expected_results) # # test network token encapsulations # def test_ip_contained(self): expected_results = True results = cgrep.check_encapsulated('network', 'RFC1918', 'RESERVED', self.db) self.assertEqual(results, expected_results) def test_ip_not_contained(self): expected_results = False results = cgrep.check_encapsulated('network', 'RESERVED', 'RFC1918', self.db) self.assertEqual(results, expected_results) def test_ipv6_contained(self): expected_results = True results = cgrep.check_encapsulated('network', 'LINKLOCAL', 'RESERVED', self.db) self.assertEqual(results, expected_results) def test_ipv6_not_contained(self): expected_results = False results = cgrep.check_encapsulated('network', 'RESERVED', 'LINKLOCAL', self.db) self.assertEqual(results, expected_results) # # test ip->object comparisons (-g) # # 8.8.8.8 is not present in object RESERVED and # 127.0.0.1 is not present in object GOOGLE_DNS and # the two IPs both exist in 'ANY' def test_group_diff(self): expected_results = sorted(( ['ANY'], ['GOOGLE_DNS', 'GOOGLE_DNS -> GOOGLE_PUBLIC_DNS_ANYCAST'], ['RESERVED', 'RESERVED -> LOOPBACK'] )) options = Namespace() options.gmp = ['8.8.8.8', '127.0.0.1'] results = sorted(cgrep.group_diff(options, self.db)) 
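# Both expected_results and results are sorted above, so the check below only
# verifies that the same three token groupings come back (the ones described
# in the comment before this test); as the module docstring notes, the order
# in which they are returned is deliberately ignored.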
self.assertCountEqual(results, expected_results) # test to make sure two IPs share the same groups def test_group_diff_identical(self): expected_results = sorted(( ['ANY', 'INTERNAL', 'INTERNAL -> RFC1918', 'RESERVED', 'RESERVED -> RFC1918'], [], [] )) options = Namespace() options.gmp = ['172.16.0.1', '192.168.0.1'] results = sorted(cgrep.group_diff(options, self.db)) self.assertCountEqual(results, expected_results) # # test token->ip(s) resolution (-o) # # resolve GOOGLE_DNS to the 4 given IPs def test_token_to_ips(self): expected_results = [ ( r'GOOGLE_DNS', [ nacaddr.IPv4('8.8.4.4/32'), nacaddr.IPv4('8.8.8.8/32'), nacaddr.IPv6('2001:4860:4860::8844/128'), nacaddr.IPv6('2001:4860:4860::8888/128') ] ) ] options = Namespace() options.obj = ('GOOGLE_DNS',) results = cgrep.get_nets(options.obj, self.db) self.assertEqual(results[0][0], expected_results[0][0]) self.assertEqual(set(results[0][1]), set(expected_results[0][1])) # GOOGLE_DNS does not resole to the given IP def test_token_to_ip_fail(self): expected_results = [ ( r'GOOGLE_DNS', [ nacaddr.IPv4('69.171.239.12/32'), nacaddr.IPv6('2a03:2880:fffe:c:face:b00c:0:35/128') ] ) ] options = Namespace() options.obj = ('GOOGLE_DNS',) results = cgrep.get_nets(options.obj, self.db) # the network object name should match, but not the IPs contained within self.assertEqual(results[0][0], expected_results[0][0]) self.assertNotEqual(set(results[0][1]), set(expected_results[0][1])) # # test service->port resolution (-s) # # "SSH" is just '22/tcp' def test_svc_to_port(self): expected_results = [ ( r'SSH', [ '22/tcp' ] ) ] options = Namespace() options.service = ('SSH',) results = cgrep.get_ports(options.service, self.db) self.assertEqual(results, expected_results) # "SSH" does not contain '23/tcp' def test_svc_to_port_fail(self): expected_results = [ ( r'SSH', [ '23/tcp' ] ) ] options = Namespace() options.svc = ('SSH',) results = cgrep.get_ports(options.svc, self.db) self.assertNotEqual(results, expected_results) # # test port->service object resolution (-p) # # '22/tcp' belongs to SSH def test_get_port_parents(self): expected_results = (r'22', r'tcp', ['SSH']) options = Namespace() options.port = ('22', 'tcp') results = cgrep.get_services(options, self.db) self.assertEqual(results, expected_results) # 22/tcp does not belong to TELNET def test_get_port_parents_fail(self): expected_results = (r'22', r'tcp', ['TELNET']) options = Namespace() options.port = ('22', 'tcp') results = cgrep.get_services(options, self.db) self.assertNotEqual(results, expected_results) # 33434/tcp should only be in HIGH_PORTS (not also TRACEROUTE) def test_get_port_parents_range_tcp(self): expected_results = (r'33434', r'tcp', ['HIGH_PORTS']) options = Namespace() options.port = ('33434', 'tcp') results = cgrep.get_services(options, self.db) self.assertEqual(results, expected_results) # 33434/udp should be in HIGH_PORTS and TRACEROUTE def test_get_port_parents_range_udp(self): expected_results = (r'33434', r'udp', ['HIGH_PORTS', 'TRACEROUTE']) options = Namespace() options.port = ('33434', 'udp') results = cgrep.get_services(options, self.db) self.assertEqual(results, expected_results) # # test IP validity # def test_invalid_ip(self): self.assertRaises(argparse.ArgumentTypeError, cgrep.is_valid_ip, '10.0.0.256') def test_invalid_ipv6(self): self.assertRaises(argparse.ArgumentTypeError, cgrep.is_valid_ip, '2001:db8::z') def test_valid_ips(self): arg = '8.8.8.8' results = cgrep.is_valid_ip(arg) self.assertEqual(results, arg) def test_valid_ips_v6(self): arg = 
'2001:4860:4860::8844' results = cgrep.is_valid_ip(arg) self.assertEqual(results, arg) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/cisco_test.py000066400000000000000000000730761437377527500176060ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for cisco acl rendering module.""" import datetime import re from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import cisco from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: cisco test-filter } """ GOOD_STANDARD_HEADER_1 = """ header { comment:: "this is a standard acl" target:: cisco 99 standard } """ GOOD_STANDARD_HEADER_2 = """ header { comment:: "this is a standard acl" target:: cisco FOO standard } """ GOOD_STANDARD_NUMBERED_HEADER = """ header { comment:: "numbered standard" target:: cisco 50 standard } """ GOOD_OBJGRP_HEADER = """ header { comment:: "obj group header test" target:: cisco objgroupheader object-group } """ GOOD_INET6_HEADER = """ header { comment:: "inet6 header test" target:: cisco inet6_acl inet6 } """ GOOD_MIXED_HEADER = """ header { comment:: "mixed inet/inet6 header test" target:: cisco mixed_acl mixed } """ GOOD_DSMO_HEADER = """ header { comment:: "this is a dsmo test acl" target:: cisco dsmo_acl extended enable_dsmo } """ GOOD_EXTENDED_NUMBERED_HEADER = """ header { comment:: "numbered extended" target:: cisco 150 extended } """ GOOD_NOVERBOSE_HEADER = """ header { comment:: "should not see me" target:: cisco test-filter noverbose } """ GOOD_NOVERBOSE_STANDARD_HEADER = """ header { comment:: "should not see me" target:: cisco 99 standard noverbose } """ GOOD_NOVERBOSE_OBJGRP_HEADER = """ header { comment:: "should not see me" target:: cisco objgroupheader object-group noverbose } """ GOOD_NOVERBOSE_INET6_HEADER = """ header { comment:: "should not see me" target:: cisco inet6_acl inet6 noverbose } """ BAD_STANDARD_HEADER_1 = """ header { comment:: "this is a standard acl" target:: cisco 2001 standard } """ BAD_STANDARD_HEADER_2 = """ header { comment:: "this is a standard acl" target:: cisco 101 standard } """ BAD_HEADER = """ header { comment:: "this is a test acl" target:: juniper test-filter } """ BAD_HEADER_2 = """ header { target:: cisco 1300 } """ LONG_VERSION_HEADER = """ header { comment:: "This long header should be split even on a looooooooooooooooooooooooooonnnnnnnnnnnnnnnnnngggggggggg string. 
https://www.google.com/maps/place/1600+Amphitheatre+Parkway,+Mountain+View,+CA/@37.507491,-122.2540443,15z/data=!4m5!3m4!1s0x808fb99f8c51e885:0x169ef02a512c5b28!8m2!3d37.4220579!4d-122.0840897" target:: cisco test-filter } """ GOOD_STANDARD_TERM_1 = """ term standard-term-1 { address:: SOME_HOST action:: accept } """ GOOD_STANDARD_TERM_2 = """ term standard-term-2 { address:: SOME_HOST action:: accept } """ BAD_STANDARD_TERM_1 = """ term bad-standard-term-1 { destination-address:: SOME_HOST protocol:: tcp action:: accept } """ UNSUPPORTED_TERM_1 = """ term protocol_except_term { protocol-except:: tcp udp icmp action:: reject } """ UNSUPPORTED_TERM_2 = """ term protocol_except_term { source-prefix:: configured-neighbors-only action:: reject } """ EXPIRED_TERM = """ term is_expired { expiration:: 2001-01-01 action:: accept } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: tcp destination-address:: SOME_HOST source-port:: HTTP option:: established action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: tcp option:: tcp-established action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { protocol:: tcp logging:: true action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { verbatim:: cisco "mary had a little lamb" verbatim:: iptables "mary had second lamb" verbatim:: juniper "mary had third lamb" } """ GOOD_TERM_6 = """ term good-term-6 { destination-address:: ANY action:: accept } """ GOOD_TERM_7 = """ term good-term { protocol:: vrrp action:: accept } """ GOOD_TERM_8 = """ term good-term { protocol:: tcp destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_9 = """ term good-term-9 { protocol:: tcp udp option:: established action:: accept } """ GOOD_TERM_10 = """ term good-term-10 { protocol:: icmp icmp-type:: echo-reply unreachable time-exceeded action:: accept } """ GOOD_TERM_11 = """ term good-term-11 { protocol:: icmpv6 icmp-type:: echo-reply destination-unreachable time-exceeded action:: accept } """ GOOD_TERM_12 = """ term good-term-12 { action:: accept } """ GOOD_TERM_13 = """ term good-term-13 { owner:: foo@google.com action:: accept } """ GOOD_TERM_14 = """ term good-term-14 { protocol:: tcp destination-address:: SOME_HOST destination-port:: CONSECUTIVE_PORTS action:: accept } """ GOOD_TERM_15 = """ term good-term-15 { protocol:: hopopt action:: accept } """ GOOD_TERM_16 = """ term good-term-16 { protocol:: tcp action:: accept dscp-match:: 42 } """ GOOD_TERM_17 = """ term good-term-17 { protocol:: tcp udp policer:: batman option:: established action:: accept } """ GOOD_TERM_18 = """ term good-term-18 { source-address:: SOME_HOST destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_19 = """ term good_term_19 { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ GOOD_TERM_20 = """ term good_term_20 { source-address:: SOME_HOST destination-address:: SOME_HOST option:: fragments action:: accept } """ GOOD_TERM_21 = """ term good_term_21 { source-address:: cs4-valid_network_name destination-address:: cs4-valid_network_name action:: accept } """ GOOD_TERM_22 = """ term good_term_22 { source-address:: SOME_HOST destination-address:: SOME_HOST option:: is-fragment action:: accept } """ GOOD_TERM_23 = """ term good_term_23 { protocol:: tcp destination-address:: SOME_HOST restrict-address-family:: inet action:: accept } """ GOOD_TERM_24 = """ term good_term_24 { protocol:: 
ipip destination-address:: SOME_HOST action:: accept } """ LONG_COMMENT_TERM = """ term long-comment-term { comment:: "%s " action:: accept } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'dscp_match', 'expiration', 'icmp_type', 'icmp_code', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'restrict_address_family', 'source_address', 'source_address_exclude', 'source_port', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'tcp-established', 'is-fragment', 'fragments'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class CiscoTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testIPVersion(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('0.0.0.0/0'), nacaddr.IP('::/0')] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_6, self.naming) acl = cisco.Cisco(pol, EXP_INFO) # check if we've got a v6 address in there. self.assertNotIn('::', str(acl), str(acl)) self.naming.GetNetAddr.assert_called_once_with('ANY') def testOptions(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['80'] acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) # this is a hacky sort of way to test that 'established' maps to HIGH_PORTS # in the destination port section. 
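# Concretely: a tcp term with source-port HTTP and 'option:: established' is
# expected to render with 'range 1024 65535' (i.e. HIGH_PORTS) as the
# destination port match, which is what the line below asserts.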
range_test = 'permit tcp any eq 80 10.0.0.0 0.255.255.255 range 1024 65535' self.assertIn(range_test, str(acl), '[%s]' % str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testExpandingConsequtivePorts(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['80', '81'] acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_14, self.naming), EXP_INFO) first_string = 'permit tcp any 10.0.0.0 0.255.255.255 eq 80' second_string = 'permit tcp any 10.0.0.0 0.255.255.255 eq 81' self.assertIn(first_string, str(acl), '[%s]' % str(acl)) self.assertIn(second_string, str(acl), '[%s]' % str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with( 'CONSECUTIVE_PORTS', 'tcp') def testDSCP(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_16, self.naming), EXP_INFO) self.assertTrue(re.search('permit tcp any any dscp 42', str(acl)), str(acl)) def testTermAndFilterName(self): acl = cisco.Cisco(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_1 + GOOD_TERM_6, self.naming), EXP_INFO) self.assertIn('ip access-list extended test-filter', str(acl), str(acl)) self.assertIn(' remark good-term-1', str(acl), str(acl)) self.assertIn(' permit ip any any', str(acl), str(acl)) def testRemark(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] # Extended ACLs should have extended remark style. acl = cisco.Cisco(policy.ParsePolicy( GOOD_EXTENDED_NUMBERED_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) self.assertIn('ip access-list extended 150', str(acl), str(acl)) self.assertIn(' remark numbered extended', str(acl), str(acl)) self.assertNotIn('150 remark', str(acl), str(acl)) # Standard ACLs should have standard remark style. 
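# That is, each remark is prefixed with the list number ('access-list 50
# remark ...') instead of the bare ' remark ...' lines used inside the
# 'ip access-list extended' block above; the assertions below look for the
# prefixed form.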
acl = cisco.Cisco(policy.ParsePolicy( GOOD_STANDARD_NUMBERED_HEADER + GOOD_STANDARD_TERM_1, self.naming), EXP_INFO) self.assertIn('access-list 50 remark numbered standard', str(acl), str(acl)) self.assertIn('access-list 50 remark standard-term-1', str(acl), str(acl)) self.assertIn('access-list 50 remark %sId:%s' % ('$', '$'), str(acl), str(acl)) self.assertNotIn('access-list 50 remark %sRevision:%s' % ('$', '$'), str(acl), str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testTcpEstablished(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) self.assertTrue(re.search('permit tcp any any established\n', str(acl)), str(acl)) def testLogging(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4, self.naming), EXP_INFO) self.assertTrue(re.search('permit tcp any any log\n', str(acl)), str(acl)) def testVerbatimTerm(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5, self.naming), EXP_INFO) self.assertIn('mary had a little lamb', str(acl), str(acl)) # check if other platforms verbatim shows up in ouput self.assertNotIn('mary had a second lamb', str(acl), str(acl)) self.assertNotIn('mary had a third lamb', str(acl), str(acl)) def testDuplicateTermNames(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/24')] pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_1 + GOOD_STANDARD_TERM_1, self.naming) self.assertRaises(cisco.CiscoDuplicateTermError, cisco.Cisco, pol, EXP_INFO) def testBadStandardTerm(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + BAD_STANDARD_TERM_1, self.naming) self.assertRaises(cisco.StandardAclTermError, cisco.Cisco, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testStandardTermHost(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_1, self.naming) acl = cisco.Cisco(pol, EXP_INFO) expected = 'access-list 99 permit 10.1.1.1' self.assertIn(expected, str(acl), '[%s]' % str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testStandardTermNet(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_2, self.naming) acl = cisco.Cisco(pol, EXP_INFO) expected = 'access-list 99 permit 10.0.0.0 0.255.255.255' self.assertIn(expected, str(acl), '[%s]' % str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testNamedStandard(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_2 + GOOD_STANDARD_TERM_2, self.naming) acl = cisco.Cisco(pol, EXP_INFO) expected = 'ip access-list standard FOO' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit 10.0.0.0 0.255.255.255\n' self.assertIn(expected, str(acl), '[%s]' % str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testNoIPv6InOutput(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2620:0:1000::/40')] pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_2, self.naming) acl = cisco.Cisco(pol, EXP_INFO) self.assertNotIn('::', str(acl), '[%s]' % str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testStandardFilterName(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] pol = policy.ParsePolicy(BAD_STANDARD_HEADER_1 + 
GOOD_STANDARD_TERM_2, self.naming) self.assertRaises(cisco.UnsupportedCiscoAccessListError, cisco.Cisco, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testStandardFilterRange(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] pol = policy.ParsePolicy(BAD_STANDARD_HEADER_2 + GOOD_STANDARD_TERM_2, self.naming) self.assertRaises(cisco.UnsupportedCiscoAccessListError, cisco.Cisco, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testObjectGroup(self): ip_grp = ['object-group network ipv4 SOME_HOST'] ip_grp.append(' 10.0.0.0/8') ip_grp.append('exit') port_grp1 = ['object-group port 80-80'] port_grp1.append(' eq 80') port_grp1.append('exit') port_grp2 = ['object-group port 1024-65535'] port_grp2.append(' range 1024 65535') port_grp2.append('exit') self.naming.GetNetAddr.return_value = [ nacaddr.IP('10.0.0.0/8', token='SOME_HOST')] self.naming.GetServiceByProto.return_value = ['80'] pol = policy.ParsePolicy( GOOD_OBJGRP_HEADER + GOOD_TERM_2 + GOOD_TERM_18, self.naming) acl = cisco.Cisco(pol, EXP_INFO) self.assertIn('\n'.join(ip_grp), str(acl), '%s %s' % ( '\n'.join(ip_grp), str(acl))) self.assertIn('\n'.join(port_grp1), str(acl), '%s %s' % ( '\n'.join(port_grp1), str(acl))) self.assertIn('\n'.join(port_grp2), str(acl), '%s %s' % ( '\n'.join(port_grp2), str(acl))) # Object-group terms should use the object groups created. self.assertIn( ' permit tcp any port-group 80-80 net-group SOME_HOST port-group' ' 1024-65535', str(acl), str(acl)) self.assertIn( ' permit ip net-group SOME_HOST net-group SOME_HOST', str(acl), str(acl)) # There should be no addrgroups that look like IP addresses. for addrgroup in re.findall(r'net-group ([a-f0-9.:/]+)', str(acl)): self.assertRaises(ValueError, nacaddr.IP(addrgroup)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST'), mock.call('SOME_HOST')]) self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testInet6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('2001:4860:8000::/33')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_8, self.naming), EXP_INFO) inet6_test1 = 'no ipv6 access-list inet6_acl' inet6_test2 = 'ipv6 access-list inet6_acl' inet6_test3 = 'permit tcp any 2001:4860:8000::/33' self.assertIn(inet6_test1, str(acl), '[%s]' % str(acl)) self.assertIn(inet6_test2, str(acl), '[%s]' % str(acl)) self.assertTrue(re.search(inet6_test3, str(acl)), str(acl)) self.assertNotIn('10.0.0.0', str(acl), str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testMixed(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('2001:4860:8000::/33')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_MIXED_HEADER + GOOD_TERM_8, self.naming), EXP_INFO) inet6_test1 = 'no ip access-list extended mixed_acl' inet6_test2 = 'ip access-list extended mixed_acl' inet6_test3 = 'permit tcp any 10.0.0.0 0.255.255.255' inet6_test4 = 'no ipv6 access-list ipv6-mixed_acl' inet6_test5 = 'ipv6 access-list ipv6-mixed_acl' inet6_test6 = 'permit tcp any 2001:4860:8000::/33' aclout = str(acl) self.assertIn(inet6_test1, aclout, '[%s]' % aclout) self.assertIn(inet6_test2, aclout, '[%s]' % aclout) self.assertTrue(re.search(inet6_test3, aclout), aclout) self.assertIn(inet6_test4, aclout, '[%s]' % aclout) self.assertIn(inet6_test5, aclout, '[%s]' % aclout) self.assertTrue(re.search(inet6_test6, aclout), aclout) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def 
testRestrictAddressFamilyType(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('127.0.0.1'), nacaddr.IPv6('::1/128')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_MIXED_HEADER + GOOD_TERM_23, self.naming), EXP_INFO) output = str(acl) self.assertIn('127.0.0.1', output, output) self.assertNotIn('::1/128', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testDsmo(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IP('192.168.' + str(octet) + '.64/27') addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list acl = cisco.Cisco(policy.ParsePolicy(GOOD_DSMO_HEADER + GOOD_TERM_8, self.naming), EXP_INFO) self.assertIn('permit tcp any 192.168.0.64 0.0.255.31', str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testUdpEstablished(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_9, self.naming), EXP_INFO) self.assertFalse(re.search('permit 17 any any established', str(acl)), str(acl)) def testIcmpTypes(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_10, self.naming), EXP_INFO) # echo-reply = 0 self.assertTrue(re.search('permit icmp any any 0', str(acl)), str(acl)) # unreachable = 3 self.assertTrue(re.search('permit icmp any any 3', str(acl)), str(acl)) # time-exceeded = 11 self.assertTrue(re.search('permit icmp any any 11', str(acl)), str(acl)) def testIcmpCode(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_19, self.naming), EXP_INFO) output = str(acl) self.assertIn(' permit icmp any any 3 3', output, output) self.assertIn(' permit icmp any any 3 4', output, output) def testIpv6IcmpTypes(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_11, self.naming), EXP_INFO) # echo-reply = icmp-type code 129 self.assertTrue(re.search('permit 58 any any 129', str(acl)), str(acl)) # destination-unreachable = icmp-type code 1 self.assertTrue(re.search('permit 58 any any 1', str(acl)), str(acl)) # time-exceeded = icmp-type code 3 self.assertTrue(re.search('permit 58 any any 3', str(acl)), str(acl)) @mock.patch.object(cisco.logging, 'debug') def testIcmpv6InetMismatch(self, mock_debug): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11, self.naming), EXP_INFO) # output happens in __str_ str(acl) mock_debug.assert_called_once_with( 'Term good-term-11 will not be rendered,' ' as it has icmpv6 match specified but ' 'the ACL is of inet address family.') @mock.patch.object(cisco.logging, 'debug') def testIcmpInet6Mismatch(self, mock_debug): acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) # output happens in __str_ str(acl) mock_debug.assert_called_once_with( 'Term good-term-1 will not be rendered,' ' as it has icmp match specified but ' 'the ACL is of inet6 address family.') def testUnsupportedKeywordsError(self): pol1 = policy.ParsePolicy(GOOD_HEADER + UNSUPPORTED_TERM_1, self.naming) pol2 = policy.ParsePolicy(GOOD_HEADER + UNSUPPORTED_TERM_1, self.naming) # protocol-except self.assertRaises(aclgenerator.UnsupportedFilterError, cisco.Cisco, pol1, EXP_INFO) # source-prefix self.assertRaises(aclgenerator.UnsupportedFilterError, cisco.Cisco, pol2, EXP_INFO) def testDefaultInet6Protocol(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_12, self.naming), EXP_INFO) self.assertTrue(re.search('permit ipv6 any any', str(acl)), str(acl)) @mock.patch.object(cisco.logging, 'warning') def testExpiredTerm(self, mock_warn): _ = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + 
EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and will not ' 'be rendered.', 'is_expired', 'test-filter') @mock.patch.object(cisco.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) _ = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'test-filter') def testTermHopByHop(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_15, self.naming), EXP_INFO) self.assertIn('permit hbh any any', str(acl), str(acl)) def testOwnerTerm(self): acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_13, self.naming), EXP_INFO) self.assertTrue(re.search(' remark Owner: foo@google.com', str(acl)), str(acl)) def testBuildTokens(self): pol1 = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_17, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testProtoInts(self): pol = policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_7 + GOOD_TERM_9, self.naming) acl = cisco.Cisco(pol, EXP_INFO) self.assertIn('permit 112 any any', str(acl), str(acl)) self.assertIn('permit tcp any any range 1024 65535 ' 'established', str(acl), str(acl)) self.assertIn('permit udp any any range 1024 65535', str(acl), str(acl)) def testFragments01(self): """Test policy term using 'fragments' (ref Github issue #187).""" self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/24')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_20, self.naming), EXP_INFO) expected = 'permit ip 10.0.0.0 0.0.0.255 10.0.0.0 0.0.0.255 fragments' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST'), mock.call('SOME_HOST')]) def testFragments02(self): """Test policy term using 'is-fragment' (ref Github issue #187).""" self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/24')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_22, self.naming), EXP_INFO) expected = 'permit ip 10.0.0.0 0.0.0.255 10.0.0.0 0.0.0.255 fragments' self.assertIn(expected, str(acl)) def testTermDSCPMarker(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/24')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_21, self.naming), EXP_INFO) expected = 'permit ip 10.0.0.0 0.0.0.255 10.0.0.0 0.0.0.255' self.assertIn(expected, str(acl)) self.naming.GetNetAddr.assert_has_calls( [mock.call('cs4-valid_network_name'), mock.call('cs4-valid_network_name')]) def testNoVerbose(self): for i in [GOOD_NOVERBOSE_HEADER, GOOD_NOVERBOSE_STANDARD_HEADER, GOOD_NOVERBOSE_OBJGRP_HEADER, GOOD_NOVERBOSE_INET6_HEADER]: self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/24')] acl = cisco.Cisco(policy.ParsePolicy(i+GOOD_STANDARD_TERM_1, self.naming), EXP_INFO) self.assertNotIn('remark', str(acl), str(acl)) def testLongHeader(self): pol = policy.ParsePolicy( LONG_VERSION_HEADER + GOOD_TERM_7, self.naming) acl = cisco.Cisco(pol, EXP_INFO) self.assertIn('remark This long header should be split even on a', str(acl)) self.assertIn(('remark 
looooooooooooooooooooooooooonnnnnnnnnnnnnnnnnn' 'gggggggggg string.'), str(acl)) self.assertIn(('remark https://www.google.com/maps/place/1600+Amphitheatr' 'e+Parkway,+Mountain+'), str(acl)) self.assertIn(('remark View,+CA/@37.507491,-122.2540443,15z/data=!4m5!3m4!' '1s0x808fb99f8c51e88'), str(acl)) self.assertIn(('remark 5:0x169ef02a512c5b28!8m2!3d37.4220579!4d-122.084' '0897'), str(acl)) def testIPIP(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/24')] acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_24, self.naming), EXP_INFO) self.assertIn('permit 4 ', str(acl)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/ciscoasa_test.py000066400000000000000000000100151437377527500202530ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for ciscoasa acl rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import ciscoasa from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: ciscoasa test-filter } """ GOOD_TERM_1 = """ term good-term-1 { verbatim:: ciscoasa "mary had a little lamb" verbatim:: iptables "mary had second lamb" verbatim:: juniper "mary had third lamb" } """ GOOD_TERM_2 = """ term good-term-2 { verbatim:: ciscoasa "mary had a little lamb" policer:: batman } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'tcp-established'}} # Print a info message when a term is set to expire in that 
many weeks. # This is normally passed from command line. EXP_INFO = 2 class CiscoASATest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testBuildTokens(self): pol1 = ciscoasa.CiscoASA(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = ciscoasa.CiscoASA(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/cisconx_test.py000066400000000000000000000215501437377527500201420ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for cisconx acl rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import cisconx from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test extended acl" target:: cisconx test-filter extended } """ GOOD_HEADER_2 = """ header { comment:: "this is a test acl" target:: cisconx test-filter } """ GOOD_HEADER_IPV6 = """ header { comment:: "this is a test inet6 acl" target:: cisconx test-filter inet6 } """ GOOD_TERM = """ term good-term { protocol:: tcp option:: tcp-established action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: tcp option:: tcp-established policer:: batman action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { source-address:: SOME_HOST destination-port:: SSH protocol:: tcp action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { source-address:: SOME_HOST2 destination-port:: GOPENFLOW protocol:: tcp action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { comment:: "Accept SNMP from internal sources." address:: SOME_HOST action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { comment:: "Accept ESP from internal sources." address:: SOME_HOST protocol:: esp action:: accept } """ GOOD_TERM_6 = """ term good-term-6 { comment:: "Accept AH from internal sources." address:: SOME_HOST protocol:: ah action:: accept } """ GOOD_TERM_7 = """ term good-term-6 { comment:: "Accept AH from internal sources." 
address:: SOME_HOST protocol:: ah esp tcp action:: accept } """ GOOD_TERM_10 = """ term good-term-10 { protocol:: icmp icmp-type:: echo-reply unreachable time-exceeded action:: accept } """ GOOD_TERM_11 = """ term good-term-11 { protocol:: icmpv6 icmp-type:: echo-reply destination-unreachable time-exceeded action:: accept } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'dscp_match', 'expiration', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'restrict_address_family', 'source_address', 'source_address_exclude', 'source_port', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': { 'established', 'tcp-established', 'is-fragment', 'fragments' } } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class CiscoNXTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testRemark(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4, self.naming) acl = cisconx.CiscoNX(pol, EXP_INFO) expected = 'remark this is a test extended acl' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = 'remark good-term-4' self.assertIn(expected, str(acl), str(acl)) expected = 'test-filter remark' self.assertNotIn(expected, str(acl), str(acl)) self.assertNotIn(' remark %sId:%s' % ('$', '$'), str(acl), str(acl)) self.assertIn(' remark "%sRevision:%s"' % ('$', '$'), str(acl), str(acl)) self.assertNotIn(' remark $', str(acl), str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testExtendedNXosSyntax(self): # Extended access-lists should not use the "extended" argument to ip # access-list. 
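    # NX-OS configures named ACLs as "ip access-list <name>" with no standard/extended keyword, so the generated output should contain the bare "ip access-list test-filter" header checked below.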
acl = cisconx.CiscoNX( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('ip access-list test-filter', str(acl)) def testBuildTokens(self): pol1 = cisconx.CiscoNX( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = cisconx.CiscoNX( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testStandardTermHost(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.0/24')] self.naming.GetServiceByProto.return_value = ['22', '6537'] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_2 + GOOD_TERM_3, self.naming) acl = cisconx.CiscoNX(pol, EXP_INFO) expected = 'ip access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit tcp 10.1.1.0 0.0.0.255 any eq 22' self.assertIn(expected, str(acl), str(acl)) expected = ' permit tcp 10.1.1.0 0.0.0.255 any eq 6537' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls( [mock.call('SOME_HOST'), mock.call('SOME_HOST2')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SSH', 'tcp'), mock.call('GOPENFLOW', 'tcp')]) def testStandardTermHostV6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2620:1::/64')] self.naming.GetServiceByProto.return_value = ['22'] pol = policy.ParsePolicy(GOOD_HEADER_IPV6 + GOOD_TERM_2, self.naming) acl = cisconx.CiscoNX(pol, EXP_INFO) expected = 'ipv6 access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit tcp 2620:1::/64 any eq 22' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST')]) self.naming.GetServiceByProto.assert_has_calls([mock.call('SSH', 'tcp')]) def testIcmpTypes(self): acl = cisconx.CiscoNX( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_10, self.naming), EXP_INFO) # echo-reply = 0 expected = 'permit icmp any any 0' self.assertIn(expected, str(acl), str(acl)) # unreachable = 3 expected = 'permit icmp any any 3' self.assertIn(expected, str(acl), str(acl)) # time-exceeded = 11 expected = 'permit icmp any any 11' self.assertIn(expected, str(acl), str(acl)) def testIpv6IcmpTypes(self): acl = cisconx.CiscoNX( policy.ParsePolicy(GOOD_HEADER_IPV6 + GOOD_TERM_11, self.naming), EXP_INFO) # echo-reply = icmp-type code 129 expected = 'permit icmp any any 129' self.assertIn(expected, str(acl), str(acl)) # destination-unreachable = icmp-type code 1 expected = 'permit icmp any any 1' self.assertIn(expected, str(acl), str(acl)) # time-exceeded = icmp-type code 3 expected = 'permit icmp any any 3' self.assertIn(expected, str(acl), str(acl)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/ciscoxr_test.py000066400000000000000000000235461437377527500201550ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Unittest for Cisco XR acl rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import ciscoxr from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: ciscoxr test-filter } """ GOOD_HEADER_2 = """ header { comment:: "this is a test ipv6 acl" target:: ciscoxr ipv6-test-filter inet6 } """ OBJECT_GROUP_HEADER = """ header { target:: ciscoxr foo object-group } """ GOOD_TERM_1 = """ term good-term-1 { source-address:: SOME_HOST protocol:: icmp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: tcp destination-address:: SOME_HOST2 source-port:: HTTP action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: tcp destination-address:: SOME_HOST2 source-port:: HTTP policer:: batman action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { source-address:: SOME_HOST2 action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { next-ip:: TEST_NEXT } """ GOOD_TERM_6 = """ term good-term-6 { action:: accept next-ip:: TEST_NEXT } """ VERBATIM_TERM = """ term verb_term { verbatim:: ciscoxr " permit tcp any" } """ EXPIRED_TERM = """ term is_expired { expiration:: 2001-01-01 action:: accept } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'dscp_match', 'expiration', 'icmp_code', 'icmp_type', 'next_ip', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'restrict_address_family', 'source_address', 'source_address_exclude', 'source_port', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'tcp-established', 'is-fragment', 'fragments'} } EXP_INFO = 2 class CiscoXRTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testRemark(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) expected = 'remark this is a test acl' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = 'remark good-term-1' self.assertIn(expected, str(acl), str(acl)) expected = 
'test-filter remark' self.assertNotIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testStandardTermHost(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_TERM_4, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) expected = 'ipv4 access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit icmp host 10.1.1.1 any' self.assertIn(expected, str(acl), str(acl)) expected = ' permit ipv4 host 10.1.1.1 any' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST'), mock.call('SOME_HOST2')]) def testStandardTermHostIPv6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::3/128')] self.naming.GetServiceByProto.return_value = ['80'] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_2 + GOOD_TERM_4, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) expected = 'ipv6 access-list ipv6-test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit tcp any eq 80 host 2001::3' self.assertIn(expected, str(acl), str(acl)) expected = ' permit ipv6 host 2001::3 any' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST2'), mock.call('SOME_HOST2')]) self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testAclBasedForwardingIPv4(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_5, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) expected = 'ipv4 access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit ipv4 any any nexthop1 ipv4 10.1.1.1' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('TEST_NEXT')]) def testAclBasedForwardingIPv6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::3/128')] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_5, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) expected = 'ipv6 access-list ipv6-test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit ipv6 any any nexthop1 ipv6 2001::3' self.assertIn(expected, str(acl), str(acl)) self.naming.GetNetAddr.assert_has_calls([mock.call('TEST_NEXT')]) def testAclBasedForwardingMultipleIP(self): self.naming.GetNetAddr.return_value = [ nacaddr.IP('10.1.1.0/32'), nacaddr.IP('10.1.1.1/32') ] pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_5, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) self.assertRaises(ciscoxr.cisco.CiscoNextIpError, str, acl) self.naming.GetNetAddr.assert_has_calls([mock.call('TEST_NEXT')]) def testAclBasedForwardingNetworkIP(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.0/31')] pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_5, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) self.assertRaises(ciscoxr.cisco.CiscoNextIpError, str, acl) self.naming.GetNetAddr.assert_has_calls([mock.call('TEST_NEXT')]) def testAclBasedForwardingNotIP(self): self.naming.GetNetAddr.return_value = ['not_ip_address'] pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_5, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) self.assertRaises(ciscoxr.cisco.CiscoNextIpError, str, acl) self.naming.GetNetAddr.assert_has_calls([mock.call('TEST_NEXT')]) def testAclBasedForwardingActionAcceptNextIpIgnored(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = 
policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_6, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) expected = 'ipv4 access-list test-filter' self.assertIn(expected, str(acl), '[%s]' % str(acl)) expected = ' permit ipv4 any any' self.assertIn(expected, str(acl), str(acl)) expected = 'nexthop1' self.assertNotIn(expected, str(acl), str(acl)) def testBuildTokens(self): pol1 = ciscoxr.CiscoXR(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::3/128')] self.naming.GetServiceByProto.return_value = ['80'] pol1 = ciscoxr.CiscoXR(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testVerbatimObjectGroup(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] pol = policy.ParsePolicy(OBJECT_GROUP_HEADER + VERBATIM_TERM, self.naming) acl = ciscoxr.CiscoXR(pol, EXP_INFO) self.assertIn('permit tcp any', str(acl)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/cloudarmor_test.py000066400000000000000000000516701437377527500206510ustar00rootroot00000000000000"""Tests for google3.third_party.py.capirca.lib.cloudarmor.""" import json import random from absl.testing import absltest from unittest import mock from capirca.lib import cloudarmor from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy SUPPORTED_TOKENS = { 'action', 'comment', 'priority', 'source_address' } EXP_INFO = 2 GOOD_HEADER = """ header { comment:: "Test ACL for CloudArmor (IPv4)" target:: cloudarmor inet } """ GOOD_HEADER_IPV6_ONLY = """ header { comment:: "Test ACL for CloudArmor (IPv6 only)" target:: cloudarmor inet6 } """ GOOD_HEADER_NOVERBOSE = """ header { comment:: "Test ACL for CloudArmor (IPv4)" target:: cloudarmor inet noverbose } """ GOOD_HEADER_MIXED = """ header { comment:: "Test ACL for CloudArmor (IPv4 + IPv6)" target:: cloudarmor mixed } """ GOOD_HEADER_NO_AF = """ header { comment:: "Test ACL for CloudArmor (Default AF = IPv4)" target:: cloudarmor } """ BAD_HEADER_INVALID_AF = """ header { comment:: "Test ACL for CloudArmor (IPv4 + IPv6)" target:: cloudarmor inet8 } """ GOOD_TERM_ALLOW = """ term good-term-allow { comment:: "Sample CloudArmor Allow Rule" source-address:: GOOGLE_PUBLIC_DNS_ANYCAST action:: accept } """ GOOD_TERM_DENY = """ term good-term-deny { comment:: "Sample Deny Rule" source-address:: INTERNAL action:: deny } """ GOOD_TERM_NO_COMMENT = """ term good-term-nocomment { source-address:: GOOGLE_PUBLIC_DNS_ANYCAST action:: accept } """ GOOD_TERM_DEFAULT_DENY = """ term good-term-defaultdeny { comment:: "Default Deny term" action:: deny } """ GOOD_TERM_LARGE_COMMENT = """ term good-term-allow { comment:: "This is an unnecessarily long term comment that's going to be truncated" source-address:: GOOGLE_PUBLIC_DNS_ANYCAST action:: accept } """ BAD_TERM_NO_ACTION = """ term bad-term-no-action { comment:: "Sample rule with missing 'action' attribute" source-address:: GOOGLE_PUBLIC_DNS_ANYCAST } """ EXPECTED_IPV4_NOSPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule", "match": { "config": { "srcIpRanges": [ "10.2.3.4/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "deny(404)", "description": "Sample Deny Rule", 
"match": { "config": { "srcIpRanges": [ "10.2.3.4/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 } ] """ EXPECTED_IPV6_NOSPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule", "match": { "config": { "srcIpRanges": [ "2001:4860:8000::5/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "deny(404)", "description": "Sample Deny Rule", "match": { "config": { "srcIpRanges": [ "2001:4860:8000::5/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 } ] """ EXPECTED_MIXED_NOSPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule", "match": { "config": { "srcIpRanges": [ "10.2.3.4/32", "2001:4860:8000::5/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "deny(404)", "description": "Sample Deny Rule", "match": { "config": { "srcIpRanges": [ "10.2.3.4/32", "2001:4860:8000::5/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 } ] """ EXPECTED_IPV4_SPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule [1/2]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "allow", "description": "Sample CloudArmor Allow Rule [2/2]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 }, { "action": "deny(404)", "description": "Sample Deny Rule [1/2]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 3 }, { "action": "deny(404)", "description": "Sample Deny Rule [2/2]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 4 } ] """ EXPECTED_IPV6_SPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule [1/2]", "match": { "config": { "srcIpRanges": [ "2001:4860:8000::5/128", "24da:3ed8:32a0::7/128", "3051:abd2:5400::9/128", "577e:5400:3051::6/128", "6f5d:abd2:1403::1/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "allow", "description": "Sample CloudArmor Allow Rule [2/2]", "match": { "config": { "srcIpRanges": [ "aee2:37ba:3cc0::3/128", "af22:32d2:3f00::2/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 }, { "action": "deny(404)", "description": "Sample Deny Rule [1/2]", "match": { "config": { "srcIpRanges": [ "2001:4860:8000::5/128", "24da:3ed8:32a0::7/128", "3051:abd2:5400::9/128", "577e:5400:3051::6/128", "6f5d:abd2:1403::1/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 3 }, { "action": "deny(404)", "description": "Sample Deny Rule [2/2]", "match": { "config": { "srcIpRanges": [ "aee2:37ba:3cc0::3/128", "af22:32d2:3f00::2/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 4 } ] """ EXPECTED_MIXED_SPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule [1/3]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "allow", "description": "Sample CloudArmor Allow Rule [2/3]", "match": { "config": { "srcIpRanges": [ 
"132.2.3.6/32", "197.2.3.7/32", "2001:4860:8000::5/128", "24da:3ed8:32a0::7/128", "3051:abd2:5400::9/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 }, { "action": "allow", "description": "Sample CloudArmor Allow Rule [3/3]", "match": { "config": { "srcIpRanges": [ "577e:5400:3051::6/128", "6f5d:abd2:1403::1/128", "aee2:37ba:3cc0::3/128", "af22:32d2:3f00::2/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 3 }, { "action": "deny(404)", "description": "Sample Deny Rule [1/3]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 4 }, { "action": "deny(404)", "description": "Sample Deny Rule [2/3]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32", "2001:4860:8000::5/128", "24da:3ed8:32a0::7/128", "3051:abd2:5400::9/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 5 }, { "action": "deny(404)", "description": "Sample Deny Rule [3/3]", "match": { "config": { "srcIpRanges": [ "577e:5400:3051::6/128", "6f5d:abd2:1403::1/128", "aee2:37ba:3cc0::3/128", "af22:32d2:3f00::2/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 6 } ] """ EXPECTED_NOCOMMENT_SPLIT_JSON = """ [ { "action": "allow", "description": " [1/2]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "allow", "description": " [2/2]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 } ] """ EXPECTED_NOCOMMENT_NOSPLIT_JSON = """ [ { "action": "allow", "match": { "config": { "srcIpRanges": [ "10.2.3.4/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 } ] """ EXPECTED_LARGECOMMENT_NOSPLIT_JSON = """ [ { "action": "allow", "description": "This is an unnecessarily long term comment that's going to be tr", "match": { "config": { "srcIpRanges": [ "10.2.3.4/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 } ] """ EXPECTED_LARGECOMMENT_SPLIT_JSON = """ [ { "action": "allow", "description": "This is an unnecessarily long term comment that's going to [1/2]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "allow", "description": "This is an unnecessarily long term comment that's going to [2/2]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 } ] """ EXPECTED_DEFAULT_DENY_JSON = """ [ { "action": "deny(404)", "description": "Default Deny term", "match": { "config": { "srcIpRanges": [ "*" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 } ] """ EXPECTED_DEFAULT_DENY_SPLIT_JSON = """ [ { "action": "allow", "description": "Sample CloudArmor Allow Rule [1/3]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 1 }, { "action": "allow", "description": "Sample CloudArmor Allow Rule [2/3]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32", "2001:4860:8000::5/128", "24da:3ed8:32a0::7/128", "3051:abd2:5400::9/128" ] }, 
"versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 2 }, { "action": "allow", "description": "Sample CloudArmor Allow Rule [3/3]", "match": { "config": { "srcIpRanges": [ "577e:5400:3051::6/128", "6f5d:abd2:1403::1/128", "aee2:37ba:3cc0::3/128", "af22:32d2:3f00::2/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 3 }, { "action": "deny(404)", "description": "Sample Deny Rule [1/3]", "match": { "config": { "srcIpRanges": [ "5.2.3.2/32", "10.2.3.4/32", "23.2.3.3/32", "54.2.3.4/32", "76.2.3.5/32" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 4 }, { "action": "deny(404)", "description": "Sample Deny Rule [2/3]", "match": { "config": { "srcIpRanges": [ "132.2.3.6/32", "197.2.3.7/32", "2001:4860:8000::5/128", "24da:3ed8:32a0::7/128", "3051:abd2:5400::9/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 5 }, { "action": "deny(404)", "description": "Sample Deny Rule [3/3]", "match": { "config": { "srcIpRanges": [ "577e:5400:3051::6/128", "6f5d:abd2:1403::1/128", "aee2:37ba:3cc0::3/128", "af22:32d2:3f00::2/128" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 6 }, { "action": "deny(404)", "description": "Default Deny term", "match": { "config": { "srcIpRanges": [ "*" ] }, "versionedExpr": "SRC_IPS_V1" }, "preview": false, "priority": 7 } ] """ TEST_IPS_NOSPLIT = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128')] TEST_IPS_SPLIT = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('5.2.3.2/32'), nacaddr.IP('23.2.3.3/32'), nacaddr.IP('54.2.3.4/32'), nacaddr.IP('76.2.3.5/32'), nacaddr.IP('132.2.3.6/32'), nacaddr.IP('197.2.3.7/32'), nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('3051:abd2:5400::9/128'), nacaddr.IP('aee2:37ba:3cc0::3/128'), nacaddr.IP('6f5d:abd2:1403::1/128'), nacaddr.IP('577e:5400:3051::6/128'), nacaddr.IP('af22:32d2:3f00::2/128'), nacaddr.IP('24da:3ed8:32a0::7/128')] class CloudArmorTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def _StripAclHeaders(self, acl): return '\n'.join([line for line in str(acl).split('\n') if not line.lstrip().startswith('#')]) def testGenericIPv4Term(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV4_NOSPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testGenericIPv6Term(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy( GOOD_HEADER_IPV6_ONLY + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV6_NOSPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testGenericMixedTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MIXED_NOSPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testDefaultAddressFamily(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER_NO_AF + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV4_NOSPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testIPv4TermSplitting(self): 
self.naming.GetNetAddr.return_value = TEST_IPS_SPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV4_SPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testIPv6TermSplitting(self): self.naming.GetNetAddr.return_value = TEST_IPS_SPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy( GOOD_HEADER_IPV6_ONLY + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV6_SPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedTermSplitting(self): self.naming.GetNetAddr.return_value = TEST_IPS_SPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ALLOW + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MIXED_SPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInvalidAddressFamilyCheck(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT self.assertRaisesRegex( cloudarmor.UnsupportedFilterTypeError, "'inet8' is not a valid filter type", cloudarmor.CloudArmor, policy.ParsePolicy( BAD_HEADER_INVALID_AF + GOOD_TERM_ALLOW, self.naming), EXP_INFO) def testMaxRuleLimitEnforcement(self): test_1001_ips_list = [] for _ in range(1001): random_ip_octets = [] for _ in range(4): random_ip_octets.append(str(int(random.randint(1, 255)))) rand_ip = '.'.join(random_ip_octets) test_1001_ips_list.append(nacaddr.IP(rand_ip + '/32')) self.naming.GetNetAddr.return_value = test_1001_ips_list self.assertRaisesRegex( cloudarmor.ExceededMaxTermsError, 'Exceeded maximum number of rules in a single policy | MAX = 200', cloudarmor.CloudArmor, policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ALLOW, self.naming), EXP_INFO) def testNoCommentWithSplit(self): self.naming.GetNetAddr.return_value = TEST_IPS_SPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_NO_COMMENT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_NOCOMMENT_SPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testNoCommentWithoutSplit(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_NO_COMMENT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_NOCOMMENT_NOSPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testLargeCommentWithSplit(self): self.naming.GetNetAddr.return_value = TEST_IPS_SPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LARGE_COMMENT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_LARGECOMMENT_SPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testLargeCommentWithoutSplit(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LARGE_COMMENT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_LARGECOMMENT_NOSPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testNoVerbose(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER_NOVERBOSE + GOOD_TERM_LARGE_COMMENT, self.naming), EXP_INFO) self.assertNotIn('description', str(acl)) def testDefaultDenyStandalone(self): self.naming.GetNetAddr.return_value = TEST_IPS_NOSPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER + 
GOOD_TERM_DEFAULT_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_DEFAULT_DENY_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testDefaultDenyWithSplit(self): self.naming.GetNetAddr.return_value = TEST_IPS_SPLIT acl = cloudarmor.CloudArmor( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ALLOW + GOOD_TERM_DENY + GOOD_TERM_DEFAULT_DENY, self.naming), EXP_INFO) expected = json.loads(EXPECTED_DEFAULT_DENY_SPLIT_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/gce_test.py000066400000000000000000001523611437377527500172370ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for GCE firewall rendering module.""" import json from unittest import mock from absl.testing import absltest from absl.testing import parameterized from capirca.lib import aclgenerator from capirca.lib import gce from capirca.lib import gcp from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "The general policy comment." target:: gce global/networks/default } """ GOOD_HEADER_INGRESS = """ header { comment:: "The general policy comment." target:: gce INGRESS } """ GOOD_HEADER_EGRESS = """ header { comment:: "The general policy comment." target:: gce EGRESS } """ GOOD_HEADER_NO_NETWORK = """ header { comment:: "The general policy comment." target:: gce } """ GOOD_HEADER_MAX_ATTRIBUTE_COUNT = """ header { comment:: "The general policy comment." target:: gce INGRESS global/networks/default 2 } """ GOOD_HEADER_INET = """ header { comment:: "The general policy comment." target:: gce INGRESS inet } """ GOOD_HEADER_EGRESS_INET = """ header { comment:: "The general policy comment." target:: gce INGRESS inet } """ GOOD_HEADER_INET6 = """ header { comment:: "The general policy comment." target:: gce INGRESS inet6 } """ GOOD_HEADER_EGRESS_INET6 = """ header { comment:: "The general policy comment." target:: gce EGRESS inet6 } """ GOOD_HEADER_MIXED = """ header { comment:: "The general policy comment." target:: gce INGRESS mixed } """ GOOD_HEADER_EGRESS_MIXED = """ header { comment:: "The general policy comment." target:: gce EGRESS mixed } """ GOOD_TERM = """ term good-term-1 { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp policer:: batman action:: accept } """ GOOD_TERM_3 = """ term good-term-1 { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp priority:: 1 action:: accept } """ GOOD_TERM_EXCLUDE = """ term good-term-1 { comment:: "DNS access from corp." 
source-address:: CORP_EXTERNAL source-exclude:: GUEST_WIRELESS_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_4 = """ term good-term-1 { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { comment:: "ICMP from IP." source-address:: CORP_EXTERNAL protocol:: icmp action:: accept } """ GOOD_TERM_EGRESS = """ term good-term-1 { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_EGRESS_SOURCETAG = """ term good-term-1 { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL source-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_INGRESS_SOURCETAG = """ term good-term-1 { comment:: "Allow all GCE network internal traffic." source-tag:: internal-servers protocol:: udp tcp action:: accept } """ GOOD_TERM_INGRESS_ADDRESS_SOURCETAG = """ term good-term-1 { comment:: "Allow all GCE network internal traffic." source-tag:: internal-servers source-address:: CORP_EXTERNAL protocol:: udp tcp action:: accept } """ GOOD_PLATFORM_EXCLUDE_TERM = """ term good-platform-exclude-term { comment:: "DNS access from corp." destination-tag:: dns-servers protocol:: udp tcp action:: accept platform-exclude:: gce } """ GOOD_PLATFORM_TERM = """ term good-platform-term { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept platform:: gce } """ GOOD_TERM_JSON = """ [ { "name": "default-good-term-1", "sourceRanges": [ "10.2.3.4/32" ], "allowed": [ { "ports": [ "53" ], "IPProtocol": "udp" }, { "ports": [ "53" ], "IPProtocol": "tcp" } ], "description": "DNS access from corp.", "targetTags": [ "dns-servers" ], "direction": "INGRESS", "network": "global/networks/default" } ] """ GOOD_TERM_NO_NETWORK_JSON = """ [ { "name": "good-term-1", "sourceRanges": [ "10.2.3.4/32" ], "allowed": [ { "ports": [ "53" ], "IPProtocol": "udp" }, { "ports": [ "53" ], "IPProtocol": "tcp" } ], "description": "DNS access from corp.", "direction": "INGRESS", "targetTags": [ "dns-servers" ] } ] """ GOOD_TERM_EXPIRED = """ term good-term-expired { comment:: "Management access from corp." expiration:: 2001-01-01 source-address:: CORP_EXTERNAL destination-tag:: ssh-servers destination-port:: SSH protocol:: tcp action:: accept } """ GOOD_TERM_LOGGING = """ term good-term-logging { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept logging:: true } """ GOOD_TERM_CUSTOM_NAME = """ term %s { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_OWNERS = """ term good-term-owners { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp owner:: test-owner action:: accept } """ GOOD_TERM_ICMP = """ term good-term-ping { comment:: "Good term." source-address:: CORP_EXTERNAL protocol:: icmp action:: accept } """ GOOD_TERM_ICMPV6 = """ term good-term-pingv6 { comment:: "Good term." 
source-address:: CORP_EXTERNAL protocol:: icmpv6 action:: accept } """ GOOD_TERM_IGMP = """ term good-term-igmp { comment:: "Good term." source-address:: CORP_EXTERNAL protocol:: igmp action:: accept } """ GOOD_TERM_NO_PROTOCOL = """ term good-term-no-protocol { comment:: "Good term." source-address:: CORP_EXTERNAL action:: accept } """ GOOD_TERM_INGRESS_SOURCE_SERVICE_ACCOUNT = """ term good-term-source-service-account { comment:: "Test with a service account." source-address:: CORP_EXTERNAL source-service-accounts:: acct@blah.com protocol:: udp tcp action:: accept } """ GOOD_TERM_INGRESS_TARGET_SERVICE_ACCOUNT = """ term good-term-target-service-account { comment:: "Test with a service account." source-address:: CORP_EXTERNAL target-service-accounts:: acct@blah.com protocol:: udp tcp action:: accept } """ BAD_TERM_NO_SOURCE = """ term bad-term-no-source { comment:: "Management access from corp." destination-tag:: ssh-servers destination-port:: SSH protocol:: tcp action:: accept } """ BAD_TERM_SOURCE_EXCLUDE_ONLY = """ term bad-term-source-ex-only { comment:: "Management access from corp." destination-port:: SSH source-tag:: ssh-bastion source-exclude:: GUEST_WIRELESS_EXTERNAL protocol:: tcp action:: accept } """ BAD_TERM_SOURCE_PORT = """ term bad-term-source-port { comment:: "Management access from corp." source-address:: CORP_EXTERNAL source-port:: SSH destination-tag:: ssh-servers protocol:: tcp action:: accept } """ BAD_TERM_NAME_TOO_LONG = """ term good-term-whith-a-name-which-is-way-way-too-long-for-gce-to-accept { comment:: "Management access from corp." source-address:: CORP_EXTERNAL destination-port:: SSH protocol:: tcp action:: accept } """ BAD_TERM_UNSUPPORTED_PORT = """ term good-term-unsupported-port { comment:: "Management access from corp." source-address:: CORP_EXTERNAL destination-port:: SSH protocol:: tcp icmp action:: accept } """ BAD_TERM_UNSUPPORTED_OPTION = """ term bad-term-unsupported-option { comment:: "Management access from corp." source-address:: CORP_EXTERNAL destination-port:: SSH protocol:: tcp action:: accept option:: tcp-initial } """ BAD_TERM_EGRESS = """ term bad-term-dest-tag { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ BAD_TERM_EGRESS_SOURCE_ADDRESS = """ term bad-term-source-address { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL source-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ BAD_TERM_EGRESS_SOURCE_DEST_TAG = """ term bad-term-source-dest-tag { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-tag:: dns-servers source-tag:: ssh-bastion destination-port:: DNS protocol:: udp tcp action:: accept } """ BAD_TERM_PORTS_COUNT = """ term bad-term-ports-count { comment:: "This term has way too many ports." source-address:: CORP_EXTERNAL source-tag:: ssh-bastion destination-port:: SSH protocol:: tcp action:: accept } """ SAMPLE_TAG = 'ssh-bastions ' BAD_TERM_SOURCE_TAGS_COUNT = """ term bad-term-source-tags-count {{ comment:: "This term has way too many source tags." protocol:: tcp action:: accept source-tag:: {many_source_tags} }}""".format(many_source_tags=SAMPLE_TAG * (gce.Term._TERM_SOURCE_TAGS_LIMIT + 1)) BAD_TERM_TARGET_TAGS_COUNT = """ term bad-term-target-tags-count {{ comment:: "This term has way too many target tags." 
source-address:: CORP_EXTERNAL protocol:: tcp action:: accept destination-tag:: {many_target_tags} }}""".format(many_target_tags=SAMPLE_TAG * (gce.Term._TERM_TARGET_TAGS_LIMIT + 1)) BAD_TERM_SERVICE_ACCOUNTS_COUNT = """ term bad-term-service-accounts-count {{ comment:: "This term has way too many source service accounts." protocol:: tcp action:: accept source-tag:: ssh-bastion source-service-accounts:: {many_service_accounts} }}""".format(many_service_accounts='acct1@blah.com ' * (gce.Term._TERM_SERVICE_ACCOUNTS_LIMIT + 1)) BAD_TERM_TARGET_TAGS_AND_SERVICE_ACCOUNTS = """ term bad-term-tags-and-service-accounts { comment:: "This term has both a tag and a target service account." source-address:: CORP_EXTERNAL destination-tag:: dns-servers protocol:: tcp action:: accept target-service-accounts:: acct1@blah.com } """ BAD_TERM_TARGET_TAGS_AND_SERVICE_ACCOUNTS2 = """ term bad-term-tags-and-service-accounts { comment:: "This term has both a tag and a source service account." source-address:: CORP_EXTERNAL destination-tag:: dns-servers protocol:: tcp action:: accept source-service-accounts:: acct1@blah.com } """ GOOD_TERM_EXCLUDE_RANGE = """ [ { "name": "default-good-term-1", "sourceRanges": [ "10.128.0.0/10", "10.192.0.0/11", "10.224.0.0/12", "10.241.0.0/16", "10.242.0.0/15", "10.244.0.0/14", "10.248.0.0/13" ], "allowed": [ { "ports": [ "53" ], "IPProtocol": "udp" }, { "ports": [ "53" ], "IPProtocol": "tcp" } ], "description": "DNS access from corp.", "direction": "INGRESS", "targetTags": [ "dns-servers" ], "network": "global/networks/default" } ] """ DEFAULT_DENY = """ term default-deny { comment:: "default_deny." action:: deny } """ GOOD_TERM_DENY = """ term good-term-1 { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers protocol:: udp tcp action:: deny } """ GOOD_TERM_DENY_EXPECTED = """[ { "denied": [ { "IPProtocol": "udp" }, { "IPProtocol": "tcp" } ], "description": "DNS access from corp.", "name": "default-good-term-1", "network": "global/networks/default", "sourceRanges": [ "10.2.3.4/32" ], "direction": "INGRESS", "targetTags": [ "dns-servers" ] } ] """ VALID_TERM_NAMES = [ 'icmp', 'gcp-to-gcp', 'accept-ssh-from-google', 'ndc-rampart', 'lab-syslog', 'windows-windows', 'shell-wmn-inbound', 'shell-internal-smtp', 'accept-internal-traffic', 'deepfield-lab-management', 'deepfield-lab-reverse-proxy', 'cr-proxy-replication', 'ciena-one-control-tcp', 'fms-prod-to-fms-prod', 'ast', 'default-deny', 'google-web', 'zo6hmxkfibardh6tgbiy7ua6' ] SUPPORTED_TOKENS = frozenset({ 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_tag', 'expiration', 'stateless_reply', 'name', 'option', 'owner', 'priority', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'source_service_accounts', 'source_tag', 'target_service_accounts', 'translated', 'platform', 'platform_exclude', }) SUPPORTED_SUB_TOKENS = {'action': {'accept', 'deny'}} # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
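# In these tests it is passed verbatim as the second argument to gce.GCE(...).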
EXP_INFO = 2 TEST_IPS = [ nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('::ffff:a02:301/128'), # IPV4-mapped nacaddr.IP('2002::/16'), # 6to4 nacaddr.IP('::0000:a02:301/128'), # IPv4-compatible ] TEST_INCLUDE_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('10.4.3.2/32')] TEST_EXCLUDE_IPS = [nacaddr.IP('10.4.3.2/32')] TEST_INCLUDE_RANGE = [nacaddr.IP('10.128.0.0/9')] TEST_EXCLUDE_RANGE = [nacaddr.IP('10.240.0.0/16')] ANY_IPS = [nacaddr.IP('0.0.0.0/0'), nacaddr.IP('::/0')] TEST_IPV4_ONLY = [nacaddr.IP('10.2.3.4/32')] TEST_IPV6_ONLY = [nacaddr.IP('2001:4860:8000::5/128')] _TERM_SOURCE_TAGS_LIMIT = 30 _TERM_TARGET_TAGS_LIMIT = 70 _TERM_PORTS_LIMIT = 256 class GCETest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def _StripAclHeaders(self, acl): return '\n'.join([ line for line in str(acl).split('\n') if not line.lstrip().startswith('#') ]) def testGenericTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testTermWithPriority(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) self.assertIn('"priority": "1",', str(acl), str(acl)) def testTermWithLogging(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LOGGING, self.naming), EXP_INFO) rendered_acl = json.loads(str(acl))[0] self.assertIn('logConfig', rendered_acl) self.assertEqual(rendered_acl['logConfig'], {'enable': True}) def testGenericTermWithoutNetwork(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_NO_NETWORK + GOOD_TERM, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_NO_NETWORK_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testGenericTermWithExclude(self): self.naming.GetNetAddr.side_effect = [TEST_INCLUDE_IPS, TEST_EXCLUDE_IPS] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_has_calls( [mock.call('CORP_EXTERNAL'), mock.call('GUEST_WIRELESS_EXTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testGenericTermWithExcludeRange(self): self.naming.GetNetAddr.side_effect = [ TEST_INCLUDE_RANGE, TEST_EXCLUDE_RANGE ] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_EXCLUDE_RANGE) self.assertEqual(expected, 
json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_has_calls( [mock.call('CORP_EXTERNAL'), mock.call('GUEST_WIRELESS_EXTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testSkipExpiredTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXPIRED, self.naming), EXP_INFO) self.assertEqual(self._StripAclHeaders(str(acl)), '[]\n\n') self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testSkipStatelessReply(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] # Add stateless_reply to terms, there is no current way to include it in the # term definition. ret = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming) _, terms = ret.filters[0] for term in terms: term.stateless_reply = True acl = gce.GCE(ret, EXP_INFO) self.assertEqual(self._StripAclHeaders(str(acl)), '[]\n\n') self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testSourceNetworkSplit(self): lots_of_ips = [] for i in range(20): for j in range(20): lots_of_ips.append(nacaddr.IP('10.%d.%d.1/32' % (i, j))) self.naming.GetNetAddr.return_value = lots_of_ips self.naming.GetServiceByProto.return_value = ['53'] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('default-good-term-1-1', str(acl)) self.assertIn('default-good-term-1-2', str(acl)) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithoutSource(self): self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce.GceFirewallError, 'Ingress rule missing required field oneof "sourceRanges" or "sourceTags.', gce.GCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_NO_SOURCE, self.naming), EXP_INFO) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesWithOnlySourceExclusion(self): self.naming.GetNetAddr.return_value = TEST_EXCLUDE_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce.GceFirewallError, ('GCE firewall does not support address exclusions without a source ' 'address list.'), gce.GCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_SOURCE_EXCLUDE_ONLY, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('GUEST_WIRELESS_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesNoSourceAfterExclude(self): self.naming.GetNetAddr.side_effect = [TEST_INCLUDE_IPS, TEST_INCLUDE_IPS] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce.GceFirewallError, ('GCE firewall rule no longer contains any source addresses after ' 'the prefixes in source_address_exclude were removed.'), gce.GCE, policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_has_calls( [mock.call('CORP_EXTERNAL'), mock.call('GUEST_WIRELESS_EXTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithSourcePort(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] 
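    # BAD_TERM_SOURCE_PORT sets source-port, which GCE firewall rules cannot express, so rendering must raise GceFirewallError (asserted below).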
self.assertRaisesRegex( gce.GceFirewallError, 'GCE firewall does not support source port restrictions.', gce.GCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_SOURCE_PORT, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesWithLongTermName(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaises( aclgenerator.TermNameTooLongError, gce.GCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_NAME_TOO_LONG, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesWithUnsupportedOption(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce.GceFirewallError, 'GCE firewall does not support term options.', gce.GCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_UNSUPPORTED_OPTION, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testBuildTokens(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol1 = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol1 = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testDenyAction(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_DENY_EXPECTED) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testIngress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INGRESS + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) def testEgress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS, self.naming), EXP_INFO) self.assertIn('EGRESS', str(acl)) self.assertIn('good-term-1-e', str(acl)) self.assertNotIn('INGRESS', str(acl)) def testRaisesWithEgressDestinationTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce.GceFirewallError, 'GCE Egress rule cannot have destination tag.', gce.GCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EGRESS, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithEgressSourceAddress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce.GceFirewallError, 'Egress rules cannot include "sourceRanges".', gce.GCE, 
policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EGRESS_SOURCE_ADDRESS, self.naming), EXP_INFO) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithEgressSourceAndDestTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce.GceFirewallError, 'GCE Egress rule cannot have destination tag.', gce.GCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EGRESS_SOURCE_DEST_TAG, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testEgressTags(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('targetTags', str(acl)) self.assertNotIn('sourceTags', str(acl)) def testIngressTags(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('sourceTags', str(acl)) self.assertNotIn('targetTags', str(acl)) def testSourceServiceAccounts(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_SOURCE_SERVICE_ACCOUNT, self.naming), EXP_INFO) self.assertIn('sourceServiceAccounts', str(acl)) self.assertNotIn('targetTags', str(acl)) self.assertNotIn('sourceTags', str(acl)) def testTargetServiceAccounts(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_TARGET_SERVICE_ACCOUNT, self.naming), EXP_INFO) self.assertIn('targetServiceAccounts', str(acl)) self.assertNotIn('targetTags', str(acl)) self.assertNotIn('sourceTags', str(acl)) def testDestinationRanges(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS, self.naming), EXP_INFO) self.assertIn('destinationRanges', str(acl), str(acl)) self.assertNotIn('sourceRanges', str(acl), str(acl)) self.assertIn('10.2.3.4/32', str(acl), str(acl)) def testP4TagsNotPresent(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertNotIn('$Id:', str(acl)) def testRaisesConflictingDirectionAddress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce.GceFirewallError, 'Ingress rule missing required field oneof "sourceRanges" or "sourceTags"', gce.GCE, policy.ParsePolicy(GOOD_HEADER_INGRESS + GOOD_TERM_4, self.naming), EXP_INFO) self.assertRaisesRegex( gce.GceFirewallError, 'Egress rules cannot include "sourceRanges".', gce.GCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM, self.naming), EXP_INFO) def testDefaultDenyEgressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + 
GOOD_TERM_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('"priority": 65534', str(acl)) def testDefaultDenyIngressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('"priority": 65534', str(acl)) def testValidTermNames(self): for name in VALID_TERM_NAMES: self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_CUSTOM_NAME % name, self.naming) acl = gce.GCE(pol, EXP_INFO) self.assertIsNotNone(str(acl)) def testInet(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) def testInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) def testInetWithV6AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) def testInet6WithV4AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) def testFilterIPv4InIPv6FormatMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('::ffff:a02:301/128', str(acl)) self.assertNotIn('2002::/16', str(acl)) self.assertNotIn('::0000:a02:301/128', str(acl)) def testFilterIPv4InIPv6FormatInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('::ffff:a02:301/128', str(acl)) self.assertNotIn('2002::/16', str(acl)) self.assertNotIn('::0000:a02:301/128', str(acl)) def testFilterIPv4InIPv6FormatInet(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM + DEFAULT_DENY, self.naming), 
EXP_INFO) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('::ffff:a02:301/128', str(acl)) self.assertNotIn('2002::/16', str(acl)) self.assertNotIn('::0000:a02:301/128', str(acl)) def testInetWithSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INET + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) def testInet6WithSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('internal-servers', str(acl)) def testInetWithSourceTagAndV6Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INET + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('internal-servers', str(acl)) def testInet6WithSourceTagAndV4Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('internal-servers', str(acl)) def testInet6DefaultDenyEgressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_EGRESS_INET6 + GOOD_TERM_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) self.assertNotIn('INGRESS', str(acl)) self.assertIn('EGRESS', str(acl)) self.assertIn('"priority": 65534', str(acl)) self.assertIn('::/0', str(acl)) self.assertNotIn('0.0.0.0/0', str(acl)) def testInet6DefaultDenyIngressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_INGRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('"priority": 65534', str(acl)) self.assertIn('::/0', str(acl)) self.assertNotIn('0.0.0.0/0', str(acl)) def testIcmpInet(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM_ICMP, self.naming), EXP_INFO) self.assertIn('icmp', str(acl)) self.assertNotIn('58', str(acl)) def testIcmpv6Inet6(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) self.assertIn('58', str(acl)) 
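    # Explanatory comment (added, inferred from this test's assertions): under
    # an inet6 header the generator is expected to render icmpv6 terms as IP
    # protocol number 58 rather than a named 'icmpv6' keyword.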
self.assertNotIn('icmp', str(acl)) def testIcmpInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_ICMP, self.naming), EXP_INFO) self.assertNotIn('icmp', str(acl)) def testIcmpv6Inet(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) self.assertNotIn('58', str(acl)) def testIgmpInet(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM_IGMP, self.naming), EXP_INFO) self.assertIn('2', str(acl)) def testIgmpInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_IGMP, self.naming), EXP_INFO) self.assertNotIn('2', str(acl)) def testPortsCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = list( str(i) for i in range(1024, 1024 + (gce.Term._TERM_PORTS_LIMIT) * 3, 2)) self.assertRaisesRegex( gce.GceFirewallError, 'GCE firewall rule exceeded number of ports per rule: ' + 'bad-term-ports-count', gce.GCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_PORTS_COUNT, self.naming), EXP_INFO) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testSourceTagCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce.GceFirewallError, 'GCE firewall rule exceeded number of source tags per rule: ' + 'bad-term-source-tags-count', gce.GCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_SOURCE_TAGS_COUNT, self.naming), EXP_INFO) def testTargetTagCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce.GceFirewallError, 'GCE firewall rule exceeded number of target tags per rule: ' + 'bad-term-target-tags-count', gce.GCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_TARGET_TAGS_COUNT, self.naming), EXP_INFO) def testServiceAccountCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce.GceFirewallError, 'GCE firewall rule exceeded number of service accounts per rule: ' + 'bad-term-service-accounts-count', gce.GCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_SERVICE_ACCOUNTS_COUNT, self.naming), EXP_INFO) def testTargetTagsAndSourceServiceAccountsError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce.GceFirewallError, 'sourceServiceAccounts cannot be used at the same time as targetTags or sourceTags', gce.GCE, policy.ParsePolicy( GOOD_HEADER_INET + BAD_TERM_TARGET_TAGS_AND_SERVICE_ACCOUNTS2, self.naming), EXP_INFO) def testTargetTagsAndTargetServiceAccountsError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce.GceFirewallError, 'targetServiceAccounts cannot be used at the same time as targetTags or sourceTags', gce.GCE, policy.ParsePolicy( GOOD_HEADER_INET + BAD_TERM_TARGET_TAGS_AND_SERVICE_ACCOUNTS, self.naming), EXP_INFO) def testMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) def testInetIsDefault(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = 
gce.GCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) def testMixedWithV6AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) def testMixedWithV4AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) def testMixedIsSeparateRules(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('good-term-1', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTagOnly(self): self.naming.GetNetAddr.return_value = [] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTagAndV6Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTagAndV4Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', 
str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithEgressSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_EGRESS_MIXED + GOOD_TERM_EGRESS_SOURCETAG, self.naming), EXP_INFO) self.assertNotIn('INGRESS', str(acl)) self.assertIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('dns-servers', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1-e'), str(acl)) def testMixedDefaultDenyEgressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_EGRESS_MIXED + GOOD_TERM_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) self.assertNotIn('INGRESS', str(acl)) self.assertIn('EGRESS', str(acl)) self.assertIn('"priority": 65534', str(acl)) self.assertIn('default-deny-e', str(acl)) self.assertIn(gcp.GetIpv6TermName('default-deny-e'), str(acl)) self.assertIn('::/0', str(acl)) self.assertIn('0.0.0.0/0', str(acl)) def testMixedDefaultDenyIngressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('"priority": 65534', str(acl)) self.assertIn('default-deny', str(acl)) self.assertIn(gcp.GetIpv6TermName('default-deny'), str(acl)) self.assertIn('::/0', str(acl)) self.assertIn('0.0.0.0/0', str(acl)) def testIcmpMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ICMP, self.naming), EXP_INFO) self.assertIn('icmp', str(acl)) self.assertNotIn('58', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testIcmpv6Mixed(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) self.assertIn('58', str(acl)) self.assertNotIn('icmp', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-pingv6'), str(acl)) def testIgmpMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_IGMP, self.naming), EXP_INFO) self.assertIn('2', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-pingv6'), str(acl)) def testNoProtocol(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce.GCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_NO_PROTOCOL, self.naming), EXP_INFO) self.assertIn('all', str(acl)) def testPlatformExclude(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce.GCE( policy.ParsePolicy( GOOD_HEADER_INET + GOOD_PLATFORM_EXCLUDE_TERM + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', 
str(acl))
    self.assertNotIn('2001:4860:8000::5/128', str(acl))
    self.assertIn('good-term-1', str(acl))
    self.assertNotIn('good-platform-exclude-term', str(acl))

  def testPlatform(self):
    self.naming.GetNetAddr.return_value = TEST_IPS
    self.naming.GetServiceByProto.side_effect = [['53'], ['53']]

    acl = gce.GCE(
        policy.ParsePolicy(GOOD_HEADER_INET + GOOD_PLATFORM_TERM, self.naming),
        EXP_INFO)
    self.assertIn('INGRESS', str(acl))
    self.assertNotIn('EGRESS', str(acl))
    self.assertIn('10.2.3.4/32', str(acl))
    self.assertNotIn('2001:4860:8000::5/128', str(acl))
    self.assertIn('good-platform-term', str(acl))

  def testTermOwners(self):
    self.naming.GetNetAddr.return_value = TEST_IPS
    self.naming.GetServiceByProto.side_effect = [['53'], ['53']]

    acl = gce.GCE(
        policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_OWNERS, self.naming),
        EXP_INFO)
    rendered_acl = json.loads(str(acl))[0]
    self.assertEqual(rendered_acl['description'],
                     'DNS access from corp. Owner: test-owner')

  def testMaxAttributeExceeded(self):
    self.naming.GetNetAddr.return_value = TEST_IPS
    self.naming.GetServiceByProto.side_effect = [['53'], ['53']]
    self.assertRaises(
        gce.ExceededAttributeCountError, gce.GCE,
        policy.ParsePolicy(
            GOOD_HEADER_MAX_ATTRIBUTE_COUNT + GOOD_TERM + DEFAULT_DENY,
            self.naming), EXP_INFO)

  def testMaxAttribute(self):
    self.naming.GetNetAddr.return_value = [nacaddr.IP('10.2.3.4/32')]
    pol = policy.ParsePolicy(GOOD_HEADER_MAX_ATTRIBUTE_COUNT + GOOD_TERM_5,
                             self.naming)
    acl = gce.GCE(pol, EXP_INFO)
    self.assertIsNotNone(str(acl))

  @parameterized.named_parameters(
      ('1 ip, 2 ports', {
          'sourceRanges': ['10.128.0.0/10'],
          'allowed': [{'ports': ['22'], 'IPProtocol': 'tcp'},
                      {'ports': ['53'], 'IPProtocol': 'udp'}],
      }, 5),
      ('1 ip, 2 ports, 1 target tag', {
          'sourceRanges': ['10.128.0.0/10'],
          'allowed': [{'ports': ['22'], 'IPProtocol': 'tcp'},
                      {'ports': ['53'], 'IPProtocol': 'udp'}],
          'targetTags': ['dns-servers'],
      }, 6),
      ('2 ips, 2 ports, 1 target tag', {
          'sourceRanges': ['10.128.0.0/10', '192.168.1.1/24'],
          'allowed': [{'ports': ['22'], 'IPProtocol': 'tcp'},
                      {'ports': ['53'], 'IPProtocol': 'udp'}],
          'targetTags': ['dns-servers'],
      }, 7),
      ('2 ips, 2 ports', {
          'sourceRanges': ['10.128.0.0/10', '192.168.1.1/24'],
          'allowed': [{'ports': ['22'], 'IPProtocol': 'tcp'},
                      {'ports': ['53'], 'IPProtocol': 'udp'}],
      }, 6),
      ('2 ips, 2 protocols', {
          'sourceRanges': ['10.128.0.0/10', '192.168.1.1/24'],
          'allowed': [{'IPProtocol': 'tcp'}, {'IPProtocol': 'udp'}],
      }, 4),
      ('1 ip, 2 protocols, 1 source tag', {
          'sourceRanges': ['10.128.0.0/10'],
          'allowed': [{'IPProtocol': 'tcp'}, {'IPProtocol': 'udp'}],
          'sourceTags': ['dns-servers'],
      }, 4),
      ('2 ips, 1 protocol', {
          'sourceRanges': ['10.128.0.0/10', '192.168.1.1/24'],
          'allowed': [{'IPProtocol': 'icmp'}],
      }, 3),
      ('1 ip, 2 protocols, 2 service accounts', {
          'sourceRanges': ['10.128.0.0/10'],
          'allowed': [{'IPProtocol': 'tcp'}, {'IPProtocol': 'udp'}],
          'sourceServiceAccounts': ['test@system.gserviceaccount.com'],
          'targetServiceAccounts': ['test@system.gserviceaccount.com'],
      }, 5))
  def testGetAttributeCount(self, dict_term, expected):
    self.assertEqual(gce.GetAttributeCount(dict_term), expected)


if __name__ == '__main__':
  absltest.main()

capirca-2.0.9/tests/lib/gce_vpc_tf_test.py

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for gce_vpc_tf firewall rendering module."""

import json
from unittest import mock

from absl.testing import absltest
from absl.testing import parameterized
from capirca.lib import aclgenerator
from capirca.lib import gce_vpc_tf
from capirca.lib import gcp
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import policy

GOOD_HEADER = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf global/networks/default
}
"""

GOOD_HEADER_INGRESS = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf INGRESS global/networks/default
}
"""

GOOD_HEADER_EGRESS = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf EGRESS global/networks/default
}
"""

GOOD_HEADER_NO_NETWORK = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf
}
"""

GOOD_HEADER_MAX_ATTRIBUTE_COUNT = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf INGRESS global/networks/default 2
}
"""

GOOD_HEADER_INET = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf INGRESS inet global/networks/default
}
"""

GOOD_HEADER_INET6 = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf INGRESS inet6 global/networks/default
}
"""

GOOD_HEADER_EGRESS_INET6 = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf EGRESS inet6 global/networks/default
}
"""

GOOD_HEADER_MIXED = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf INGRESS mixed global/networks/default
}
"""

GOOD_HEADER_EGRESS_MIXED = """
header {
  comment:: "The general policy comment."
  target:: gce_vpc_tf EGRESS mixed global/networks/default
}
"""

GOOD_TERM = """
term good-term-1 {
  comment:: "DNS access from corp."
  source-address:: CORP_EXTERNAL
  destination-tag:: dns-servers
  destination-port:: DNS
  protocol:: udp tcp
  action:: accept
}
"""

GOOD_TERM_2 = """
term good-term-2 {
  comment:: "DNS access from corp."
  source-address:: CORP_EXTERNAL
  destination-tag:: dns-servers
  destination-port:: DNS
  protocol:: udp tcp
  policer:: batman
  action:: accept
}
"""

GOOD_TERM_3 = """
term good-term-1 {
  comment:: "DNS access from corp."
  source-address:: CORP_EXTERNAL
  destination-tag:: dns-servers
  destination-port:: DNS
  protocol:: udp tcp
  priority:: 1
  action:: accept
}
"""

GOOD_TERM_EXCLUDE = """
term good-term-1 {
  comment:: "DNS access from corp."
  source-address:: CORP_EXTERNAL
  source-exclude:: GUEST_WIRELESS_EXTERNAL
  destination-tag:: dns-servers
  destination-port:: DNS
  protocol:: udp tcp
  action:: accept
}
"""

GOOD_TERM_4 = """
term good-term-1 {
  comment:: "DNS access from corp."
  destination-address:: CORP_EXTERNAL
  destination-tag:: dns-servers
  destination-port:: DNS
  protocol:: udp tcp
  action:: accept
}
"""

GOOD_TERM_5 = """
term good-term-5 {
  comment:: "ICMP from IP."
  source-address:: CORP_EXTERNAL
  protocol:: icmp
  action:: accept
}
"""

GOOD_TERM_EGRESS = """
term good-term-1 {
  comment:: "DNS access from corp."
  destination-address:: CORP_EXTERNAL
  destination-port:: DNS
  protocol:: udp tcp
  action:: accept
}
"""

GOOD_TERM_EGRESS_SOURCETAG = """
term good-term-1 {
  comment:: "DNS access from corp."
destination-address:: CORP_EXTERNAL source-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_INGRESS_SOURCETAG = """ term good-term-1 { comment:: "Allow all GCE network internal traffic." source-tag:: internal-servers protocol:: udp tcp action:: accept } """ GOOD_TERM_INGRESS_ADDRESS_SOURCETAG = """ term good-term-1 { comment:: "Allow all GCE network internal traffic." source-tag:: internal-servers source-address:: CORP_EXTERNAL protocol:: udp tcp action:: accept } """ GOOD_PLATFORM_EXCLUDE_TERM = """ term good-platform-exclude-term { comment:: "DNS access from corp." destination-tag:: dns-servers protocol:: udp tcp action:: accept platform-exclude:: gce_vpc_tf } """ GOOD_PLATFORM_TERM = """ term good-platform-term { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept platform:: gce_vpc_tf } """ GOOD_TERM_JSON = """ { "resource": { "google_compute_firewall": [ { "default-good-term-1": { "name": "default-good-term-1", "source_ranges": [ "10.2.3.4/32" ], "allow": [ { "ports": [ "53" ], "protocol": "udp" }, { "ports": [ "53" ], "protocol": "tcp" } ], "description": "DNS access from corp.", "target_tags": [ "dns-servers" ], "direction": "INGRESS", "network": "global/networks/default", "priority": 1 } } ] } } """ GOOD_TERM_EXPIRED = """ term good-term-expired { comment:: "Management access from corp." expiration:: 2001-01-01 source-address:: CORP_EXTERNAL destination-tag:: ssh-servers destination-port:: SSH protocol:: tcp action:: accept } """ GOOD_TERM_LOGGING = """ term good-term-logging { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept logging:: true } """ GOOD_TERM_CUSTOM_NAME = """ term %s { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_OWNERS = """ term good-term-owners { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp owner:: test-owner action:: accept } """ GOOD_TERM_ICMP = """ term good-term-ping { comment:: "Good term." source-address:: CORP_EXTERNAL protocol:: icmp action:: accept } """ GOOD_TERM_ICMPV6 = """ term good-term-pingv6 { comment:: "Good term." source-address:: CORP_EXTERNAL protocol:: icmpv6 action:: accept } """ GOOD_TERM_IGMP = """ term good-term-igmp { comment:: "Good term." source-address:: CORP_EXTERNAL protocol:: igmp action:: accept } """ GOOD_TERM_NO_PROTOCOL = """ term good-term-no-protocol { comment:: "Good term." source-address:: CORP_EXTERNAL action:: accept } """ GOOD_TERM_INGRESS_TARGET_SERVICE_ACCOUNT = """ term good-term-target-service-account { comment:: "Test with a service account." source-address:: CORP_EXTERNAL target-service-accounts:: acct@blah.com protocol:: udp tcp action:: accept } """ BAD_TERM_NO_SOURCE = """ term bad-term-no-source { comment:: "Management access from corp." destination-tag:: ssh-servers destination-port:: SSH protocol:: tcp action:: accept } """ BAD_TERM_SOURCE_EXCLUDE_ONLY = """ term bad-term-source-ex-only { comment:: "Management access from corp." destination-port:: SSH source-tag:: ssh-bastion source-exclude:: GUEST_WIRELESS_EXTERNAL protocol:: tcp action:: accept } """ BAD_TERM_SOURCE_PORT = """ term bad-term-source-port { comment:: "Management access from corp." 
source-address:: CORP_EXTERNAL source-port:: SSH destination-tag:: ssh-servers protocol:: tcp action:: accept } """ BAD_TERM_NAME_TOO_LONG = """ term good-term-whith-a-name-which-is-way-way-too-long-for-gce-to-accept { comment:: "Management access from corp." source-address:: CORP_EXTERNAL destination-port:: SSH protocol:: tcp action:: accept } """ BAD_TERM_UNSUPPORTED_PORT = """ term good-term-unsupported-port { comment:: "Management access from corp." source-address:: CORP_EXTERNAL destination-port:: SSH protocol:: tcp icmp action:: accept } """ BAD_TERM_UNSUPPORTED_OPTION = """ term bad-term-unsupported-option { comment:: "Management access from corp." source-address:: CORP_EXTERNAL destination-port:: SSH protocol:: tcp action:: accept option:: tcp-initial } """ BAD_TERM_EGRESS = """ term bad-term-dest-tag { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-tag:: dns-servers destination-port:: DNS protocol:: udp tcp action:: accept } """ BAD_TERM_EGRESS_SOURCE_ADDRESS = """ term bad-term-source-address { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL source-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ BAD_TERM_EGRESS_SOURCE_DEST_TAG = """ term bad-term-source-dest-tag { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-tag:: dns-servers source-tag:: ssh-bastion destination-port:: DNS protocol:: udp tcp action:: accept } """ BAD_TERM_PORTS_COUNT = """ term bad-term-ports-count { comment:: "This term has way too many ports." source-address:: CORP_EXTERNAL source-tag:: ssh-bastion destination-port:: SSH protocol:: tcp action:: accept } """ SAMPLE_TAG = 'ssh-bastions ' BAD_TERM_SOURCE_TAGS_COUNT = """ term bad-term-source-tags-count {{ comment:: "This term has way too many source tags." protocol:: tcp action:: accept source-tag:: {many_source_tags} }}""".format(many_source_tags=SAMPLE_TAG * (gce_vpc_tf.Term._TERM_SOURCE_TAGS_LIMIT + 1)) BAD_TERM_TARGET_TAGS_COUNT = """ term bad-term-target-tags-count {{ comment:: "This term has way too many target tags." source-address:: CORP_EXTERNAL protocol:: tcp action:: accept destination-tag:: {many_target_tags} }}""".format(many_target_tags=SAMPLE_TAG * (gce_vpc_tf.Term._TERM_TARGET_TAGS_LIMIT + 1)) BAD_TERM_SERVICE_ACCOUNTS_COUNT = """ term bad-term-service-accounts-count {{ comment:: "This term has way too many source service accounts." protocol:: tcp action:: accept source-tag:: ssh-bastion source-service-accounts:: {many_service_accounts} }}""".format(many_service_accounts='acct1@blah.com ' * (gce_vpc_tf.Term._TERM_SERVICE_ACCOUNTS_LIMIT + 1)) BAD_TERM_TARGET_TAGS_AND_SERVICE_ACCOUNTS = """ term bad-term-tags-and-service-accounts { comment:: "This term has both a tag and a service account." source-address:: CORP_EXTERNAL destination-tag:: dns-servers protocol:: tcp action:: accept target-service-accounts:: acct1@blah.com } """ GOOD_TERM_EXCLUDE_RANGE = """ { "resource": { "google_compute_firewall": [ { "default-good-term-1": { "name": "default-good-term-1", "source_ranges": [ "10.128.0.0/10", "10.192.0.0/11", "10.224.0.0/12", "10.241.0.0/16", "10.242.0.0/15", "10.244.0.0/14", "10.248.0.0/13" ], "allow": [ { "ports": [ "53" ], "protocol": "udp" }, { "ports": [ "53" ], "protocol": "tcp" } ], "description": "DNS access from corp.", "direction": "INGRESS", "target_tags": [ "dns-servers" ], "network": "global/networks/default", "priority": 1 } } ] } } """ DEFAULT_DENY = """ term default-deny { comment:: "default_deny." 
action:: deny } """ GOOD_TERM_DENY = """ term good-term-1 { comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-tag:: dns-servers protocol:: udp tcp action:: deny } """ GOOD_TERM_DENY_EXPECTED = """{ "resource": { "google_compute_firewall": [ { "default-good-term-1": { "deny": [ { "protocol": "udp" }, { "protocol": "tcp" } ], "description": "DNS access from corp.", "direction": "INGRESS", "name": "default-good-term-1", "network": "global/networks/default", "priority": 1, "source_ranges": [ "10.2.3.4/32" ], "target_tags": [ "dns-servers" ] } } ] } } """ STATELESS_REPLY = """{ "resource": { "google_compute_firewall": [] } } """ VALID_TERM_NAMES = [ 'icmp', 'gcp-to-gcp', 'accept-ssh-from-google', 'ndc-rampart', 'lab-syslog', 'windows-windows', 'shell-wmn-inbound', 'shell-internal-smtp', 'accept-internal-traffic', 'deepfield-lab-management', 'deepfield-lab-reverse-proxy', 'cr-proxy-replication', 'ciena-one-control-tcp', 'fms-prod-to-fms-prod', 'ast', 'default-deny', 'google-web', 'zo6hmxkfibardh6tgbiy7ua6' ] SUPPORTED_TOKENS = frozenset({ 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_tag', 'expiration', 'stateless_reply', 'name', 'option', 'owner', 'priority', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'source_service_accounts', 'source_tag', 'target_service_accounts', 'translated', 'platform', 'platform_exclude', }) SUPPORTED_SUB_TOKENS = {'action': {'accept', 'deny'}} # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 TEST_IPS = [ nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('::ffff:a02:301/128'), # IPV4-mapped nacaddr.IP('2002::/16'), # 6to4 nacaddr.IP('::0000:a02:301/128'), # IPv4-compatible ] TEST_INCLUDE_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('10.4.3.2/32')] TEST_EXCLUDE_IPS = [nacaddr.IP('10.4.3.2/32')] TEST_INCLUDE_RANGE = [nacaddr.IP('10.128.0.0/9')] TEST_EXCLUDE_RANGE = [nacaddr.IP('10.240.0.0/16')] ANY_IPS = [nacaddr.IP('0.0.0.0/0'), nacaddr.IP('::/0')] TEST_IPV4_ONLY = [nacaddr.IP('10.2.3.4/32')] TEST_IPV6_ONLY = [nacaddr.IP('2001:4860:8000::5/128')] _TERM_SOURCE_TAGS_LIMIT = 30 _TERM_TARGET_TAGS_LIMIT = 70 _TERM_PORTS_LIMIT = 256 class TerraformGCETest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def _StripAclHeaders(self, acl): return '\n'.join([ line for line in str(acl).split('\n') if not line.lstrip().startswith('#') ]) def testGenericTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testTermWithPriority(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) self.assertIn('"priority": 1', str(acl), str(acl)) def testTermWithLogging(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( 
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LOGGING, self.naming), EXP_INFO) rendered_acl = json.loads( str(acl) )['resource']['google_compute_firewall'][0]['default-good-term-logging'] self.assertIn('log_config', rendered_acl) self.assertEqual(rendered_acl['log_config'], {'metadata': 'INCLUDE_ALL_METADATA'}) def testGenericTermWithoutNetwork(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE filter does not specify a network.', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_NO_NETWORK + GOOD_TERM, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testGenericTermWithExclude(self): self.naming.GetNetAddr.side_effect = [TEST_INCLUDE_IPS, TEST_EXCLUDE_IPS] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_JSON) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_has_calls( [mock.call('CORP_EXTERNAL'), mock.call('GUEST_WIRELESS_EXTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testGenericTermWithExcludeRange(self): self.naming.GetNetAddr.side_effect = [ TEST_INCLUDE_RANGE, TEST_EXCLUDE_RANGE ] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_EXCLUDE_RANGE) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) self.naming.GetNetAddr.assert_has_calls( [mock.call('CORP_EXTERNAL'), mock.call('GUEST_WIRELESS_EXTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testSkipExpiredTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXPIRED, self.naming), EXP_INFO) self.assertEqual(self._StripAclHeaders(str(acl)), STATELESS_REPLY) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testSkipStatelessReply(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] # Add stateless_reply to terms, there is no current way to include it in the # term definition. 
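    # Explanatory comment (added, inferred from the assertEqual at the end of
    # this test): terms flagged as stateless_reply are expected to be skipped
    # by the generator, leaving only the empty STATELESS_REPLY skeleton.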
ret = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming) _, terms = ret.filters[0] for term in terms: term.stateless_reply = True acl = gce_vpc_tf.TerraformGCE(ret, EXP_INFO) self.assertEqual(self._StripAclHeaders(str(acl)), STATELESS_REPLY) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testSourceNetworkSplit(self): lots_of_ips = [] for i in range(20): for j in range(20): lots_of_ips.append(nacaddr.IP('10.%d.%d.1/32' % (i, j))) self.naming.GetNetAddr.return_value = lots_of_ips self.naming.GetServiceByProto.return_value = ['53'] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('default-good-term-1-1', str(acl)) self.assertIn('default-good-term-1-2', str(acl)) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithoutSource(self): self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'Ingress rule missing required field oneof "source_ranges" or "source_tags.', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_NO_SOURCE, self.naming), EXP_INFO) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesWithOnlySourceExclusion(self): self.naming.GetNetAddr.return_value = TEST_EXCLUDE_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, ('GCE firewall does not support address exclusions without a source ' 'address list.'), gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_SOURCE_EXCLUDE_ONLY, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('GUEST_WIRELESS_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesNoSourceAfterExclude(self): self.naming.GetNetAddr.side_effect = [TEST_INCLUDE_IPS, TEST_INCLUDE_IPS] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, ('GCE firewall rule no longer contains any source addresses after ' 'the prefixes in source_address_exclude were removed.'), gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_has_calls( [mock.call('CORP_EXTERNAL'), mock.call('GUEST_WIRELESS_EXTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithSourcePort(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE firewall does not support source port restrictions.', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_SOURCE_PORT, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRaisesWithLongTermName(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaises( aclgenerator.TermNameTooLongError, gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_NAME_TOO_LONG, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 
'tcp') def testRaisesWithUnsupportedOption(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE firewall does not support term options.', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_UNSUPPORTED_OPTION, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testBuildTokens(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol1 = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol1 = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testDenyAction(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_DENY, self.naming), EXP_INFO) expected = json.loads(GOOD_TERM_DENY_EXPECTED) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testIngress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INGRESS + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) def testEgress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS, self.naming), EXP_INFO) self.assertIn('EGRESS', str(acl)) self.assertIn('good-term-1-e', str(acl)) self.assertNotIn('INGRESS', str(acl)) def testRaisesWithEgressDestinationTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE Egress rule cannot have destination tag.', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EGRESS, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithEgressSourceAddress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'Egress rules cannot include "source_ranges".', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EGRESS_SOURCE_ADDRESS, self.naming), EXP_INFO) self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testRaisesWithEgressSourceAndDestTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE Egress rule cannot have destination tag.', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EGRESS_SOURCE_DEST_TAG, self.naming), 
EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testEgressTags(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('target_tags', str(acl)) self.assertNotIn('source_tags', str(acl)) def testIngressTags(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('source_tags', str(acl)) self.assertNotIn('target_tags', str(acl)) def testTargetServiceAccounts(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_TARGET_SERVICE_ACCOUNT, self.naming), EXP_INFO) self.assertIn('target_service_accounts', str(acl)) self.assertNotIn('target_tags', str(acl)) self.assertNotIn('source_tags', str(acl)) def testDestinationRanges(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS, self.naming), EXP_INFO) self.assertIn('destination_ranges', str(acl), str(acl)) self.assertNotIn('source_ranges', str(acl), str(acl)) self.assertIn('10.2.3.4/32', str(acl), str(acl)) def testP4TagsNotPresent(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertNotIn('$Id:', str(acl)) def testRaisesConflictingDirectionAddress(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'Ingress rule missing required field oneof "source_ranges" or "source_tags"', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_INGRESS + GOOD_TERM_4, self.naming), EXP_INFO) self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'Egress rules cannot include "source_ranges".', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM, self.naming), EXP_INFO) def testDefaultDenyEgressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('"priority": 2', str(acl)) def testDefaultDenyIngressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INGRESS + GOOD_TERM_INGRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('"priority": 2', str(acl)) def testValidTermNames(self): for name in VALID_TERM_NAMES: self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_CUSTOM_NAME % name, self.naming) acl = gce_vpc_tf.TerraformGCE(pol, EXP_INFO) self.assertIsNotNone(str(acl)) def testInet(self): 
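    # Explanatory comment (added): with an inet (IPv4-only) header, the IPv6
    # entries in TEST_IPS should be filtered out of the rendered rule; see the
    # assertions below.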
self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) def testInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) def testInetWithV6AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) def testInet6WithV4AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) def testFilterIPv4InIPv6FormatMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('::ffff:a02:301/128', str(acl)) self.assertNotIn('2002::/16', str(acl)) self.assertNotIn('::0000:a02:301/128', str(acl)) def testFilterIPv4InIPv6FormatInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('::ffff:a02:301/128', str(acl)) self.assertNotIn('2002::/16', str(acl)) self.assertNotIn('::0000:a02:301/128', str(acl)) def testFilterIPv4InIPv6FormatInet(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('::ffff:a02:301/128', str(acl)) self.assertNotIn('2002::/16', str(acl)) self.assertNotIn('::0000:a02:301/128', str(acl)) def testInetWithSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INET + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) 
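    # Explanatory comment (added, based on the remaining assertions): in inet
    # mode only the IPv4 ranges survive, while the source tag is still
    # rendered alongside them.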
self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) def testInet6WithSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('internal-servers', str(acl)) def testInetWithSourceTagAndV6Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INET + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('internal-servers', str(acl)) def testInet6WithSourceTagAndV4Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('internal-servers', str(acl)) def testInet6DefaultDenyEgressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_EGRESS_INET6 + GOOD_TERM_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) self.assertNotIn('INGRESS', str(acl)) self.assertIn('EGRESS', str(acl)) self.assertIn('"priority": 2', str(acl)) self.assertIn('::/0', str(acl)) self.assertNotIn('0.0.0.0/0', str(acl)) def testInet6DefaultDenyIngressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_INGRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('"priority": 2', str(acl)) self.assertIn('::/0', str(acl)) self.assertNotIn('0.0.0.0/0', str(acl)) def testIcmpInet(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM_ICMP, self.naming), EXP_INFO) self.assertIn('icmp', str(acl)) self.assertNotIn('58', str(acl)) def testIcmpv6Inet6(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) self.assertIn('58', str(acl)) self.assertNotIn('icmp', str(acl)) def testIcmpInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_ICMP, self.naming), EXP_INFO) self.assertNotIn('icmp', str(acl)) def testIcmpv6Inet(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) self.assertNotIn('58', str(acl)) def testIgmpInet(self): 
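    # IGMP is IP protocol 2 and is IPv4-only (IPv6 uses MLD over ICMPv6),
    # so it should be rendered for inet filters but dropped for inet6 below.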
self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET + GOOD_TERM_IGMP, self.naming), EXP_INFO) self.assertIn('2', str(acl)) def testIgmpInet6(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_INET6 + GOOD_TERM_IGMP, self.naming), EXP_INFO) self.assertNotIn('2', str(acl)) def testPortsCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = list( str(i) for i in range(1024, 1024 + (gce_vpc_tf.Term._TERM_PORTS_LIMIT) * 3, 2)) self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE firewall rule exceeded number of ports per rule: ' + 'bad-term-ports-count', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_PORTS_COUNT, self.naming), EXP_INFO) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testSourceTagCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE firewall rule exceeded number of source tags per rule: ' + 'bad-term-source-tags-count', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_SOURCE_TAGS_COUNT, self.naming), EXP_INFO) def testTargetTagCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE firewall rule exceeded number of target tags per rule: ' + 'bad-term-target-tags-count', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_TARGET_TAGS_COUNT, self.naming), EXP_INFO) def testServiceAccountCountExceededError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'GCE firewall rule exceeded number of service accounts per rule: ' + 'bad-term-service-accounts-count', gce_vpc_tf.TerraformGCE, policy.ParsePolicy(GOOD_HEADER_INET + BAD_TERM_SERVICE_ACCOUNTS_COUNT, self.naming), EXP_INFO) def testTargetTagsAndServiceAccountsError(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( gce_vpc_tf.TerraformFirewallError, 'target_service_accounts cannot be used at the same time as target_tags or source_tags', gce_vpc_tf.TerraformGCE, policy.ParsePolicy( GOOD_HEADER_INET + BAD_TERM_TARGET_TAGS_AND_SERVICE_ACCOUNTS, self.naming), EXP_INFO) def testMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) def testInetIsDefault(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) def testMixedWithV6AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) 
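    # With only IPv6 addresses resolved, the mixed-header rule should omit
    # any IPv4 range and carry the IPv6 range instead.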
self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) def testMixedWithV4AddressesOnly(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) def testMixedIsSeparateRules(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('good-term-1', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTagOnly(self): self.naming.GetNetAddr.return_value = [] self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTagAndV6Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV6_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithSourceTagAndV4Addresses(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_ADDRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('internal-servers', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testMixedWithEgressSourceTag(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_EGRESS_MIXED + GOOD_TERM_EGRESS_SOURCETAG, 
self.naming), EXP_INFO) self.assertNotIn('INGRESS', str(acl)) self.assertIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn('dns-servers', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-1-e'), str(acl)) def testMixedDefaultDenyEgressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_EGRESS_MIXED + GOOD_TERM_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) self.assertNotIn('INGRESS', str(acl)) self.assertIn('EGRESS', str(acl)) self.assertIn('"priority": 2', str(acl)) self.assertIn('default-deny-e', str(acl)) self.assertIn(gcp.GetIpv6TermName('default-deny-e'), str(acl)) self.assertIn('::/0', str(acl)) self.assertIn('0.0.0.0/0', str(acl)) def testMixedDefaultDenyIngressCreation(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_INGRESS_SOURCETAG + DEFAULT_DENY, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('"priority": 2', str(acl)) self.assertIn('default-deny', str(acl)) self.assertIn(gcp.GetIpv6TermName('default-deny'), str(acl)) self.assertIn('::/0', str(acl)) self.assertIn('0.0.0.0/0', str(acl)) def testIcmpMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ICMP, self.naming), EXP_INFO) self.assertIn('icmp', str(acl)) self.assertNotIn('58', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-1'), str(acl)) def testIcmpv6Mixed(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) self.assertIn('58', str(acl)) self.assertNotIn('icmp', str(acl)) self.assertNotIn('10.2.3.4/32', str(acl)) self.assertIn('2001:4860:8000::5/128', str(acl)) self.assertIn(gcp.GetIpv6TermName('good-term-pingv6'), str(acl)) def testIgmpMixed(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_IGMP, self.naming), EXP_INFO) self.assertIn('2', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertNotIn(gcp.GetIpv6TermName('good-term-pingv6'), str(acl)) def testNoProtocol(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_NO_PROTOCOL, self.naming), EXP_INFO) self.assertIn('all', str(acl)) def testPlatformExclude(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy( GOOD_HEADER_INET + GOOD_PLATFORM_EXCLUDE_TERM + GOOD_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('good-term-1', str(acl)) self.assertNotIn('good-platform-exclude-term', str(acl)) def testPlatform(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( 
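        # GOOD_PLATFORM_TERM presumably carries a platform:: restriction that
        # includes this generator, so it should still be rendered; the
        # assertion on 'good-platform-term' below verifies that.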
policy.ParsePolicy(GOOD_HEADER_INET + GOOD_PLATFORM_TERM, self.naming), EXP_INFO) self.assertIn('INGRESS', str(acl)) self.assertNotIn('EGRESS', str(acl)) self.assertIn('10.2.3.4/32', str(acl)) self.assertNotIn('2001:4860:8000::5/128', str(acl)) self.assertIn('good-platform-term', str(acl)) def testTermOwners(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gce_vpc_tf.TerraformGCE( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_OWNERS, self.naming), EXP_INFO) rendered_acl = json.loads( str(acl) )['resource']['google_compute_firewall'][0]['default-good-term-owners'] self.assertEqual(rendered_acl['description'], 'DNS access from corp. Owner: test-owner') def testMaxAttributeExceeded(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaises( gce_vpc_tf.ExceededAttributeCountError, gce_vpc_tf.TerraformGCE, policy.ParsePolicy( GOOD_HEADER_MAX_ATTRIBUTE_COUNT + GOOD_TERM + DEFAULT_DENY, self.naming), EXP_INFO) def testMaxAttribute(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.2.3.4/32')] pol = policy.ParsePolicy(GOOD_HEADER_MAX_ATTRIBUTE_COUNT + GOOD_TERM_5, self.naming) acl = gce_vpc_tf.TerraformGCE(pol, EXP_INFO) self.assertIsNotNone(str(acl)) @parameterized.named_parameters(('1 ip, 2 ports', { 'source_ranges': ['10.128.0.0/10'], 'allow': [{ 'ports': ['22'], 'protocol': 'tcp' }, { 'ports': ['53'], 'protocol': 'udp' }], }, 5), ('1 ip, 2 ports, 1 target tag', { 'source_ranges': ['10.128.0.0/10'], 'allow': [{ 'ports': ['22'], 'protocol': 'tcp' }, { 'ports': ['53'], 'protocol': 'udp' }], 'target_tags': ['dns-servers'], }, 6), ('2 ips, 2 ports, 1 target tag', { 'source_ranges': ['10.128.0.0/10', '192.168.1.1/24'], 'allow': [{ 'ports': ['22'], 'protocol': 'tcp' }, { 'ports': ['53'], 'protocol': 'udp' }], 'target_tags': ['dns-servers'], }, 7), ('2 ips, 2 ports', { 'source_ranges': ['10.128.0.0/10', '192.168.1.1/24'], 'allow': [{ 'ports': ['22'], 'protocol': 'tcp' }, { 'ports': ['53'], 'protocol': 'udp' }], }, 6), ('2 ips, 2 protocols', { 'source_ranges': ['10.128.0.0/10', '192.168.1.1/24'], 'allow': [{ 'protocol': 'tcp' }, { 'protocol': 'udp' }], }, 4), ('1 ip, 2 protocols, 1 source tag', { 'source_ranges': ['10.128.0.0/10'], 'allow': [{ 'protocol': 'tcp' }, { 'protocol': 'udp' }], 'source_tags': ['dns-servers'], }, 4), ('2 ips, 1 protocol', { 'source_ranges': ['10.128.0.0/10', '192.168.1.1/24'], 'allow': [{ 'protocol': 'icmp' }], }, 3), ('1 ip, 2 protocols, 1 service account', { 'source_ranges': ['10.128.0.0/10'], 'allow': [{ 'protocol': 'tcp' }, { 'protocol': 'udp' }], 'target_service_accounts': ['test@system.gserviceaccount.com'], }, 4)) def testGetAttributeCount(self, dict_term, expected): self.assertEqual(gce_vpc_tf.GetAttributeCount(dict_term), expected) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/gcp_hf_test.py000066400000000000000000003026121437377527500177230ustar00rootroot00000000000000"""Tests for google3.third_party.py.capirca.lib.gcp_hf.py.""" import json from unittest import mock from absl.testing import absltest from absl.testing import parameterized from capirca.lib import gcp from capirca.lib import gcp_hf from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy HEADER_NO_OPTIONS = """ header { comment:: "The general policy comment." target:: gcp_hf displayname } """ HEADER_OPTION_MAX = """ header { comment:: "The general policy comment." 
target:: gcp_hf displayname 20 } """ HEADER_OPTION_EGRESS = """ header { comment:: "The general policy comment." target:: gcp_hf displayname EGRESS } """ HEADER_OPTION_EGRESS_2 = """ header { comment:: "The general policy comment." target:: gcp_hf displayname2 EGRESS } """ HEADER_OPTION_AF = """ header { comment:: "The general policy comment." target:: gcp_hf displayname inet } """ HEADER_OPTION_HIGH_QUOTA = """ header { comment:: "The general policy comment." target:: gcp_hf displayname 500 } """ HEADER_OPTION_EGRESS_HIGH_QUOTA = """ header { comment:: "The general policy comment." target:: gcp_hf displayname EGRESS 500 } """ HEADER_OPTION_EGRESS_AND_MAX = """ header { comment:: "The general policy comment." target:: gcp_hf displayname EGRESS 20 } """ HEADER_OPTION_EGRESS_AND_AF = """ header { comment:: "The general policy comment." target:: gcp_hf displayname EGRESS inet } """ HEADER_OPTION_EGRESS_INET6 = """ header { comment:: "The general policy comment." target:: gcp_hf displayname EGRESS inet6 ga } """ HEADER_OPTION_MAX_AND_AF = """ header { comment:: "The general policy comment." target:: gcp_hf displayname 20 inet } """ HEADER_VERY_LOW_DEFAULT_MAX = """ header { comment:: "The general policy comment." target:: gcp_hf displayname 1 } """ HEADER_OPTION_BETA = """ header { comment:: "The general policy comment." target:: gcp_hf displayname inet beta } """ HEADER_OPTION_GA = """ header { comment:: "The general policy comment." target:: gcp_hf displayname ga } """ HEADER_GA_NO_INET_OPTIONS = """ header { comment:: "The general policy comment." target:: gcp_hf displayname ga } """ HEADER_OPTION_INET6 = """ header { comment:: "The general policy comment." target:: gcp_hf displayname inet6 ga } """ HEADER_OPTION_MIXED = """ header { comment:: "The general policy comment." target:: gcp_hf displayname mixed ga } """ HEADER_OPTION_EGRESS_MIXED = """ header { comment:: "The general policy comment." target:: gcp_hf displayname EGRESS mixed ga } """ BAD_HEADER_NO_DISPLAYNAME = """ header { comment:: "Header without a policy name." target:: gcp_hf } """ BAD_HEADER_LONG_DISPLAYNAME = """ header { comment:: "Using a display name with 64 characters." target:: gcp_hf this-is-a-very-very-long-policy-name-which-is-over-63-characters } """ BAD_HEADER_INVALID_DISPLAYNAME_1 = """ header { comment:: "Using a display name with an upper case letter." target:: gcp_hf Displayname } """ BAD_HEADER_INVALID_DISPLAYNAME_2 = """ header { comment:: "Using a display name with an underscore character." target:: gcp_hf display_name } """ BAD_HEADER_INVALID_DISPLAYNAME_3 = """ header { comment:: "Using a display name that ends in a dash." target:: gcp_hf displayname- } """ BAD_HEADER_UNKNOWN_OPTION = """ header { comment:: "The general policy comment." target:: gcp_hf displayname INGRESS randomOption } """ BAD_HEADER_UNKNOWN_DIRECTION = """ header { comment:: "The general policy comment." target:: gcp_hf displayname BIGRESS } """ BAD_HEADER_INVALID_MAX_COST = """ header { comment:: "The general policy comment." target:: gcp_hf displayname INGRESS 888888888 } """ BAD_HEADER_WRONG_PLATFORM = """ header { comment:: "The general policy comment." 
target:: wrong_platform } """ TERM_ALLOW_ALL_INTERNAL = """ term allow-internal-traffic { comment:: "Generic description" protocol:: tcp icmp udp action:: next } """ TERM_PLATFORM_ALLOW_ALL_INTERNAL = """ term allow-internal-traffic { comment:: "Generic description" protocol:: tcp icmp udp action:: next platform:: gcp_hf } """ TERM_PLATFORM_EXCLUDE = """ term allow-internal-traffic { comment:: "Generic description" protocol:: tcp icmp udp action:: next platform-exclude:: gcp_hf } """ TERM_ALLOW_MULTIPLE_PROTOCOL = """ term allow-internal-traffic { comment:: "Generic description" source-address:: PUBLIC_NAT protocol:: tcp icmp udp action:: next } """ TERM_ALLOW_MULTIPLE_PROTOCOL_ICMPV6 = """ term allow-internal-traffic { comment:: "Generic description" source-address:: PUBLIC_NAT protocol:: tcp icmpv6 udp action:: next } """ TERM_ALLOW_DNS = """ term allow-dns-traffic { comment:: "Generic description" source-address:: PUBLIC_NAT protocol:: tcp udp destination-port:: DNS action:: next } """ TERM_ALLOW_PORT = """ term allow-traffic-to-port { comment:: "Generic description" source-address:: PUBLIC_NAT protocol:: tcp destination-port:: PORT action:: next } """ TERM_ALLOW_EGRESS_PORT = """ term allow-traffic-to-port { comment:: "Generic description" destination-address:: PUBLIC_NAT protocol:: tcp destination-port:: PORT action:: next } """ TERM_ALLOW_PORT_RANGE = """ term allow-port-range { comment:: "Generic description" source-address:: PUBLIC_NAT protocol:: tcp destination-port:: RANGE action:: next } """ TERM_RESTRICT_EGRESS = """ term restrict_egress { comment:: "Generic description" destination-address:: PUBLIC_NAT protocol:: tcp icmp udp action:: next } """ TERM_DENY_INGRESS = """ term default-deny-ingress { comment:: "Generic description" action:: deny } """ TERM_DENY_EGRESS = """ term default-deny-egress { comment:: "Generic description" action:: deny } """ TERM_WITH_TARGET_RESOURCES = """ term default-deny-ingress-on-target { comment:: "Generic description" source-address:: ANY action:: deny target-resources:: (project1, vpc1) target-resources:: (project2, vpc2) } """ TERM_WITH_TARGET_RESOURCES_2 = """ term default-deny-ingress-on-target { comment:: "Generic description" source-address:: ANY action:: deny target-resources:: [(project1, vpc1),(project2,vpc2)] } """ TERM_WITH_LOGGING = """ term term-with-logging { comment:: "Generic description" source-address:: ANY protocol:: tcp action:: accept logging:: true } """ TERM_NO_COMMENT = """ term allow-internal-traffic { source-address:: INTERNAL protocol:: tcp icmp udp action:: next } """ TERM_LONG_COMMENT = """ term allow-internal-traffic { comment:: "This is a very long description, it is longer than sixty-four chars" source-address:: INTERNAL protocol:: tcp icmp udp action:: next } """ TERM_NUMBERED_PROTOCOL = """ term term-numbered-protocol { comment:: "Generic description" source-address:: PUBLIC_NAT protocol:: igmp action:: next } """ BAD_TERM_USING_SOURCE_TAG = """ term bad-term-with-tag { comment:: "Generic description" source-address:: PUBLIC_NAT source-tag:: a-tag protocol:: tcp icmp udp action:: next } """ BAD_TERM_USING_DEST_TAG = """ term bad-term-with-tag { comment:: "Generic description" source-address:: PUBLIC_NAT destination-tag:: a-tag protocol:: tcp icmp udp action:: next } """ BAD_TERM_SOURCE_PORT = """ term allow-traffic-from-port { comment:: "Generic description" destination-address:: INTERNAL protocol:: tcp source-port:: PORT action:: next } """ BAD_TERM_TARGET_RESOURCES = """ term hf-too-many-target-resources { 
comment:: "Generic description" destination-address:: INTERNAL protocol:: tcp target-resources:: (proj1,vpc1) target-resources:: (proj2,vpc2) target-resources:: (proj3,vpc3) target-resources:: (proj4,vpc4) target-resources:: (proj5,vpc5) target-resources:: (proj6,vpc6) target-resources:: (proj7,vpc7) target-resources:: (proj8,vpc8) target-resources:: (proj9,vpc9) target-resources:: (proj10,vpc10) target-resources:: (proj11,vpc11) target-resources:: (proj12,vpc12) target-resources:: (proj13,vpc13) target-resources:: (proj14,vpc14) target-resources:: (proj15,vpc15) target-resources:: (proj16,vpc16) target-resources:: (proj17,vpc17) target-resources:: (proj18,vpc18) target-resources:: (proj19,vpc19) target-resources:: (proj20,vpc20) target-resources:: (proj21,vpc21) target-resources:: (proj22,vpc22) target-resources:: (proj23,vpc23) target-resources:: (proj24,vpc24) target-resources:: (proj25,vpc25) target-resources:: (proj26,vpc26) target-resources:: (proj27,vpc27) target-resources:: (proj28,vpc28) target-resources:: (proj29,vpc29) target-resources:: (proj30,vpc30) target-resources:: (proj31,vpc31) target-resources:: (proj32,vpc32) target-resources:: (proj33,vpc33) target-resources:: (proj34,vpc34) target-resources:: (proj35,vpc35) target-resources:: (proj36,vpc36) target-resources:: (proj37,vpc37) target-resources:: (proj38,vpc38) target-resources:: (proj39,vpc39) target-resources:: (proj40,vpc40) target-resources:: (proj41,vpc41) target-resources:: (proj42,vpc42) target-resources:: (proj43,vpc43) target-resources:: (proj44,vpc44) target-resources:: (proj45,vpc45) target-resources:: (proj46,vpc46) target-resources:: (proj47,vpc47) target-resources:: (proj48,vpc48) target-resources:: (proj49,vpc49) target-resources:: (proj50,vpc50) target-resources:: (proj51,vpc51) target-resources:: (proj52,vpc52) target-resources:: (proj53,vpc53) target-resources:: (proj54,vpc54) target-resources:: (proj55,vpc55) target-resources:: (proj56,vpc56) target-resources:: (proj57,vpc57) target-resources:: (proj58,vpc58) target-resources:: (proj59,vpc59) target-resources:: (proj60,vpc60) target-resources:: (proj61,vpc61) target-resources:: (proj62,vpc62) target-resources:: (proj63,vpc63) target-resources:: (proj64,vpc64) target-resources:: (proj65,vpc65) target-resources:: (proj66,vpc66) target-resources:: (proj67,vpc67) target-resources:: (proj68,vpc68) target-resources:: (proj69,vpc69) target-resources:: (proj70,vpc70) target-resources:: (proj71,vpc71) target-resources:: (proj72,vpc72) target-resources:: (proj73,vpc73) target-resources:: (proj74,vpc74) target-resources:: (proj75,vpc75) target-resources:: (proj76,vpc76) target-resources:: (proj77,vpc77) target-resources:: (proj78,vpc78) target-resources:: (proj79,vpc79) target-resources:: (proj80,vpc80) target-resources:: (proj81,vpc81) target-resources:: (proj82,vpc82) target-resources:: (proj83,vpc83) target-resources:: (proj84,vpc84) target-resources:: (proj85,vpc85) target-resources:: (proj86,vpc86) target-resources:: (proj87,vpc87) target-resources:: (proj88,vpc88) target-resources:: (proj89,vpc89) target-resources:: (proj90,vpc90) target-resources:: (proj91,vpc91) target-resources:: (proj92,vpc92) target-resources:: (proj93,vpc93) target-resources:: (proj94,vpc94) target-resources:: (proj95,vpc95) target-resources:: (proj96,vpc96) target-resources:: (proj97,vpc97) target-resources:: (proj98,vpc98) target-resources:: (proj99,vpc99) target-resources:: (proj100,vpc100) target-resources:: (proj101,vpc101) target-resources:: (proj102,vpc102) target-resources:: 
(proj103,vpc103) target-resources:: (proj104,vpc104) target-resources:: (proj105,vpc105) target-resources:: (proj106,vpc106) target-resources:: (proj107,vpc107) target-resources:: (proj108,vpc108) target-resources:: (proj109,vpc109) target-resources:: (proj110,vpc110) target-resources:: (proj111,vpc111) target-resources:: (proj112,vpc112) target-resources:: (proj113,vpc113) target-resources:: (proj114,vpc114) target-resources:: (proj115,vpc115) target-resources:: (proj116,vpc116) target-resources:: (proj117,vpc117) target-resources:: (proj118,vpc118) target-resources:: (proj119,vpc119) target-resources:: (proj120,vpc120) target-resources:: (proj121,vpc121) target-resources:: (proj122,vpc122) target-resources:: (proj123,vpc123) target-resources:: (proj124,vpc124) target-resources:: (proj125,vpc125) target-resources:: (proj126,vpc126) target-resources:: (proj127,vpc127) target-resources:: (proj128,vpc128) target-resources:: (proj129,vpc129) target-resources:: (proj130,vpc130) target-resources:: (proj131,vpc131) target-resources:: (proj132,vpc132) target-resources:: (proj133,vpc133) target-resources:: (proj134,vpc134) target-resources:: (proj135,vpc135) target-resources:: (proj136,vpc136) target-resources:: (proj137,vpc137) target-resources:: (proj138,vpc138) target-resources:: (proj139,vpc139) target-resources:: (proj140,vpc140) target-resources:: (proj141,vpc141) target-resources:: (proj142,vpc142) target-resources:: (proj143,vpc143) target-resources:: (proj144,vpc144) target-resources:: (proj145,vpc145) target-resources:: (proj146,vpc146) target-resources:: (proj147,vpc147) target-resources:: (proj148,vpc148) target-resources:: (proj149,vpc149) target-resources:: (proj150,vpc150) target-resources:: (proj151,vpc151) target-resources:: (proj152,vpc152) target-resources:: (proj153,vpc153) target-resources:: (proj154,vpc154) target-resources:: (proj155,vpc155) target-resources:: (proj156,vpc156) target-resources:: (proj157,vpc157) target-resources:: (proj158,vpc158) target-resources:: (proj159,vpc159) target-resources:: (proj160,vpc160) target-resources:: (proj161,vpc161) target-resources:: (proj162,vpc162) target-resources:: (proj163,vpc163) target-resources:: (proj164,vpc164) target-resources:: (proj165,vpc165) target-resources:: (proj166,vpc166) target-resources:: (proj167,vpc167) target-resources:: (proj168,vpc168) target-resources:: (proj169,vpc169) target-resources:: (proj170,vpc170) target-resources:: (proj171,vpc171) target-resources:: (proj172,vpc172) target-resources:: (proj173,vpc173) target-resources:: (proj174,vpc174) target-resources:: (proj175,vpc175) target-resources:: (proj176,vpc176) target-resources:: (proj177,vpc177) target-resources:: (proj178,vpc178) target-resources:: (proj179,vpc179) target-resources:: (proj180,vpc180) target-resources:: (proj181,vpc181) target-resources:: (proj182,vpc182) target-resources:: (proj183,vpc183) target-resources:: (proj184,vpc184) target-resources:: (proj185,vpc185) target-resources:: (proj186,vpc186) target-resources:: (proj187,vpc187) target-resources:: (proj188,vpc188) target-resources:: (proj189,vpc189) target-resources:: (proj190,vpc190) target-resources:: (proj191,vpc191) target-resources:: (proj192,vpc192) target-resources:: (proj193,vpc193) target-resources:: (proj194,vpc194) target-resources:: (proj195,vpc195) target-resources:: (proj196,vpc196) target-resources:: (proj197,vpc197) target-resources:: (proj198,vpc198) target-resources:: (proj199,vpc199) target-resources:: (proj200,vpc200) target-resources:: (proj201,vpc201) 
target-resources:: (proj202,vpc202) target-resources:: (proj203,vpc203) target-resources:: (proj204,vpc204) target-resources:: (proj205,vpc205) target-resources:: (proj206,vpc206) target-resources:: (proj207,vpc207) target-resources:: (proj208,vpc208) target-resources:: (proj209,vpc209) target-resources:: (proj210,vpc210) target-resources:: (proj211,vpc211) target-resources:: (proj212,vpc212) target-resources:: (proj213,vpc213) target-resources:: (proj214,vpc214) target-resources:: (proj215,vpc215) target-resources:: (proj216,vpc216) target-resources:: (proj217,vpc217) target-resources:: (proj218,vpc218) target-resources:: (proj219,vpc219) target-resources:: (proj220,vpc220) target-resources:: (proj221,vpc221) target-resources:: (proj222,vpc222) target-resources:: (proj223,vpc223) target-resources:: (proj224,vpc224) target-resources:: (proj225,vpc225) target-resources:: (proj226,vpc226) target-resources:: (proj227,vpc227) target-resources:: (proj228,vpc228) target-resources:: (proj229,vpc229) target-resources:: (proj230,vpc230) target-resources:: (proj231,vpc231) target-resources:: (proj232,vpc232) target-resources:: (proj233,vpc233) target-resources:: (proj234,vpc234) target-resources:: (proj235,vpc235) target-resources:: (proj236,vpc236) target-resources:: (proj237,vpc237) target-resources:: (proj238,vpc238) target-resources:: (proj239,vpc239) target-resources:: (proj240,vpc240) target-resources:: (proj241,vpc241) target-resources:: (proj242,vpc242) target-resources:: (proj243,vpc243) target-resources:: (proj244,vpc244) target-resources:: (proj245,vpc245) target-resources:: (proj246,vpc246) target-resources:: (proj247,vpc247) target-resources:: (proj248,vpc248) target-resources:: (proj249,vpc249) target-resources:: (proj250,vpc250) target-resources:: (proj251,vpc251) target-resources:: (proj252,vpc252) target-resources:: (proj253,vpc253) target-resources:: (proj254,vpc254) target-resources:: (proj255,vpc255) target-resources:: (proj256,vpc256) target-resources:: (proj257,vpc257) action:: next } """ BAD_TERM_DESTINATION_PORTS = """ term hf-too-many-destination-ports { comment:: "Generic description" source-address:: INTERNAL destination-port:: TP2000 destination-port:: TP2001 destination-port:: TP2002 destination-port:: TP2003 destination-port:: TP2004 destination-port:: TP2005 destination-port:: TP2006 destination-port:: TP2007 destination-port:: TP2008 destination-port:: TP2009 destination-port:: TP2010 destination-port:: TP2011 destination-port:: TP2012 destination-port:: TP2013 destination-port:: TP2014 destination-port:: TP2015 destination-port:: TP2016 destination-port:: TP2017 destination-port:: TP2018 destination-port:: TP2019 destination-port:: TP2020 destination-port:: TP2021 destination-port:: TP2022 destination-port:: TP2023 destination-port:: TP2024 destination-port:: TP2025 destination-port:: TP2026 destination-port:: TP2027 destination-port:: TP2028 destination-port:: TP2029 destination-port:: TP2030 destination-port:: TP2031 destination-port:: TP2032 destination-port:: TP2033 destination-port:: TP2034 destination-port:: TP2035 destination-port:: TP2036 destination-port:: TP2037 destination-port:: TP2038 destination-port:: TP2039 destination-port:: TP2040 destination-port:: TP2041 destination-port:: TP2042 destination-port:: TP2043 destination-port:: TP2044 destination-port:: TP2045 destination-port:: TP2046 destination-port:: TP2047 destination-port:: TP2048 destination-port:: TP2049 destination-port:: TP2050 destination-port:: TP2051 destination-port:: TP2052 
destination-port:: TP2053 destination-port:: TP2054 destination-port:: TP2055 destination-port:: TP2056 destination-port:: TP2057 destination-port:: TP2058 destination-port:: TP2059 destination-port:: TP2060 destination-port:: TP2061 destination-port:: TP2062 destination-port:: TP2063 destination-port:: TP2064 destination-port:: TP2065 destination-port:: TP2066 destination-port:: TP2067 destination-port:: TP2068 destination-port:: TP2069 destination-port:: TP2070 destination-port:: TP2071 destination-port:: TP2072 destination-port:: TP2073 destination-port:: TP2074 destination-port:: TP2075 destination-port:: TP2076 destination-port:: TP2077 destination-port:: TP2078 destination-port:: TP2079 destination-port:: TP2080 destination-port:: TP2081 destination-port:: TP2082 destination-port:: TP2083 destination-port:: TP2084 destination-port:: TP2085 destination-port:: TP2086 destination-port:: TP2087 destination-port:: TP2088 destination-port:: TP2089 destination-port:: TP2090 destination-port:: TP2091 destination-port:: TP2092 destination-port:: TP2093 destination-port:: TP2094 destination-port:: TP2095 destination-port:: TP2096 destination-port:: TP2097 destination-port:: TP2098 destination-port:: TP2099 destination-port:: TP2100 destination-port:: TP2101 destination-port:: TP2102 destination-port:: TP2103 destination-port:: TP2104 destination-port:: TP2105 destination-port:: TP2106 destination-port:: TP2107 destination-port:: TP2108 destination-port:: TP2109 destination-port:: TP2110 destination-port:: TP2111 destination-port:: TP2112 destination-port:: TP2113 destination-port:: TP2114 destination-port:: TP2115 destination-port:: TP2116 destination-port:: TP2117 destination-port:: TP2118 destination-port:: TP2119 destination-port:: TP2120 destination-port:: TP2121 destination-port:: TP2122 destination-port:: TP2123 destination-port:: TP2124 destination-port:: TP2125 destination-port:: TP2126 destination-port:: TP2127 destination-port:: TP2128 destination-port:: TP2129 destination-port:: TP2130 destination-port:: TP2131 destination-port:: TP2132 destination-port:: TP2133 destination-port:: TP2134 destination-port:: TP2135 destination-port:: TP2136 destination-port:: TP2137 destination-port:: TP2138 destination-port:: TP2139 destination-port:: TP2140 destination-port:: TP2141 destination-port:: TP2142 destination-port:: TP2143 destination-port:: TP2144 destination-port:: TP2145 destination-port:: TP2146 destination-port:: TP2147 destination-port:: TP2148 destination-port:: TP2149 destination-port:: TP2150 destination-port:: TP2151 destination-port:: TP2152 destination-port:: TP2153 destination-port:: TP2154 destination-port:: TP2155 destination-port:: TP2156 destination-port:: TP2157 destination-port:: TP2158 destination-port:: TP2159 destination-port:: TP2160 destination-port:: TP2161 destination-port:: TP2162 destination-port:: TP2163 destination-port:: TP2164 destination-port:: TP2165 destination-port:: TP2166 destination-port:: TP2167 destination-port:: TP2168 destination-port:: TP2169 destination-port:: TP2170 destination-port:: TP2171 destination-port:: TP2172 destination-port:: TP2173 destination-port:: TP2174 destination-port:: TP2175 destination-port:: TP2176 destination-port:: TP2177 destination-port:: TP2178 destination-port:: TP2179 destination-port:: TP2180 destination-port:: TP2181 destination-port:: TP2182 destination-port:: TP2183 destination-port:: TP2184 destination-port:: TP2185 destination-port:: TP2186 destination-port:: TP2187 destination-port:: TP2188 destination-port:: 
TP2189 destination-port:: TP2190 destination-port:: TP2191 destination-port:: TP2192 destination-port:: TP2193 destination-port:: TP2194 destination-port:: TP2195 destination-port:: TP2196 destination-port:: TP2197 destination-port:: TP2198 destination-port:: TP2199 destination-port:: TP2200 destination-port:: TP2201 destination-port:: TP2202 destination-port:: TP2203 destination-port:: TP2204 destination-port:: TP2205 destination-port:: TP2206 destination-port:: TP2207 destination-port:: TP2208 destination-port:: TP2209 destination-port:: TP2210 destination-port:: TP2211 destination-port:: TP2212 destination-port:: TP2213 destination-port:: TP2214 destination-port:: TP2215 destination-port:: TP2216 destination-port:: TP2217 destination-port:: TP2218 destination-port:: TP2219 destination-port:: TP2220 destination-port:: TP2221 destination-port:: TP2222 destination-port:: TP2223 destination-port:: TP2224 destination-port:: TP2225 destination-port:: TP2226 destination-port:: TP2227 destination-port:: TP2228 destination-port:: TP2229 destination-port:: TP2230 destination-port:: TP2231 destination-port:: TP2232 destination-port:: TP2233 destination-port:: TP2234 destination-port:: TP2235 destination-port:: TP2236 destination-port:: TP2237 destination-port:: TP2238 destination-port:: TP2239 destination-port:: TP2240 destination-port:: TP2241 destination-port:: TP2242 destination-port:: TP2243 destination-port:: TP2244 destination-port:: TP2245 destination-port:: TP2246 destination-port:: TP2247 destination-port:: TP2248 destination-port:: TP2249 destination-port:: TP2250 destination-port:: TP2251 destination-port:: TP2252 destination-port:: TP2253 destination-port:: TP2254 destination-port:: TP2255 destination-port:: TP2256 destination-port:: TP2257 destination-port:: TP2258 destination-port:: TP2259 protocol:: tcp action:: next } """ BAD_TERM_IP_VERSION_MISMATCH = """ term icmpv6-in-inet-term { comment:: "Generic description" source-address:: INTERNAL protocol:: icmpv6 action:: next } """ BAD_TERM_ICMP_VERSION_MISMATCH = """ term icmp-in-inet6-term { comment:: "Generic description" source-address:: INTERNAL protocol:: icmp action:: next } """ BAD_TERM_IGMP_VERSION_MISMATCH = """ term igmp-in-inet6-term { comment:: "Generic description" source-address:: INTERNAL protocol:: igmp action:: next } """ BAD_TERM_OPTIONS = """ term term-with-options { comment:: "Generic description" destination-address:: INTERNAL option:: TCP_ESTABLISHED action:: next } """ BAD_TERM_NON_VALID_PROJECT_ID = """ term default-deny-ingress-on-target { comment:: "Generic description" protocol:: tcp source-address:: ANY action:: deny target-resources:: (proj, vpc1) } """ BAD_TERM_NON_VALID_VPC_NAME = """ term default-deny-ingress-on-target { comment:: "Generic description" protocol:: tcp source-address:: ANY action:: deny target-resources:: (project, Vpc) } """ EXPECTED_ONE_RULE_INGRESS_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-internal-traffic: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_ONE_RULE_INGRESS_W_LOGGING_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "allow", "description": "term-with-logging: Generic description", "direction": "INGRESS", "match": { 
"config": { "layer4Configs": [ { "ipProtocol": "tcp" } ], "srcIpRanges": ["10.0.0.0/8"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": true } ] } ] """ EXPECTED_ONE_RULE_EGRESS_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "restrict_egress: Generic description", "direction": "EGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "destIpRanges": ["10.0.0.0/8"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_MULTIPLE_RULE_INGRESS_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-internal-traffic: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 1 }, { "action": "goto_next", "description": "allow-dns-traffic: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["53"] }, { "ipProtocol": "udp", "ports": ["53"] } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 2 } ] } ] """ EXPECTED_MULTIPLE_RULE_INGRESS_W_DENY = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["10.0.0.0/8"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false }, { "action": "deny", "description": "Generic description", "direction": "INGRESS", "match": { "config": { "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 2, "enableLogging": false } ] } ] """ EXPECTED_PORT_RANGE_INGRESS_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-port-range: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["8000-9000"] } ], "srcIpRanges": ["10.0.0.0/8"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_DENY_INGRESS_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-ingress: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "all" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_IPV6_DENY_INGRESS_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-ingress: Generic description", "direction": "INGRESS", "match": { "layer4Configs": [ { "ipProtocol": "all" } ], "srcIpRanges": ["::/0"], "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_MIXED_DENY_INGRESS_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-ingress: Generic description", "direction": "INGRESS", "match": { "layer4Configs": [ { "ipProtocol": "all" } ], "srcIpRanges": ["0.0.0.0/0"], "versionedExpr": "FIREWALL" }, "priority": 1, 
"enableLogging": false }, { "action": "deny", "description": "default-deny-ingress-v6: Generic description", "direction": "INGRESS", "match": { "layer4Configs": [ { "ipProtocol": "all" } ], "srcIpRanges": ["::/0"], "versionedExpr": "FIREWALL" }, "priority": 2, "enableLogging": false } ] } ] """ EXPECTED_DENY_INGRESS_ON_TARGET_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-ingress-on-target: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "all" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false, "targetResources": ["https://www.googleapis.com/compute/v1/projects/project1/global/networks/vpc1", "https://www.googleapis.com/compute/v1/projects/project2/global/networks/vpc2"] } ] } ] """ EXPECTED_INGRESS_AND_EGRESS_W_DENY_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-internal-traffic: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false }, { "action": "deny", "description": "default-deny-ingress: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "all" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 2, "enableLogging": false }, { "action": "goto_next", "description": "restrict_egress: Generic description", "direction": "EGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "destIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 3, "enableLogging": false }, { "action": "deny", "description": "default-deny-egress: Generic description", "direction": "EGRESS", "match": { "config": { "destIpRanges": ["0.0.0.0/0"], "layer4Configs": [ { "ipProtocol": "all" } ] }, "versionedExpr": "FIREWALL" }, "priority": 4, "enableLogging": false } ] } ] """ EXPECTED_DENY_EGRESS_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-egress: Generic description", "direction": "EGRESS", "match": { "config": { "destIpRanges": ["0.0.0.0/0"], "layer4Configs": [ { "ipProtocol": "all" } ] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_IPV6_DENY_EGRESS_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-egress: Generic description", "direction": "EGRESS", "match": { "destIpRanges": ["::/0"], "layer4Configs": [ { "ipProtocol": "all" } ], "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_MIXED_DENY_EGRESS_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "deny", "description": "default-deny-egress: Generic description", "direction": "EGRESS", "match": { "destIpRanges": ["0.0.0.0/0"], "layer4Configs": [ { "ipProtocol": "all" } ], "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false }, { "action": "deny", "description": "default-deny-egress-v6: Generic description", "direction": "EGRESS", "match": { "destIpRanges": ["::/0"], "layer4Configs": [ { "ipProtocol": "all" } ], "versionedExpr": "FIREWALL" }, "priority": 
2, "enableLogging": false } ] } ] """ EXPECTED_COST_OF_ONE = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": ["10.1.1.0/24"]/t }, "versionedExpr": "FIREWALL" }, "priority": 1 } ] } ] """ EXPECTED_CHUNKED_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": [ "10.0.0.1/32", "10.0.1.1/32", "192.168.0.0/32", "192.168.1.0/32", "192.168.2.0/32", "192.168.3.0/32", "192.168.4.0/32", "192.168.5.0/32", "192.168.6.0/32", "192.168.7.0/32", "192.168.8.0/32", "192.168.9.0/32", "192.168.10.0/32", "192.168.11.0/32", "192.168.12.0/32", "192.168.13.0/32", "192.168.14.0/32", "192.168.15.0/32", "192.168.16.0/32", "192.168.17.0/32", "192.168.18.0/32", "192.168.19.0/32", "192.168.20.0/32", "192.168.21.0/32", "192.168.22.0/32", "192.168.23.0/32", "192.168.24.0/32", "192.168.25.0/32", "192.168.26.0/32", "192.168.27.0/32", "192.168.28.0/32", "192.168.29.0/32", "192.168.30.0/32", "192.168.31.0/32", "192.168.32.0/32", "192.168.33.0/32", "192.168.34.0/32", "192.168.35.0/32", "192.168.36.0/32", "192.168.37.0/32", "192.168.38.0/32", "192.168.39.0/32", "192.168.40.0/32", "192.168.41.0/32", "192.168.42.0/32", "192.168.43.0/32", "192.168.44.0/32", "192.168.45.0/32", "192.168.46.0/32", "192.168.47.0/32", "192.168.48.0/32", "192.168.49.0/32", "192.168.50.0/32", "192.168.51.0/32", "192.168.52.0/32", "192.168.53.0/32", "192.168.54.0/32", "192.168.55.0/32", "192.168.56.0/32", "192.168.57.0/32", "192.168.58.0/32", "192.168.59.0/32", "192.168.60.0/32", "192.168.61.0/32", "192.168.62.0/32", "192.168.63.0/32", "192.168.64.0/32", "192.168.65.0/32", "192.168.66.0/32", "192.168.67.0/32", "192.168.68.0/32", "192.168.69.0/32", "192.168.70.0/32", "192.168.71.0/32", "192.168.72.0/32", "192.168.73.0/32", "192.168.74.0/32", "192.168.75.0/32", "192.168.76.0/32", "192.168.77.0/32", "192.168.78.0/32", "192.168.79.0/32", "192.168.80.0/32", "192.168.81.0/32", "192.168.82.0/32", "192.168.83.0/32", "192.168.84.0/32", "192.168.85.0/32", "192.168.86.0/32", "192.168.87.0/32", "192.168.88.0/32", "192.168.89.0/32", "192.168.90.0/32", "192.168.91.0/32", "192.168.92.0/32", "192.168.93.0/32", "192.168.94.0/32", "192.168.95.0/32", "192.168.96.0/32", "192.168.97.0/32", "192.168.98.0/32", "192.168.99.0/32", "192.168.100.0/32", "192.168.101.0/32", "192.168.102.0/32", "192.168.103.0/32", "192.168.104.0/32", "192.168.105.0/32", "192.168.106.0/32", "192.168.107.0/32", "192.168.108.0/32", "192.168.109.0/32", "192.168.110.0/32", "192.168.111.0/32", "192.168.112.0/32", "192.168.113.0/32", "192.168.114.0/32", "192.168.115.0/32", "192.168.116.0/32", "192.168.117.0/32", "192.168.118.0/32", "192.168.119.0/32", "192.168.120.0/32", "192.168.121.0/32", "192.168.122.0/32", "192.168.123.0/32", "192.168.124.0/32", "192.168.125.0/32", "192.168.126.0/32", "192.168.127.0/32", "192.168.128.0/32", "192.168.129.0/32", "192.168.130.0/32", "192.168.131.0/32", "192.168.132.0/32", "192.168.133.0/32", "192.168.134.0/32", "192.168.135.0/32", "192.168.136.0/32", "192.168.137.0/32", "192.168.138.0/32", "192.168.139.0/32", "192.168.140.0/32", "192.168.141.0/32", "192.168.142.0/32", 
"192.168.143.0/32", "192.168.144.0/32", "192.168.145.0/32", "192.168.146.0/32", "192.168.147.0/32", "192.168.148.0/32", "192.168.149.0/32", "192.168.150.0/32", "192.168.151.0/32", "192.168.152.0/32", "192.168.153.0/32", "192.168.154.0/32", "192.168.155.0/32", "192.168.156.0/32", "192.168.157.0/32", "192.168.158.0/32", "192.168.159.0/32", "192.168.160.0/32", "192.168.161.0/32", "192.168.162.0/32", "192.168.163.0/32", "192.168.164.0/32", "192.168.165.0/32", "192.168.166.0/32", "192.168.167.0/32", "192.168.168.0/32", "192.168.169.0/32", "192.168.170.0/32", "192.168.171.0/32", "192.168.172.0/32", "192.168.173.0/32", "192.168.174.0/32", "192.168.175.0/32", "192.168.176.0/32", "192.168.177.0/32", "192.168.178.0/32", "192.168.179.0/32", "192.168.180.0/32", "192.168.181.0/32", "192.168.182.0/32", "192.168.183.0/32", "192.168.184.0/32", "192.168.185.0/32", "192.168.186.0/32", "192.168.187.0/32", "192.168.188.0/32", "192.168.189.0/32", "192.168.190.0/32", "192.168.191.0/32", "192.168.192.0/32", "192.168.193.0/32", "192.168.194.0/32", "192.168.195.0/32", "192.168.196.0/32", "192.168.197.0/32", "192.168.198.0/32", "192.168.199.0/32", "192.168.200.0/32", "192.168.201.0/32", "192.168.202.0/32", "192.168.203.0/32", "192.168.204.0/32", "192.168.205.0/32", "192.168.206.0/32", "192.168.207.0/32", "192.168.208.0/32", "192.168.209.0/32", "192.168.210.0/32", "192.168.211.0/32", "192.168.212.0/32", "192.168.213.0/32", "192.168.214.0/32", "192.168.215.0/32", "192.168.216.0/32", "192.168.217.0/32", "192.168.218.0/32", "192.168.219.0/32", "192.168.220.0/32", "192.168.221.0/32", "192.168.222.0/32", "192.168.223.0/32", "192.168.224.0/32", "192.168.225.0/32", "192.168.226.0/32", "192.168.227.0/32", "192.168.228.0/32", "192.168.229.0/32", "192.168.230.0/32", "192.168.231.0/32", "192.168.232.0/32", "192.168.233.0/32", "192.168.234.0/32", "192.168.235.0/32", "192.168.236.0/32", "192.168.237.0/32", "192.168.238.0/32", "192.168.239.0/32", "192.168.240.0/32", "192.168.241.0/32", "192.168.242.0/32", "192.168.243.0/32", "192.168.244.0/32", "192.168.245.0/32", "192.168.246.0/32", "192.168.247.0/32", "192.168.248.0/32", "192.168.249.0/32", "192.168.250.0/32", "192.168.251.0/32", "192.168.252.0/32", "192.168.253.0/32" ] }, "versionedExpr": "FIREWALL" }, "priority": 1 }, { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": [ "192.168.254.0/32", "192.168.255.0/32" ] }, "versionedExpr": "FIREWALL" }, "priority": 2 } ] } ] """ EXPECTED_EGRESS_CHUNKED_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "EGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "destIpRanges": [ "10.0.0.1/32", "10.0.1.1/32", "192.168.0.0/32", "192.168.1.0/32", "192.168.2.0/32", "192.168.3.0/32", "192.168.4.0/32", "192.168.5.0/32", "192.168.6.0/32", "192.168.7.0/32", "192.168.8.0/32", "192.168.9.0/32", "192.168.10.0/32", "192.168.11.0/32", "192.168.12.0/32", "192.168.13.0/32", "192.168.14.0/32", "192.168.15.0/32", "192.168.16.0/32", "192.168.17.0/32", "192.168.18.0/32", "192.168.19.0/32", "192.168.20.0/32", "192.168.21.0/32", "192.168.22.0/32", "192.168.23.0/32", "192.168.24.0/32", "192.168.25.0/32", "192.168.26.0/32", "192.168.27.0/32", "192.168.28.0/32", "192.168.29.0/32", 
"192.168.30.0/32", "192.168.31.0/32", "192.168.32.0/32", "192.168.33.0/32", "192.168.34.0/32", "192.168.35.0/32", "192.168.36.0/32", "192.168.37.0/32", "192.168.38.0/32", "192.168.39.0/32", "192.168.40.0/32", "192.168.41.0/32", "192.168.42.0/32", "192.168.43.0/32", "192.168.44.0/32", "192.168.45.0/32", "192.168.46.0/32", "192.168.47.0/32", "192.168.48.0/32", "192.168.49.0/32", "192.168.50.0/32", "192.168.51.0/32", "192.168.52.0/32", "192.168.53.0/32", "192.168.54.0/32", "192.168.55.0/32", "192.168.56.0/32", "192.168.57.0/32", "192.168.58.0/32", "192.168.59.0/32", "192.168.60.0/32", "192.168.61.0/32", "192.168.62.0/32", "192.168.63.0/32", "192.168.64.0/32", "192.168.65.0/32", "192.168.66.0/32", "192.168.67.0/32", "192.168.68.0/32", "192.168.69.0/32", "192.168.70.0/32", "192.168.71.0/32", "192.168.72.0/32", "192.168.73.0/32", "192.168.74.0/32", "192.168.75.0/32", "192.168.76.0/32", "192.168.77.0/32", "192.168.78.0/32", "192.168.79.0/32", "192.168.80.0/32", "192.168.81.0/32", "192.168.82.0/32", "192.168.83.0/32", "192.168.84.0/32", "192.168.85.0/32", "192.168.86.0/32", "192.168.87.0/32", "192.168.88.0/32", "192.168.89.0/32", "192.168.90.0/32", "192.168.91.0/32", "192.168.92.0/32", "192.168.93.0/32", "192.168.94.0/32", "192.168.95.0/32", "192.168.96.0/32", "192.168.97.0/32", "192.168.98.0/32", "192.168.99.0/32", "192.168.100.0/32", "192.168.101.0/32", "192.168.102.0/32", "192.168.103.0/32", "192.168.104.0/32", "192.168.105.0/32", "192.168.106.0/32", "192.168.107.0/32", "192.168.108.0/32", "192.168.109.0/32", "192.168.110.0/32", "192.168.111.0/32", "192.168.112.0/32", "192.168.113.0/32", "192.168.114.0/32", "192.168.115.0/32", "192.168.116.0/32", "192.168.117.0/32", "192.168.118.0/32", "192.168.119.0/32", "192.168.120.0/32", "192.168.121.0/32", "192.168.122.0/32", "192.168.123.0/32", "192.168.124.0/32", "192.168.125.0/32", "192.168.126.0/32", "192.168.127.0/32", "192.168.128.0/32", "192.168.129.0/32", "192.168.130.0/32", "192.168.131.0/32", "192.168.132.0/32", "192.168.133.0/32", "192.168.134.0/32", "192.168.135.0/32", "192.168.136.0/32", "192.168.137.0/32", "192.168.138.0/32", "192.168.139.0/32", "192.168.140.0/32", "192.168.141.0/32", "192.168.142.0/32", "192.168.143.0/32", "192.168.144.0/32", "192.168.145.0/32", "192.168.146.0/32", "192.168.147.0/32", "192.168.148.0/32", "192.168.149.0/32", "192.168.150.0/32", "192.168.151.0/32", "192.168.152.0/32", "192.168.153.0/32", "192.168.154.0/32", "192.168.155.0/32", "192.168.156.0/32", "192.168.157.0/32", "192.168.158.0/32", "192.168.159.0/32", "192.168.160.0/32", "192.168.161.0/32", "192.168.162.0/32", "192.168.163.0/32", "192.168.164.0/32", "192.168.165.0/32", "192.168.166.0/32", "192.168.167.0/32", "192.168.168.0/32", "192.168.169.0/32", "192.168.170.0/32", "192.168.171.0/32", "192.168.172.0/32", "192.168.173.0/32", "192.168.174.0/32", "192.168.175.0/32", "192.168.176.0/32", "192.168.177.0/32", "192.168.178.0/32", "192.168.179.0/32", "192.168.180.0/32", "192.168.181.0/32", "192.168.182.0/32", "192.168.183.0/32", "192.168.184.0/32", "192.168.185.0/32", "192.168.186.0/32", "192.168.187.0/32", "192.168.188.0/32", "192.168.189.0/32", "192.168.190.0/32", "192.168.191.0/32", "192.168.192.0/32", "192.168.193.0/32", "192.168.194.0/32", "192.168.195.0/32", "192.168.196.0/32", "192.168.197.0/32", "192.168.198.0/32", "192.168.199.0/32", "192.168.200.0/32", "192.168.201.0/32", "192.168.202.0/32", "192.168.203.0/32", "192.168.204.0/32", "192.168.205.0/32", "192.168.206.0/32", "192.168.207.0/32", "192.168.208.0/32", "192.168.209.0/32", "192.168.210.0/32", 
"192.168.211.0/32", "192.168.212.0/32", "192.168.213.0/32", "192.168.214.0/32", "192.168.215.0/32", "192.168.216.0/32", "192.168.217.0/32", "192.168.218.0/32", "192.168.219.0/32", "192.168.220.0/32", "192.168.221.0/32", "192.168.222.0/32", "192.168.223.0/32", "192.168.224.0/32", "192.168.225.0/32", "192.168.226.0/32", "192.168.227.0/32", "192.168.228.0/32", "192.168.229.0/32", "192.168.230.0/32", "192.168.231.0/32", "192.168.232.0/32", "192.168.233.0/32", "192.168.234.0/32", "192.168.235.0/32", "192.168.236.0/32", "192.168.237.0/32", "192.168.238.0/32", "192.168.239.0/32", "192.168.240.0/32", "192.168.241.0/32", "192.168.242.0/32", "192.168.243.0/32", "192.168.244.0/32", "192.168.245.0/32", "192.168.246.0/32", "192.168.247.0/32", "192.168.248.0/32", "192.168.249.0/32", "192.168.250.0/32", "192.168.251.0/32", "192.168.252.0/32", "192.168.253.0/32" ] }, "versionedExpr": "FIREWALL" }, "priority": 1 }, { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "EGRESS", "enableLogging": false, "match": { "config": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "destIpRanges": [ "192.168.254.0/32", "192.168.255.0/32" ] }, "versionedExpr": "FIREWALL" }, "priority": 2 } ] } ] """ EXPECTED_ONE_RULE_NUMBERED_PROTOCOL_BETA = """ [ { "displayName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "term-numbered-protocol: Generic description", "direction": "INGRESS", "match": { "config": { "layer4Configs": [ { "ipProtocol": "2" } ], "srcIpRanges": ["0.0.0.0/0"] }, "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_ONE_RULE_IPV6_PROTOCOL_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "INGRESS", "match": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": ["2001:4860:8000::5/128"], "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_ONE_RULE_MIXED_IPV6_PROTOCOL_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port-v6: Generic description", "direction": "INGRESS", "match": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": ["2001:4860:8000::5/128"], "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_ONE_RULE_MIXED_IPV4_PROTOCOL_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "INGRESS", "match": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": ["10.0.0.0/8"], "versionedExpr": "FIREWALL" }, "priority": 1, "enableLogging": false } ] } ] """ EXPECTED_MULTIPLE_MIXED_RULE_INGRESS_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-traffic-to-port: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": ["10.0.0.0/8"], "versionedExpr": "FIREWALL" }, "priority": 1 }, { "action": "goto_next", "description": "allow-traffic-to-port-v6: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "layer4Configs": [ { "ipProtocol": "tcp", "ports": ["80"] } ], "srcIpRanges": ["2001:4860:8000::5/128"], 
"versionedExpr": "FIREWALL" }, "priority": 2 } ] } ] """ EXPECTED_MULTIPLE_MIXED_RULE_INGRESS_WITH_ICMP_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-internal-traffic: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "icmp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["10.0.0.0/8"], "versionedExpr": "FIREWALL" }, "priority": 1 }, { "action": "goto_next", "description": "allow-internal-traffic-v6: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["2001:4860:8000::5/128"], "versionedExpr": "FIREWALL" }, "priority": 2 } ] } ] """ EXPECTED_MULTIPLE_MIXED_RULE_INGRESS_WITH_ICMPV6_GA = """ [ { "shortName": "displayname", "type": "FIREWALL", "rules": [ { "action": "goto_next", "description": "allow-internal-traffic: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["10.0.0.0/8"], "versionedExpr": "FIREWALL" }, "priority": 1 }, { "action": "goto_next", "description": "allow-internal-traffic-v6: Generic description", "direction": "INGRESS", "enableLogging": false, "match": { "layer4Configs": [ { "ipProtocol": "tcp" }, { "ipProtocol": "58" }, { "ipProtocol": "udp" } ], "srcIpRanges": ["2001:4860:8000::5/128"], "versionedExpr": "FIREWALL" }, "priority": 2 } ] } ] """ SUPPORTED_TOKENS = frozenset({ 'action', 'comment', 'destination_address', 'destination_port', 'destination_tag', 'logging', 'name', 'option', 'protocol', 'source_address', 'source_port', 'source_tag', 'stateless_reply', 'target_resources', 'translated', 'platform', 'platform_exclude', }) SUPPORTED_SUB_TOKENS = { 'action': { 'accept', 'deny', 'next' } } EXP_INFO = 2 TEST_IP = [nacaddr.IP('10.0.0.0/8')] TEST_IPV6_IP = [ nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('::ffff:a02:301/128'), # IPv4-mapped nacaddr.IP('2002::/16'), # 6to4 nacaddr.IP('::0000:a02:301/128'), # IPv4-compatible ] TEST_MIXED_IPS = [ nacaddr.IP('10.0.0.0/8'), nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('::ffff:a02:301/128'), # IPv4-mapped nacaddr.IP('2002::/16'), # 6to4 nacaddr.IP('::0000:a02:301/128'), # IPv4-compatible ] ALL_IPV4_IPS = [nacaddr.IP('0.0.0.0/0')] ALL_IPV6_IPS = [nacaddr.IP('::/0')] MANY_IPS = [nacaddr.IP('192.168.' 
+ str(x) +'.0/32') for x in range( 0, 256)] MANY_IPS.extend([nacaddr.IP('10.0.0.1'), nacaddr.IP('10.0.1.1')]) class GcpHfTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def _StripAclHeaders(self, acl): return '\n'.join([line for line in str(acl).split('\n') if not line.lstrip().startswith('#')]) def testDefaultHeader(self): """Test that a header without options is accepted.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionMaxHeader(self): """Test that a header with a default maximum cost is accepted.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MAX + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionEgressHeader(self): """Test that a header with direction is accepted.""" self.naming.GetNetAddr.return_value = TEST_IP acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS + TERM_RESTRICT_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_EGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionAFHeader(self): """Test that a header with address family is accepted.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_AF + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionEgressAndMaxHeader(self): """Test a header with direction and default maximum cost is accepted.""" self.naming.GetNetAddr.return_value = TEST_IP acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_AND_MAX + TERM_RESTRICT_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_EGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionEgressAndAF(self): """Test a header with a direction and address family is accepted.""" self.naming.GetNetAddr.return_value = TEST_IP acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_AND_AF + TERM_RESTRICT_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_EGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionMaxAndAF(self): """Test a header with default maximum cost & address family is accepted.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MAX_AND_AF + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testOptionApiVersionAFHeader(self): """Test that a header with api_version is accepted.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_BETA + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def 
testRaisesHeaderErrorOnUnknownOption(self): """Test that an unknown header option raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_UNKNOWN_OPTION + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnUnknownDirection(self): """Test that an unknown direction option raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_UNKNOWN_DIRECTION + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnInvalidMaxCost(self): """Test that a maximum default cost over 2^16 raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy( BAD_HEADER_INVALID_MAX_COST + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnUnequalMaxCostInMultiplePolicies(self): """Test that unequal max costs across multiple policies raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy( HEADER_OPTION_MAX + TERM_ALLOW_ALL_INTERNAL + HEADER_OPTION_HIGH_QUOTA + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnUnequalMaxCostInMultiplePoliciesWithDefault(self): """Test that unspecified, and set max costs across multiple policies raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy( HEADER_OPTION_MAX + TERM_ALLOW_ALL_INTERNAL + HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnLongDisplayName(self): """Test that a long displayName raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_LONG_DISPLAYNAME + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnHeaderWithoutDisplayName(self): """Test that a header without a policy name raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_NO_DISPLAYNAME + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnIncorrectDisplayName1(self): """Test that an invalid displayName raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_INVALID_DISPLAYNAME_1 + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnIncorrectDisplayName2(self): """Test that an invalid displayName raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_INVALID_DISPLAYNAME_2 + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesHeaderErrorOnIncorrectDisplayName3(self): """Test that an invalid displayName raises a HeaderError.""" with self.assertRaises(gcp.HeaderError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_INVALID_DISPLAYNAME_3 + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testRaisesTermErrorOnTermWithDestinationTag(self): """Test that a term with a destination tag raises an error. Tags are not supported in HF. """ with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_USING_DEST_TAG, self.naming), EXP_INFO) def testRaisesTermErrorOnTermWithSourceTag(self): """Test that a term with a source tag raises an error. Tags are not supported in HF. 
""" with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_USING_SOURCE_TAG, self.naming), EXP_INFO) def testTermWithNumberedProtocol(self): """Test that a protocol number is supported.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_NUMBERED_PROTOCOL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_NUMBERED_PROTOCOL_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testRaisesTermErrorOnTermWithSourcePort(self): """Test that a term with a source port raises Term error.""" self.naming.GetNetAddr.return_value = TEST_IP self.naming.GetServiceByProto.side_effect = [['53']] with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_SOURCE_PORT, self.naming), EXP_INFO) def testRaisesTermErrorOnTermWithTooManyTargetResources(self): """Test that a term with > 256 targetResources raises TermError.""" self.naming.GetNetAddr.return_value = TEST_IP with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_TARGET_RESOURCES, self.naming), EXP_INFO) def testRaisesTermErrorOnTermWithTooManyDestinationPorts(self): """Test that a term with > 256 destination ports raises TermError.""" self.naming.GetNetAddr.return_value = TEST_IP # Create a list of 260 numbers to use as destination ports and raise an # error. # Using even numbers ensures that the port list does not get condensed to a # range. se_array = [] for x in range(2000, 2520): if x % 2 == 0: se_array.append([str(x)]) # Use destination port list to successively mock return values. self.naming.GetServiceByProto.side_effect = se_array with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_DESTINATION_PORTS, self.naming), EXP_INFO) def testRaisesTermErrorOnTermWithOptions(self): """Test that a term with a source port raises Term error.""" self.naming.GetNetAddr.return_value = TEST_IP with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_OPTIONS, self.naming), EXP_INFO) def testRaisesTermErrorOnInvalidProjectID(self): """Test that an invalid project ID on target resources raises Term error.""" self.naming.GetNetAddr.return_value = TEST_IP with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_NON_VALID_PROJECT_ID, self.naming), EXP_INFO) def testRaisesTermErrorOnInvalidVPCName(self): """Test that an invalid VPC name on target resources raises Term error.""" self.naming.GetNetAddr.return_value = TEST_IP with self.assertRaises(gcp.TermError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_NON_VALID_VPC_NAME, self.naming), EXP_INFO) def testRaisesDifferentPolicyNameErrorWhenDifferentPolicyNames(self): """Test that different policy names raises DifferentPolicyNameError.""" with self.assertRaises(gcp_hf.DifferentPolicyNameError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_DENY_INGRESS + HEADER_OPTION_EGRESS_2 + TERM_DENY_EGRESS, self.naming), EXP_INFO) def testIgnorePolicyFromADifferentPlatform(self): """Test that a policy with a header from a different platform is ignored.""" acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(BAD_HEADER_WRONG_PLATFORM + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) 
self.assertEqual([], json.loads(self._StripAclHeaders(str(acl)))) def testIgnoreTermWithPlatformExclude(self): """Test that a term with platform exclude is ignored.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy( HEADER_OPTION_AF + TERM_PLATFORM_EXCLUDE + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testTermWithPlatformExists(self): """Test that a term with platform is rendered.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy( HEADER_OPTION_AF + TERM_PLATFORM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testIgnoreTermWithICMPv6(self): """Test that a term with only an icmpv6 protocol is not rendered.""" self.naming.GetNetAddr.return_value = TEST_IP acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_AF + BAD_TERM_IP_VERSION_MISMATCH, self.naming), EXP_INFO) exp = [{'displayName': 'displayname', 'rules': [], 'type': 'FIREWALL'}] self.assertEqual(exp, json.loads(self._StripAclHeaders(str(acl)))) def testInet6IgnoreTermWithICMP(self): """Test that a term with only an icmp protocol is not rendered for inet6.""" self.naming.GetNetAddr.return_value = TEST_IP acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_INET6 + BAD_TERM_ICMP_VERSION_MISMATCH, self.naming), EXP_INFO) exp = [{'shortName': 'displayname', 'rules': [], 'type': 'FIREWALL'}] self.assertEqual(exp, json.loads(self._StripAclHeaders(str(acl)))) def testInet6IgnoreTermWithIGMP(self): """Test that a term with only an igmp protocol is not rendered for inet6.""" self.naming.GetNetAddr.return_value = TEST_IP acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_INET6 + BAD_TERM_IGMP_VERSION_MISMATCH, self.naming), EXP_INFO) exp = [{'shortName': 'displayname', 'rules': [], 'type': 'FIREWALL'}] self.assertEqual(exp, json.loads(self._StripAclHeaders(str(acl)))) def testInet6TermWithIPv6Addresses(self): """Test that IPv6 addresses are supported with inet6.""" self.naming.GetNetAddr.return_value = TEST_IPV6_IP self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_INET6 + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_IPV6_PROTOCOL_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInet6TermWithMixedAddresses(self): """Test that Mixed addresses are supported with inet6.""" self.naming.GetNetAddr.return_value = TEST_MIXED_IPS self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_INET6 + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_IPV6_PROTOCOL_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInet6TermWithIPv4Addresses(self): """Test that IPv4 addresses are not rendered with inet6.""" self.naming.GetNetAddr.return_value = TEST_IP self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_INET6 + TERM_ALLOW_PORT, self.naming), EXP_INFO) exp = [{'shortName': 'displayname', 'rules': [], 'type': 'FIREWALL'}] self.assertEqual(exp, json.loads(self._StripAclHeaders(str(acl)))) def 
testInetTermWithMixedAddresses(self): """Test that Mixed addresses are supported with inet.""" self.naming.GetNetAddr.return_value = TEST_MIXED_IPS self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_AND_AF + TERM_RESTRICT_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_EGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInetTermWithIPv6Addresses(self): """Test that IPv6 addresses are not rendered with inet.""" self.naming.GetNetAddr.return_value = TEST_IPV6_IP self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_AND_AF + TERM_RESTRICT_EGRESS, self.naming), EXP_INFO) exp = [{'displayName': 'displayname', 'rules': [], 'type': 'FIREWALL'}] self.assertEqual(exp, json.loads(self._StripAclHeaders(str(acl)))) def testMixedTermWithMixedAddresses(self): """Test that IPv4 and IPv6 addresses are supported with mixed.""" self.naming.GetNetAddr.return_value = TEST_MIXED_IPS self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MIXED + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MULTIPLE_MIXED_RULE_INGRESS_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedTermWithIPv4Addresses(self): """Test that IPv4 addresses are supported with mixed.""" self.naming.GetNetAddr.return_value = TEST_IP self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MIXED + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_MIXED_IPV4_PROTOCOL_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedTermWithIPv6Addresses(self): """Test that IPv6 addresses are supported with mixed.""" self.naming.GetNetAddr.return_value = TEST_IPV6_IP self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MIXED + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_MIXED_IPV6_PROTOCOL_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedTermWithICMP(self): """Test that ICMP protocol is supported with mixed.""" self.naming.GetNetAddr.return_value = TEST_MIXED_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MIXED + TERM_ALLOW_MULTIPLE_PROTOCOL, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MULTIPLE_MIXED_RULE_INGRESS_WITH_ICMP_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedTermWithICMPv6(self): """Test that ICMPv6 protocol is supported with mixed.""" self.naming.GetNetAddr.return_value = TEST_MIXED_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy( HEADER_OPTION_MIXED + TERM_ALLOW_MULTIPLE_PROTOCOL_ICMPV6, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MULTIPLE_MIXED_RULE_INGRESS_WITH_ICMPV6_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInetIsDefaultInetVersion(self): """Test that inet is the default inet version when not specified.""" self.naming.GetNetAddr.return_value = TEST_MIXED_IPS self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_GA_NO_INET_OPTIONS + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_MIXED_IPV4_PROTOCOL_GA) 
self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testPriority(self): """Test that priority is set based on terms' ordering.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL + TERM_ALLOW_DNS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MULTIPLE_RULE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testLogging(self): """Test that logging is used when it is set on a term.""" self.naming.GetNetAddr.return_value = TEST_IP self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_WITH_LOGGING, self.naming), EXP_INFO) expected = json.loads(EXPECTED_ONE_RULE_INGRESS_W_LOGGING_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testTargetResources(self): """Test that the target resources is used correctly.""" self.naming.GetNetAddr.return_value = [nacaddr.IP('0.0.0.0/0')] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_WITH_TARGET_RESOURCES, self.naming), EXP_INFO) expected = json.loads(EXPECTED_DENY_INGRESS_ON_TARGET_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testSecondWayOfPassingTargetResources(self): """Test that the target resources is used correctly.""" self.naming.GetNetAddr.return_value = [nacaddr.IP('0.0.0.0/0')] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_WITH_TARGET_RESOURCES_2, self.naming), EXP_INFO) expected = json.loads(EXPECTED_DENY_INGRESS_ON_TARGET_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMultiplePolicies(self): """Tests that both ingress and egress rules are included in one policy.""" self.maxDiff = None self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL + TERM_DENY_INGRESS + HEADER_OPTION_EGRESS + TERM_RESTRICT_EGRESS + TERM_DENY_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_INGRESS_AND_EGRESS_W_DENY_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testPortRange(self): """Test that a port range is accepted and used correctly.""" self.naming.GetNetAddr.return_value = TEST_IP self.naming.GetServiceByProto.side_effect = [['8000-9000']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_PORT_RANGE, self.naming), EXP_INFO) expected = json.loads(EXPECTED_PORT_RANGE_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testTermLongComment(self): """Test that a term's long comment gets truncated and prefixed with term name.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_LONG_COMMENT, self.naming), EXP_INFO) comment_truncated = EXPECTED_ONE_RULE_INGRESS_BETA.replace( 'Generic description', 'This is a very long description, it is l') expected = json.loads(comment_truncated) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testDefaultDenyIngressCreation(self): """Test that the correct IP is correctly set on a deny all ingress term.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + 
TERM_DENY_INGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_DENY_INGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInet6DefaultDenyIngressCreation(self): """Test that the IPv6 IP is correctly set on a deny all ingress term.""" self.naming.GetNetAddr.return_value = ALL_IPV6_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_INET6 + TERM_DENY_INGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV6_DENY_INGRESS_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedDefaultDenyIngressCreation(self): """Test that the mixed IPs are correctly set on a deny all ingress term.""" acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_MIXED + TERM_DENY_INGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MIXED_DENY_INGRESS_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testDefaultDenyEgressCreation(self): """Test that the correct IP is correctly set on a deny all egress term.""" self.naming.GetNetAddr.return_value = ALL_IPV4_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS + TERM_DENY_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_DENY_EGRESS_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testInet6DefaultDenyEgressCreation(self): """Test that the IPv6 IP is correctly set on a deny all egress term.""" self.naming.GetNetAddr.return_value = ALL_IPV6_IPS acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_INET6 + TERM_DENY_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_IPV6_DENY_EGRESS_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testMixedDefaultDenyEgressCreation(self): """Test that the mixed IPs are correctly set on a deny all egress term.""" acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_MIXED + TERM_DENY_EGRESS, self.naming), EXP_INFO) expected = json.loads(EXPECTED_MIXED_DENY_EGRESS_GA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testBuildTokens(self): """Test that _BuildTokens generates the expected list of tokens.""" self.naming.GetNetAddr.side_effect = [TEST_IP] pol1 = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testRaisesExceededCostError(self): """Test that ExceededCostError is raised when policy exceeds max cost.""" self.naming.GetNetAddr.side_effect = [TEST_IP] with self.assertRaises(gcp_hf.ExceededCostError): gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_VERY_LOW_DEFAULT_MAX + TERM_ALLOW_ALL_INTERNAL, self.naming), EXP_INFO) def testChunkedIPRanges(self): """Test that source IP ranges that exceed limit are chunked.""" self.maxDiff = None self.naming.GetNetAddr.side_effect = [MANY_IPS] self.naming.GetServiceByProto.side_effect = [['80']] acl = gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_HIGH_QUOTA + TERM_ALLOW_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_CHUNKED_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) def testChunkedEgressIPRanges(self): """Test that destination IP ranges that exceed limit are chunked.""" self.maxDiff = None self.naming.GetNetAddr.side_effect = [MANY_IPS] self.naming.GetServiceByProto.side_effect = [['80']] acl = 
gcp_hf.HierarchicalFirewall( policy.ParsePolicy(HEADER_OPTION_EGRESS_HIGH_QUOTA + TERM_ALLOW_EGRESS_PORT, self.naming), EXP_INFO) expected = json.loads(EXPECTED_EGRESS_CHUNKED_BETA) self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl)))) @parameterized.named_parameters( ('1 ip, 2 protocols', {'match': { 'config': { 'destIpRanges': ['0.0.0.0/0'], 'layer4Configs': [ {'ipProtocol': 'tcp'}, {'ipProtocol': 'icmp'} ] } }}, 3), ('1 ip, 3 protocols, ', {'match': { 'config': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [ {'ipProtocol': 'tcp'}, {'ipProtocol': 'icmp'}, {'ipProtocol': 'udp'} ] } }}, 4), ('1 ip, 1 protocol with 1 port', {'match': { 'config': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [ {'ipProtocol': 'tcp', 'ports': ['22']} ] } }}, 3), ('1 ip, 2 protocols with 2 ports each', {'match': { 'config': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [ {'ipProtocol': 'tcp', 'ports': ['22']}, {'ipProtocol': 'udp', 'ports': ['22']} ] } }}, 5), ('1 ip, 1 protocol with 2 ports', {'match': { 'config': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [ {'ipProtocol': 'tcp', 'ports': ['22', '23']} ] } }}, 4), ('2 ips, 1 protocol with 2 ports', {'match': { 'config': { 'srcIpRanges': ['1.4.6.8/10', '1.2.3.4/5'], 'layer4Configs': [ {'ipProtocol': 'tcp', 'ports': ['22', '23']} ] } }}, 5), ('2 ips, 2 protocols with 2 ports each', {'match': { 'config': { 'srcIpRanges': ['1.4.6.8/10', '1.2.3.4/5'], 'layer4Configs': [ {'ipProtocol': 'tcp', 'ports': ['22', '23']}, {'ipProtocol': 'udp', 'ports': ['22', '23']} ] } }}, 8), ('1 ip, 2 protocols, 2 targets', {'match': { 'config': { 'destIpRanges': ['0.0.0.0/0'], 'layer4Configs': [ {'ipProtocol': 'tcp'}, {'ipProtocol': 'icmp'} ] } }, 'targetResources': ['target1', 'target2'] }, 5), ) def testGetRuleTupleCount(self, dict_term, expected): self.assertEqual(gcp_hf.GetRuleTupleCount(dict_term, 'beta'), expected) @parameterized.named_parameters( ('1 ip, 2 protocols', { 'match': { 'destIpRanges': ['0.0.0.0/0'], 'layer4Configs': [{ 'ipProtocol': 'tcp' }, { 'ipProtocol': 'icmp' }] } }, 3), ('1 ip, 3 protocols, ', { 'match': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [{ 'ipProtocol': 'tcp' }, { 'ipProtocol': 'icmp' }, { 'ipProtocol': 'udp' }] } }, 4), ('1 ip, 1 protocol with 1 port', { 'match': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [{ 'ipProtocol': 'tcp', 'ports': ['22'] }] } }, 3), ('1 ip, 2 protocols with 2 ports each', { 'match': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [{ 'ipProtocol': 'tcp', 'ports': ['22'] }, { 'ipProtocol': 'udp', 'ports': ['22'] }] } }, 5), ('1 ip, 1 protocol with 2 ports', { 'match': { 'srcIpRanges': ['0.0.0.0/0'], 'layer4Configs': [{ 'ipProtocol': 'tcp', 'ports': ['22', '23'] }] } }, 4), ('2 ips, 1 protocol with 2 ports', { 'match': { 'srcIpRanges': ['1.4.6.8/10', '1.2.3.4/5'], 'layer4Configs': [{ 'ipProtocol': 'tcp', 'ports': ['22', '23'] }] } }, 5), ('2 ips, 2 protocols with 2 ports each', { 'match': { 'srcIpRanges': ['1.4.6.8/10', '1.2.3.4/5'], 'layer4Configs': [{ 'ipProtocol': 'tcp', 'ports': ['22', '23'] }, { 'ipProtocol': 'udp', 'ports': ['22', '23'] }] } }, 8), ('1 ip, 2 protocols, 2 targets', { 'match': { 'destIpRanges': ['0.0.0.0/0'], 'layer4Configs': [{ 'ipProtocol': 'tcp' }, { 'ipProtocol': 'icmp' }] }, 'targetResources': ['target1', 'target2'] }, 5), ) def testGAGetRuleTupleCount(self, dict_term, expected): self.assertEqual(gcp_hf.GetRuleTupleCount(dict_term, 'ga'), expected) if __name__ == '__main__': absltest.main() 
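# A minimal, illustrative sketch only: the parameterized GetRuleTupleCount
# cases above all follow one simple counting pattern, in which every IP range,
# every target resource, every protocol entry and every port contributes one
# tuple towards the hierarchical-firewall quota. The helper below is inferred
# from those expected values as an assumption for illustration; it is not the
# actual gcp_hf.GetRuleTupleCount implementation and is not used by any test.
def _approx_rule_tuple_count(dict_term, api_version):
  """Back-of-the-envelope tuple count for a beta- or GA-style rule dict."""
  match = dict_term.get('match', {})
  if api_version == 'beta':
    # The beta API nests the matchers one level deeper, under 'config'.
    match = match.get('config', {})
  total = len(dict_term.get('targetResources', []))
  total += len(match.get('srcIpRanges', [])) + len(match.get('destIpRanges', []))
  for l4 in match.get('layer4Configs', []):
    # One tuple for the protocol itself, plus one per listed port.
    total += 1 + len(l4.get('ports', []))
  return total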
capirca-2.0.9/tests/lib/gcp_test.py000066400000000000000000000046511437377527500172500ustar00rootroot00000000000000"""Unittest for GCP Firewall Generator module.""" import unittest from absl.testing import absltest from absl.testing import parameterized from capirca.lib import gcp class HelperFunctionsTest(parameterized.TestCase): @parameterized.named_parameters( ('lowercase', 'project'), ('lowercase_w_hyphen', 'project-id'), ('lowercase_w_numbers', 'project123'), ('lowercase_w_numbers_hyphens', 'project-1-2-3')) def testIsProjectIDValidPasses(self, project): self.assertTrue(gcp.IsProjectIDValid(project)) @parameterized.named_parameters( ('trailing_hyphen', 'project-'), ('start_w_number', '1project'), ('start_w_hyphen', '-project'), ('uppercase', 'Project'), ('too_short_by_one_char', 'proje'), ('too_long_by_one_char', 31 * 'a')) def testIsProjectIDValidFails(self, project): self.assertFalse(gcp.IsProjectIDValid(project)) @parameterized.named_parameters( ('lowercase', 'vpc'), ('lowercase_w_hyphen', 'v-p-c'), ('lowercase_w_numbers', 'vpc123'), ('lowercase_w_numbers_hyphens', 'vpc-1-2-3'), ('one_letter', 'v')) def testIsVPCNameValidPasses(self, vpc): self.assertTrue(gcp.IsVPCNameValid(vpc)) @parameterized.named_parameters( ('trailing_hyphen', 'vpc-'), ('start_w_number', '1vpc'), ('start_w_hyphen', '-vpc'), ('uppercase', 'Vpc'), ('too_short_by_one_char', ''), ('too_long_by_one_char', 64 * 'a')) def testIsVPCNameValidFails(self, vpc): self.assertFalse(gcp.IsVPCNameValid(vpc)) @parameterized.named_parameters( ('term', 'good-term', 'good-term-v6'), ('term_with_v6_suffix', 'good-term-v6', 'good-term-v6-v6'), ('one_letter', 'v', 'v-v6')) def testGetIpv6TermName(self, term_name, expected): self.assertEqual(expected, gcp.GetIpv6TermName(term_name)) @parameterized.named_parameters( ('ipv4_mapped', ['::ffff:a02:301/128'], []), ('6to4', ['2002::/16'], []), ('ipv4_compatible', ['::0000:a02:301/128'], []), ('ipv4', ['10.2.3.4/32'], ['10.2.3.4/32']), ('ipv6', ['2001:4860:8000::5/128'], ['2001:4860:8000::5/128']), ('ipv4_wildcard', ['0.0.0.0/0'], ['0.0.0.0/0']), ('ipv6_wildcard', ['::/0'], ['::/0'])) def testFilterIPv4InIPv6FormatAddrs(self, addrs, expected): self.assertEqual(expected, gcp.FilterIPv4InIPv6FormatAddrs(addrs)) with self.assertRaises(ValueError): gcp.FilterIPv4InIPv6FormatAddrs(['dshjgsjfhgsd']) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/ipset_test.py000066400000000000000000000252421437377527500176220ustar00rootroot00000000000000# Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for Ipset rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import ipset from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: ipset OUTPUT DROP } """ GOOD_HEADER_2 = """ header { comment:: "this is a test acl" target:: ipset OUTPUT DROP exists } """ GOOD_TERM_1 = """ term good-term-1 { source-address:: INTERNAL action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { destination-address:: EXTERNAL action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { source-address:: INTERNAL destination-address:: EXTERNAL action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { source-address:: INTERNAL destination-address:: EXTERNAL policer:: batman action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_interface', 'destination_port', 'destination_prefix', 'expiration', 'fragment_offset', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'log_limit', 'name', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'protocol', 'routing_instance', 'source_address', 'source_address_exclude', 'source_interface', 'source_port', 'source_prefix', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'initial', 'sample', 'tcp-established', 'tcp-initial', 'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 class IpsetTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testMarkers(self): self.naming.GetNetAddr.return_value = [nacaddr.IPv4('10.0.0.0/8')] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl) self.assertIn('# begin:ipset-rules', result) self.assertIn('# end:ipset-rules', result) self.naming.GetNetAddr.assert_called_once_with('INTERNAL') def testGenerateSetName(self): # iptables superclass currently limits term name length to 26 characters, # but that could change policy_term = mock.MagicMock() policy_term.name = 'filter_name' policy_term.protocol = ['tcp'] term = ipset.Term(policy_term, 'filter_name', False, None) self.assertEqual(term._GenerateSetName('good-term-1', 'src'), 'good-term-1-src') self.assertEqual(term._GenerateSetName('good-but-way-too-long-term-name', 'src'), 'good-but-way-too-long-term--src') term = ipset.Term(policy_term, 'filter_name', False, None, 'inet6') self.assertEqual(term._GenerateSetName('good-term-1', 'src'), 'good-term-1-src-v6') self.assertEqual(term._GenerateSetName('good-but-way-too-long-term-name', 'src'), 'good-but-way-too-long-te-src-v6') def testOneSourceAddress(self): self.naming.GetNetAddr.return_value = [nacaddr.IPv4('10.0.0.0/8')] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl) self.assertIn('-s 10.0.0.0/8', result) self.assertNotIn('-m set --match-set good-term-3-src src', result) self.naming.GetNetAddr.assert_called_once_with('INTERNAL') def testOneDestinationAddress(self): self.naming.GetNetAddr.return_value = [nacaddr.IPv4('172.16.0.0/12')] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO) result = str(acl) self.assertIn('-d 172.16.0.0/12', result) self.assertNotIn('-m set --match-set good-term-2-dst dst', result) self.naming.GetNetAddr.assert_called_once_with('EXTERNAL') def testOneSourceAndDestinationAddress(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('172.16.0.0/12')]] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO) result = str(acl) self.assertIn('-s 10.0.0.0/8', result) self.assertIn('-d 172.16.0.0/12', result) self.assertNotIn('-m set --match-set good-term-3-src src', result) self.assertNotIn('-m set --match-set good-term-3-dst dst', result) self.naming.GetNetAddr.assert_has_calls([ mock.call('INTERNAL'), mock.call('EXTERNAL')]) def testManySourceAddresses(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('10.0.0.0/24'), nacaddr.IPv4('10.1.0.0/24')] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl) self.assertIn('create good-term-1-src hash:net family inet hashsize' ' 4 maxelem 4', result) self.assertIn('add good-term-1-src 10.0.0.0/24', result) self.assertIn('add good-term-1-src 10.1.0.0/24', result) self.assertIn('-m set --match-set good-term-1-src src', result) self.assertNotIn('-s ', result) self.assertNotIn('-exist', result) self.naming.GetNetAddr.assert_called_once_with('INTERNAL') def testManyDestinationAddresses(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('172.16.0.0/24'), nacaddr.IPv4('172.17.0.0/24')] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO) result = str(acl) self.assertIn('create good-term-2-dst hash:net family inet hashsize ' '4 maxelem 4', result) self.assertIn('add 
good-term-2-dst 172.16.0.0/24', result) self.assertIn('add good-term-2-dst 172.17.0.0/24', result) self.assertIn('-m set --match-set good-term-2-dst dst', result) self.assertNotIn('-s ', result) self.assertNotIn('-exist', result) self.naming.GetNetAddr.assert_called_once_with('EXTERNAL') def testManySourceAndDestinationAddresses(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/24'), nacaddr.IPv4('10.1.0.0/24')], [nacaddr.IPv4('172.16.0.0/24'), nacaddr.IPv4('172.17.0.0/24')]] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO) result = str(acl) self.assertIn('create good-term-3-src hash:net family inet hashsize ' '4 maxelem 4', result) self.assertIn('create good-term-3-dst hash:net family inet hashsize ' '4 maxelem 4', result) self.assertIn('add good-term-3-src 10.0.0.0/24', result) self.assertIn('add good-term-3-src 10.1.0.0/24', result) self.assertIn('add good-term-3-dst 172.16.0.0/24', result) self.assertIn('add good-term-3-dst 172.17.0.0/24', result) self.assertIn('-m set --match-set good-term-3-src src', result) self.assertIn('-m set --match-set good-term-3-dst dst', result) self.assertNotIn('-s ', result) self.assertNotIn('-d ', result) self.naming.GetNetAddr.assert_has_calls([ mock.call('INTERNAL'), mock.call('EXTERNAL')]) def testBuildTokens(self): pol1 = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_4, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testAddsExistsOption(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('10.0.0.0/24'), nacaddr.IPv4('10.1.0.0/24')] acl = ipset.Ipset(policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_1, self.naming), EXP_INFO) self.assertIn('create -exist', str(acl)) self.assertIn('add -exist', str(acl)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/iptables_test.py000066400000000000000000001166671437377527500203150ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for iptables rendering module.""" import datetime import re from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import iptables from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: iptables INPUT ACCEPT } """ GOOD_HEADER_2 = """ header { comment:: "this is a test acl" target:: iptables OUTPUT DROP } """ GOOD_HEADER_3 = """ header { comment:: "this is a test acl with abbreviation" target:: iptables INPUT ACCEPT abbreviateterms } """ GOOD_HEADER_4 = """ header { comment:: "this is a test acl with truncation" target:: iptables INPUT ACCEPT truncateterms } """ GOOD_HEADER_5 = """ header { comment:: "this is a test acl with no default target" target:: iptables INPUT } """ GOOD_HEADER_6 = """ header { comment:: "this is a test acl with a custom chain and no default target" target:: iptables foo } """ GOOD_HEADER_7 = """ header { comment:: "this is a test acl with a custom chain and no default target" target:: iptables foo noverbose } """ IPV6_HEADER_1 = """ header { comment:: "test header for inet6 terms" target:: iptables INPUT DROP inet6 } """ NON_STANDARD_CHAIN = """ header { comment:: "this is a test acl with non-standard chain" target:: iptables foo ACCEPT } """ NOSTATE_HEADER = """ header { comment:: "iptables filter without stateful" target:: iptables INPUT ACCEPT nostate } """ CHAIN_HEADER_1 = """ header { comment:: "this is a test acl" target:: iptables foobar_chain nostate } """ BAD_HEADER_2 = """ header { target:: juniper } """ BAD_HEADER_3 = """ header { target:: iptables INPUT MAYBE } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { source-address:: INTERNAL source-exclude:: OOB_NET protocol:: tcp source-port:: HTTP action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { source-port:: HTTP protocol:: tcp option:: rst fin tcp-established established action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { protocol:: tcp udp esp ah gre icmp 50 action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { verbatim:: iptables "mary had a little lamb" verbatim:: cisco "mary had second lamb" verbatim:: juniper "mary had third lamb" } """ GOOD_TERM_6 = """ term good-term-6 { comment:: "Some text describing what this block does, possibly including newines, blank lines, and extra-long comments (over 255 characters) %(long_line)s All these cause problems if passed verbatim to iptables. 
" comment:: "" protocol:: tcp action:: accept } """ % {'long_line': '-' * 260} GOOD_TERM_7 = """ term drop-short-initial-fragments { option:: first-fragment packet-length:: 1-119 action:: deny } term drop-header-overwrite { fragment-offset:: 1-119 action:: deny } """ GOOD_TERM_8 = """ term block-some-icmp { protocol:: icmp icmp-type:: router-solicitation information-request unreachable echo-reply action:: deny } """ GOOD_TERM_9 = """ term good-term-9 { source-address:: SOME_SOURCE destination-address:: SOME_DEST protocol:: tcp source-port:: HTTP action:: accept } """ GOOD_TERM_10 = """ term good-term-10 { owner:: foo@google.com action:: accept } """ GOOD_TERM_11 = """ term good_term_11 { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ GOOD_TERM_12 = """ term good_term_12 { comment:: "FOOO" action:: accept } """ GOOD_TERM_13 = """ term good_term_13 { logging:: syslog log-limit:: 99/day action:: accept } """ HOPOPT_TERM = """ term hopopt-term { protocol:: hopopt action:: accept } """ BAD_LOGGING_TERM = """ term bad_logging_term { log-limit:: 99/day action:: accept } """ BAD_QUOTE_TERM_1 = """ term bad-quote-term-1 { comment:: "Text describing without quotes" protocol:: tcp action:: accept } """ IPV6_TERM_1 = """ term inet6-icmp { protocol:: icmpv6 icmp-type:: destination-unreachable time-exceeded echo-reply action:: deny } """ IPV6_HEADERS = """ term ipv6-header-1 { protocol:: hopopt action:: deny } term ipv6-header-2 { protocol:: fragment action:: deny } """ ICMPV6_TERM_1 = """ term inet6-icmp { source-address:: IPV6_INTERNAL protocol:: icmpv6 icmp-type:: destination-unreachable action:: deny } """ LOGGING_TERM_1 = """ term foo { protocol:: tcp logging:: syslog action:: accept } """ UDP_STATE_TERM = """ term test-conntrack-udp { protocol:: udp option:: established action:: accept } """ TCP_STATE_TERM = """ term tcp-established-only { protocol:: tcp option:: established action:: accept } """ STATEFUL_ONLY_TERM = """ term stateful-only { option:: established action:: accept } """ BAD_LONG_TERM_NAME = """ term this-term-name-is-really-far-too-long { protocol:: tcp action:: accept } """ GOOD_LONG_TERM_NAME = """ term google-experiment-abbreviations { protocol:: tcp action:: accept } """ GOOD_MULTIPORT = """ term multiport { source-port:: FOURTEEN_PORTS protocol:: tcp action:: accept } """ MULTIPORT_SWAP = """ term multiport { source-port:: HTTP HTTPS destination-port:: SSH protocol:: tcp action:: accept } """ EXPIRED_TERM = """ term is_expired { expiration:: 2001-01-01 action:: accept } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ GOOD_MULTIPORT_RANGE = """ term bad-mport-ranges { destination-port:: FIFTEEN_PORTS_WITH_RANGES protocol:: tcp action:: accept } """ LARGE_MULTIPORT = """ term bad-multiport { destination-port:: LOTS_OF_PORTS protocol:: tcp action:: accept } """ DUAL_LARGE_MULTIPORT = """ term bad-multiport { source-port:: LOTS_OF_SPORTS destination-port:: LOTS_OF_DPORTS protocol:: tcp action:: accept } """ UNSUPPORTED_TERM = """ term ether-type-filter { ether-type:: arp action:: accept } """ UNKNOWN_TERM_KEYWORD = """ term unknown-keyword { comment:: "imaginary new keyword added to the policy library." comment:: "i.e. ip-options-count:: 2-255" comment:: "must be added in tests due to checking in policy library." 
action:: deny } """ UNSUPPORTED_EXCEPT = """ term block-non-standard { protocol-except:: tcp udp icmp action:: deny } """ REJECT_TERM1 = """ term reject-term1 { action:: reject-with-tcp-rst } """ REJECT_TERM2 = """ term reject-term2 { action:: reject } """ NEXT_TERM1 = """ term next-term1 { action:: next } """ BAD_PROTOCOL_MATCHES = """ term proto-accept-and-reject { protocol:: tcp udp icmp protocol-except:: gre action:: accept } """ SOURCE_INTERFACE_TERM = """ term src-interface { protocol:: tcp source-interface:: eth0 action:: accept } """ DESTINATION_INTERFACE_TERM = """ term dst-interface { protocol:: tcp destination-interface:: eth0 action:: accept } """ GOOD_WARNING_TERM = """ term good-warning-term { source-port:: HTTP protocol:: tcp option:: rst fin tcp-established established policer:: batman action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_interface', 'destination_port', 'destination_prefix', 'expiration', 'fragment_offset', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'log_limit', 'name', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'protocol', 'routing_instance', 'source_address', 'source_address_exclude', 'source_interface', 'source_port', 'source_prefix', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'initial', 'sample', 'tcp-established', 'tcp-initial', 'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class FakeTerm: name = '' protocol = ['tcp'] class AclCheckTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) @mock.patch.object(iptables.logging, 'warning') def testChainFilter(self, mock_warn): filter_name = 'foobar_chain' pol = policy.ParsePolicy(CHAIN_HEADER_1 + GOOD_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) # is the chain right? self.assertIn('-A foobar_chain -j f_good-term-1', result) # is the term named appropriately? self.assertIn('-N f_good-term-1', result) mock_warn.assert_called_once_with( 'Filter is generating a non-standard chain that will ' 'not apply to traffic unless linked from INPUT, ' 'OUTPUT or FORWARD filters. 
New chain name is: %s', filter_name) def testUnsupportedTargetOption(self): pol = policy.ParsePolicy(BAD_HEADER_3 + GOOD_TERM_1, self.naming) self.assertRaises(iptables.UnsupportedTargetOptionError, iptables.Iptables, pol, EXP_INFO) def testGoodPolicy(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl) self.assertIn('-P OUTPUT DROP', result, 'output default policy of drop not set.') self.assertIn('-N O_good-term-1', result, 'did not find new chain for good-term-1.') self.assertIn('-A O_good-term-1 -p icmp -m state ' '--state NEW,ESTABLISHED,RELATED -j ACCEPT', result, 'did not find append for good-term-1.') def testCustomChain(self): acl = iptables.Iptables(policy.ParsePolicy(NON_STANDARD_CHAIN + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl).split('\n') self.assertIn('-N foo', result, 'did not find new chain for foo.') self.assertNotIn('-P foo', result, 'chain foo may not have a policy set.') def testChainNoTarget(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_5 + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl).split('\n') for line in result: self.assertFalse(line.startswith(':INPUT'), 'chain may not have a policy set.') self.assertFalse(line.startswith('-P INPUT'), 'chain may not have a policy set.') self.assertFalse(line.startswith('-N INPUT'), 'attempting to create a built-in chain.') def testCustomChainNoTarget(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_6 + GOOD_TERM_1, self.naming), EXP_INFO) result = str(acl).split('\n') self.assertIn('-N foo', result, 'did not find a new chain for foo.') for line in result: self.assertFalse(line.startswith(':foo'), 'chain may not have a policy set.') self.assertFalse(line.startswith('-P foo'), 'chain may not have a policy set.') def testExcludeReturnsPolicy(self): # # In this test, we should get fewer lines of output by performing # early return jumps on excluded addresses. # self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.0.0.0/24')]] self.naming.GetServiceByProto.return_value = ['80'] acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO) result = str(acl) self.assertIn('-P INPUT ACCEPT', result, 'no default policy found.') self.assertIn('-p tcp', result, 'no protocol specification found.') self.assertIn('-s ', result, 'no source address found.') self.assertIn('-s 10.0.0.0/24 -j RETURN', result, 'expected address 10.0.0.0/24 not jumping to RETURN.') self.assertIn('--sport 80 -s 10.0.0.0/8', result, 'expected source address 10.0.0.0/8 not accepted.') self.naming.GetNetAddr.assert_has_calls([ mock.call('INTERNAL'), mock.call('OOB_NET')]) self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testExcludeAddressesPolicy(self): # # In this test, we should get fewer lines of output from excluding # addresses from the specified destination. 
# self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.128.0.0/9'), nacaddr.IPv4('10.64.0.0/10')]] self.naming.GetServiceByProto.return_value = ['80'] acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO) result = str(acl) self.assertIn('--sport 80 -s 10.0.0.0/10', result, 'expected source address 10.0.0.0/10 not accepted.') self.naming.GetNetAddr.assert_has_calls([ mock.call('INTERNAL'), mock.call('OOB_NET')]) self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testAddExcludeSourceForLengthPolicy(self): # # In this test, we should generate fewer lines of output by # excluding the inverted source and performing early returns on # the excluded range. # source_range = [] for i in range(18): address = nacaddr.IPv4(10 * 256 * 256 * 256 + i * 256 * 256) source_range.append(address.supernet(15)) # Grow to /17 dest_range = [] for i in range(40): address = nacaddr.IPv4(10 * 256 * 256 * 256 + i * 256) dest_range.append(address.supernet(7)) # Grow to /25 self.naming.GetNetAddr.side_effect = [source_range, dest_range] self.naming.GetServiceByProto.return_value = ['80'] acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_9, self.naming), EXP_INFO) result = str(acl) self.assertIn('-P INPUT ACCEPT', result, 'no default policy found.') self.assertIn('-p tcp', result, 'no protocol specification found.') self.assertLess(result.count('\n'), len(source_range) * len(dest_range), 'expected less than %d rows, got %d' % (len(source_range) * len(dest_range), result.count('\n'))) self.assertIn( '-s 0.0.0.0/5 -j RETURN', result, 'expected address 0.0.0.0/5 to RETURN:\n' + result) self.assertIn( '-s 10.0.128.0/17 -j RETURN', result, 'expected address 10.0.128.0/17 not jumping to RETURN:\n' + result) self.assertTrue( re.search('--sport 80 -d 10.0.1.0/25 [^\n]* -j ACCEPT', result), 'expected destination address 10.0.1.0/25 accepted:\n' + result) self.naming.GetNetAddr.assert_has_calls([ mock.call('SOME_SOURCE'), mock.call('SOME_DEST')]) self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testAddExcludeDestForLengthPolicy(self): # # In this test, we should generate fewer lines of output by # excluding the inverted destination and performing early returns on # the excluded range.
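# (This mirrors testAddExcludeSourceForLengthPolicy with the source and destination roles swapped; without the exclusion the 40 source x 18 destination prefixes would otherwise expand to 720 rules.)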
# source_range = [] for i in range(40): address = nacaddr.IPv4(10 * 256 * 256 * 256 + i * 256) source_range.append(address.supernet(7)) # Grow to /25 dest_range = [] for i in range(18): address = nacaddr.IPv4(10 * 256 * 256 * 256 + i * 256 * 256) dest_range.append(address.supernet(15)) # Grow to /17 self.naming.GetNetAddr.side_effect = [source_range, dest_range] self.naming.GetServiceByProto.return_value = ['80'] acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_9, self.naming), EXP_INFO) result = str(acl) self.assertIn('-P INPUT ACCEPT', result, 'no default policy found.') self.assertIn('-p tcp', result, 'no protocol specification found.') self.assertLess(result.count('\n'), len(source_range) * len(dest_range), 'expected less than %d rows, got %d' % (len(source_range) * len(dest_range), result.count('\n'))) self.assertIn( '-d 0.0.0.0/5 -j RETURN', result, 'expected address 0.0.0.0/5 to RETURN:\n' + result) self.assertIn( '-d 10.0.128.0/17 -j RETURN', result, 'expected address 10.0.128.0/17 not jumping to RETURN:\n' + result) self.assertTrue( re.search('--sport 80 -s 10.0.1.0/25 [^\n]* -j ACCEPT', result), 'expected destination addresss 10.0.1.0/25 accepted:\n' + result) self.naming.GetNetAddr.assert_has_calls([ mock.call('SOME_SOURCE'), mock.call('SOME_DEST')]) self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testOptions(self): self.naming.GetServiceByProto.return_value = ['80'] acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO) result = str(acl) self.assertIn('--tcp-flags FIN,RST FIN,RST', result, 'tcp flags missing or incorrect.') self.assertNotIn('-dport 1024:65535', result, 'destination port present.') self.assertIn( '-m state --state ESTABLISHED,RELATED', result, 'missing or incorrect state information.') self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testRejectReset(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + REJECT_TERM1, self.naming), EXP_INFO) result = str(acl) self.assertIn('-j REJECT --reject-with tcp-reset', result, 'missing or incorrect reject specification.') def testReject(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + REJECT_TERM2, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-j REJECT --reject-with icmp-host-prohibited', result, 'missing or incorrect reject specification.') def testRejectIpv6(self): pol = policy.ParsePolicy(IPV6_HEADER_1 + REJECT_TERM2, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertNotIn('-p all', result, 'protocol spec present') self.assertIn('-j REJECT --reject-with icmp6-adm-prohibited', result, 'missing or incorrect reject specification.') def testIPv6Headers(self): pol = policy.ParsePolicy(IPV6_HEADER_1 + IPV6_HEADERS, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-m u32 --u32 "0x3&0xff=0x0"', result, 'match for hop-by-hop header is missing') self.assertIn('-m u32 --u32 "0x3&0xff=0x2c"', result, 'match for fragment header is missing') def testNextTerm(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + NEXT_TERM1, self.naming), EXP_INFO) result = str(acl) self.assertIn('-j RETURN', result, 'jump to RETURN not found.') def testProtocols(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_4, self.naming), EXP_INFO) result = str(acl) self.assertIn('-p tcp', result, 'protocol tcp not found.') self.assertIn('-p udp', result, 'protocol udp not found.') self.assertIn('-p 
esp', result, 'protocol esp not found.') self.assertIn('-p ah', result, 'protocol ah not found.') self.assertIn('-p gre', result, 'protocol gre not found.') self.assertIn('-p icmp', result, 'protocol icmp not found.') self.assertIn('-p 50', result, 'protocol 50 not found.') def testVerbatimTerm(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_5, self.naming), EXP_INFO) result = str(acl) self.assertIn('mary had a little lamb', result, 'first verbatim output is missing or incorrect.') # check if another platform's verbatim shows up self.assertNotIn('mary had a second lamb', result, 'second verbatim output is missing or incorrect.') self.assertNotIn('mary had a third lamb', result, 'third verbatim output is missing or incorrect.') def testCommentReflowing(self): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_6, self.naming), EXP_INFO) result = str(acl) self.assertNotIn('--comments ""', result, 'Iptables cannot handle empty comments') self.assertFalse(re.search('--comments "[^"]{256,}"', result), 'Iptables comments must be under 255 characters.') self.assertFalse(re.search('--comments "[^"]*\n', result), 'Iptables comments may not contain newline characters.') def testCommentQuoteStripping(self): parsed_policy = policy.ParsePolicy(GOOD_HEADER_1 + BAD_QUOTE_TERM_1, self.naming) parsed_policy.filters[0][1][0].comment = ['Text "describing" "with" quotes'] acl = iptables.Iptables(parsed_policy, EXP_INFO) result = str(acl) self.assertTrue(re.search( '--comment "Text describing with quotes"', result), 'Iptables did not strip out quotes') def testLongTermName(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + BAD_LONG_TERM_NAME, self.naming) self.assertRaises(aclgenerator.TermNameTooLongError, iptables.Iptables, pol, EXP_INFO) def testLongTermAbbreviation(self): pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_LONG_TERM_NAME, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-abbreviations', result, 'Our strings disappeared during abbreviation.') def testLongTermTruncation(self): pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_LONG_TERM_NAME, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('google-experiment-abbrev', result, 'Our strings disappeared during truncation.') self.assertNotIn('google-experiment-abbreviations', result, 'Term name was not truncated as expected.') def testFragmentOptions(self): pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_7, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('--u32 4&0x3FFF=0x2000', result, 'first-fragment rule is missing') self.assertIn('--length 1:119', result, 'length match is missing') self.assertIn('--u32 4&0x1FFF=1:119', result, 'fragment-offset rule is missing') def testIcmpMatching(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_8, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('--icmp-type 0', result, 'icmp-type 0 (echo-reply) is missing') self.assertIn('--icmp-type 3', result, 'icmp-type 3 (destination-unreachable) is missing') self.assertIn('--icmp-type 10', result, 'icmp-type 10 (router-solicit) is missing') self.assertIn('--icmp-type 15', result, 'icmp-type 15 (info-request) is missing') def testIcmpCode(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_11, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('--icmp-type 3/3', result, result) self.assertIn('--icmp-type 3/4', result, result) def
testConntrackUDP(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + UDP_STATE_TERM, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-m state --state ESTABLISHED,RELATED', result, 'udp connection tracking is missing state module') self.assertNotIn('-dport 1024:65535', result, 'udp connection tracking contains destination high-ports') self.assertIn('-p udp', result, 'udp connection tracking is missing protocol specification') def testConntrackAll(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + STATEFUL_ONLY_TERM, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-m state --state ESTABLISHED,RELATED', result, 'connection tracking is missing state module arguments') self.assertNotIn('-dport 1024:65535', result, 'High-ports should not appear for non-TCP/UDP protocols') def testTcpEstablishedNostate(self): pol = policy.ParsePolicy(NOSTATE_HEADER + TCP_STATE_TERM, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn( '%s %s' % ('--tcp-flags ACK,FIN,RST,SYN RST', '--dport 1024:65535 -j ACCEPT'), result, 'No rule matching packets with RST bit only.\n' + result) self.assertNotIn('--state', result, 'Nostate header should not use nf_conntrack --state flag') def testUdpEstablishedNostate(self): pol = policy.ParsePolicy(NOSTATE_HEADER + UDP_STATE_TERM, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-p udp --dport 1024:65535 -j ACCEPT', result, 'No rule matching TCP packets with ACK bit.\n' + result) self.assertNotIn('--state', result, 'Nostate header should not use nf_conntrack --state flag') def testEstablishedNostate(self): # when using "nostate" filter and a term with "option:: established" # have any protocol other than TCP and/or UDP should raise error. pol = policy.ParsePolicy(NOSTATE_HEADER + STATEFUL_ONLY_TERM, self.naming) self.assertRaises(aclgenerator.EstablishedError, iptables.Iptables, pol, EXP_INFO) def testUnsupportedFilter(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + UNSUPPORTED_TERM, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, iptables.Iptables, pol, EXP_INFO) def testUnknownTermKeyword(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + UNKNOWN_TERM_KEYWORD, self.naming) # Adding a (fake) new property, e.g. if policy.py is updated. 
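# The generator is expected to raise UnsupportedFilterError for any term attribute it does not know how to render.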
pol.filters[0][1][0].ip_options_count = '2-255' self.assertRaises(aclgenerator.UnsupportedFilterError, iptables.Iptables, pol, EXP_INFO) def testProtocolExceptUnsupported(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + UNSUPPORTED_EXCEPT, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, iptables.Iptables, pol, EXP_INFO) def testTermNameConflict(self): pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_1 + GOOD_TERM_1 + GOOD_TERM_1, self.naming) self.assertRaises(aclgenerator.DuplicateTermError, iptables.Iptables, pol, EXP_INFO) def testMultiPort(self): ports = [str(x) for x in range(1, 29, 2)] self.naming.GetServiceByProto.return_value = ports acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_MULTIPORT, self.naming), EXP_INFO) self.assertIn('-m multiport --sports %s' % ','.join(ports), str(acl), 'multiport module not used as expected.') # b/10626420 self.assertNotIn('-m multiport --dports -d', str(acl), 'invalid multiport syntax produced.') self.naming.GetServiceByProto.assert_called_once_with( 'FOURTEEN_PORTS', 'tcp') def testMultiPortWithRanges(self): ports = [str(x) for x in (1, 3, 5, 7, 9, 11, 13, 15, 17, '19-21', '23-25', '27-29')] self.naming.GetServiceByProto.return_value = ports acl = iptables.Iptables(policy.ParsePolicy( GOOD_HEADER_1 + GOOD_MULTIPORT_RANGE, self.naming), EXP_INFO) expected = '-m multiport --dports %s' % ','.join(ports).replace('-', ':') self.assertIn(expected, str(acl), 'multiport module not used as expected.') self.naming.GetServiceByProto.assert_called_once_with( 'FIFTEEN_PORTS_WITH_RANGES', 'tcp') def testMultiportSwap(self): self.naming.GetServiceByProto.side_effect = [['80'], ['443'], ['22']] acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + MULTIPORT_SWAP, self.naming), EXP_INFO) expected = '--dport 22 -m multiport --sports 80,443' self.assertIn(expected, str(acl), 'failing to move single port before multiport values.') self.naming.GetServiceByProto.assert_has_calls([ mock.call('HTTP', 'tcp'), mock.call('HTTPS', 'tcp'), mock.call('SSH', 'tcp')]) def testMultiportLargePortCount(self): ports = [str(x) for x in range(1, 71, 2)] self.naming.GetServiceByProto.return_value = ports acl = iptables.Iptables(policy.ParsePolicy( GOOD_HEADER_1 + LARGE_MULTIPORT, self.naming), EXP_INFO) self.assertIn('-m multiport --dports 1,3,5,7,9', str(acl)) self.assertIn('-m multiport --dports 29,31,33,35,37', str(acl)) self.assertIn('-m multiport --dports 57,59,61,63,65,67,69', str(acl)) self.naming.GetServiceByProto.assert_called_once_with( 'LOTS_OF_PORTS', 'tcp') def testMultiportDualLargePortCount(self): ports = [str(x) for x in range(1, 71, 2)] self.naming.GetServiceByProto.return_value = ports acl = iptables.Iptables(policy.ParsePolicy( GOOD_HEADER_1 + DUAL_LARGE_MULTIPORT, self.naming), EXP_INFO) self.assertIn('-m multiport --sports 1,3,5', str(acl)) self.assertIn('-m multiport --sports 29,31,33', str(acl)) self.assertIn('-m multiport --sports 57,59,61', str(acl)) self.assertIn('23,25,27 -m multiport --dports 1,3,5', str(acl)) self.assertIn('23,25,27 -m multiport --dports 29,31,33', str(acl)) self.assertIn('23,25,27 -m multiport --dports 57,59,61', str(acl)) self.assertIn('51,53,55 -m multiport --dports 1,3,5', str(acl)) self.assertIn('51,53,55 -m multiport --dports 29,31,33', str(acl)) self.assertIn('51,53,55 -m multiport --dports 57,59,61', str(acl)) self.assertIn('65,67,69 -m multiport --dports 1,3,5', str(acl)) self.assertIn('65,67,69 -m multiport --dports 29,31,33', str(acl)) self.assertIn('65,67,69 -m multiport --dports 
57,59,61', str(acl)) self.naming.GetServiceByProto.assert_has_calls([ mock.call('LOTS_OF_SPORTS', 'tcp'), mock.call('LOTS_OF_DPORTS', 'tcp')]) def testGeneratePortBadArguments(self): term = iptables.Term(FakeTerm(), 'test', True, 'test') # Both source and dest are true self.assertRaises(iptables.BadPortsError, term._GeneratePortStatement, [(1, 1), (2, 2)], source=True, dest=True) def testGeneratePortNotImplemented(self): term = iptables.Term(FakeTerm(), 'test', True, 'test') # Both source and dest are false self.assertRaises(NotImplementedError, term._GeneratePortStatement, [(1, 1), (2, 2)], source=False, dest=False) def testLogging(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + LOGGING_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-j LOG --log-prefix foo', result, 'logging jump does not appear in output.') self.assertIn('-j ACCEPT', result, 'action jump does not appear in output.') def testSourceInterface(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + SOURCE_INTERFACE_TERM, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-i eth0', result, 'source interface specification not in output.') def testDestinationInterface(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + DESTINATION_INTERFACE_TERM, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-o eth0', result, 'destination interface specification not in output.') @mock.patch.object(iptables.logging, 'warning') def testExpired(self, mock_warn): _ = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired' ' and will not be rendered.', 'is_expired', 'INPUT') @mock.patch.object(iptables.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) _ = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'INPUT') def testIPv6Icmp(self): pol = policy.ParsePolicy(IPV6_HEADER_1 + IPV6_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('--icmpv6-type 1', result, 'icmpv6-type 1 (echo-reply) is missing') self.assertIn('--icmpv6-type 3', result, 'icmpv6-type 3 (destination-unreachable) is missing') self.assertIn('--icmpv6-type 129', result, 'icmpv6-type 129 (router-solicit) is missing') def testIPv6IcmpOrder(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv6('fd87:6044:ac54:3558::/64')] pol = policy.ParsePolicy(IPV6_HEADER_1 + ICMPV6_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertIn('-s fd87:6044:ac54:3558::/64 -p ipv6-icmp -m icmp6' ' --icmpv6-type 1', result, 'incorrect order of ICMPv6 match elements') self.naming.GetNetAddr.assert_called_once_with('IPV6_INTERNAL') @mock.patch.object(iptables.logging, 'debug') def testIcmpv6InetMismatch(self, mock_debug): acl = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + IPV6_TERM_1, self.naming), EXP_INFO) # output happens in __str_ str(acl) mock_debug.assert_called_once_with( 'Term inet6-icmp will not be rendered,' ' as it has icmpv6 match specified but ' 'the ACL is of inet address family.') @mock.patch.object(iptables.logging, 'debug') def testIcmpInet6Mismatch(self, mock_debug): acl = iptables.Iptables(policy.ParsePolicy(IPV6_HEADER_1 + 
GOOD_TERM_1, self.naming), EXP_INFO) # output happens in __str_ str(acl) mock_debug.assert_called_once_with( 'Term good-term-1 will not be rendered,' ' as it has icmp match specified but ' 'the ACL is of inet6 address family.') def testOwner(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_10, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl).split('\n') self.assertIn('-A I_good-term-10 -m comment --comment "Owner: ' 'foo@google.com"', result, 'missing or incorrect comment specification.') def testSetTarget(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) acl.SetTarget('OUTPUT', 'DROP') result = str(acl).split('\n') self.assertIn('-P OUTPUT DROP', result, 'output default policy of drop not set.') def testSetCustomTarget(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) acl.SetTarget('foobar') result = str(acl).split('\n') self.assertIn('-N foobar', result, 'did not find a new chain for foobar.') def testBuildTokens(self): pol1 = iptables.Iptables(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_5, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): self.naming.GetServiceByProto.return_value = ['80'] pol1 = iptables.Iptables( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_WARNING_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testNoVerbose(self): pol = policy.ParsePolicy(GOOD_HEADER_7 + GOOD_TERM_12, self.naming) acl = iptables.Iptables(pol, EXP_INFO) self.assertNotIn('comment --comment "FOOO"', str(acl), acl) def testLogLimit(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_13, self.naming) acl = iptables.Iptables(pol, EXP_INFO) self.assertIn( '-m --limit 99/day -j LOG --log-prefix good_term_13', str(acl), acl) def testLogLimitFailsWithoutLogging(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + BAD_LOGGING_TERM, self.naming) self.assertRaises(iptables.LimitButNoLogError, iptables.Iptables, pol, EXP_INFO) def testSkipHopByHopinV4(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + HOPOPT_TERM + GOOD_TERM_1, self.naming) acl = iptables.Iptables(pol, EXP_INFO) result = str(acl) self.assertNotIn('-m u32 --u32 "0x3&0xff=0x0"', result, 'match for hop-by-hop header is missing') if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/juniper_test.py000066400000000000000000001651411437377527500201550ustar00rootroot00000000000000# Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for juniper acl rendering module.""" import datetime import re from absl.testing import absltest from unittest import mock from absl import logging from absl.testing import parameterized from capirca.lib import aclgenerator from capirca.lib import juniper from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: juniper test-filter } """ GOOD_HEADER_2 = """ header { target:: juniper test-filter bridge } """ GOOD_HEADER_V6 = """ header { target:: juniper test-filter inet6 } """ GOOD_HEADER_MIXED = """ header { target:: juniper test-filter mixed } """ GOOD_HEADER_BRIDGE = """ header { target:: juniper test-filter bridge } """ GOOD_DSMO_HEADER = """ header { target:: juniper test-filter enable_dsmo } """ GOOD_FILTER_ENHANCED_MODE_HEADER = """ header { target:: juniper test-filter filter_enhanced_mode } """ GOOD_NOVERBOSE_V4_HEADER = """ header { target:: juniper test-filter inet noverbose } """ GOOD_NOVERBOSE_V6_HEADER = """ header { target:: juniper test-filter inet6 noverbose } """ GOOD_HEADER_NOT_INTERFACE_SPECIFIC = """ header { target:: juniper test-filter bridge not-interface-specific } """ BAD_HEADER = """ header { comment:: "this is a test acl" target:: cisco test-filter } """ BAD_HEADER_2 = """ header { target:: juniper test-filter inetpoop } """ EXPIRED_TERM = """ term is_expired { expiration:: 2001-01-01 action:: accept } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } term good-term-2 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_1_V6 = """ term good-term-1 { protocol:: icmpv6 action:: accept } term good-term-2 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_2 = """ term good-term-3 { protocol:: tcp destination-address:: SOME_HOST source-port:: HTTP option:: established tcp-established action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: icmp icmp-type:: echo-reply information-reply information-request icmp-type:: router-solicitation timestamp-request action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { protocol:: icmp protocol:: tcp action:: accept } """ GOOD_TERM_7 = """ term good-term-7 { protocol-except:: tcp action:: accept } """ GOOD_TERM_8 = """ term good-term-8 { source-prefix:: foo_prefix_list destination-prefix:: bar_prefix_list baz_prefix_list action:: accept } """ GOOD_TERM_9 = """ term good-term-9 { ether-type:: arp action:: accept } """ GOOD_TERM_10 = """ term good-term-10 { traffic-type:: unknown-unicast action:: accept } """ GOOD_TERM_11 = """ term good-term-11 { verbatim:: juniper "mary had a little lamb" verbatim:: iptables "mary had a second lamb" verbatim:: cisco "mary had a third lamb" } """ GOOD_TERM_12 = """ term good-term-12 { source-address:: LOCALHOST action:: accept } """ GOOD_TERM_13 = """ term routing-instance-setting { protocol:: tcp routing-instance:: EXTERNAL-NAT } """ GOOD_TERM_14 = """ term loss-priority-setting { protocol:: tcp loss-priority:: low action:: accept } """ GOOD_TERM_15 = """ term precedence-setting { protocol:: tcp destination-port:: SSH precedence:: 7 action:: accept } """ GOOD_TERM_16 = """ term precedence-setting { protocol:: tcp destination-port:: SSH precedence:: 5 7 action:: accept } """ GOOD_TERM_17 = """ term owner-term { owner:: foo@google.com action:: accept } """ 
GOOD_TERM_18_SRC = """ term address-exclusions { source-address:: INTERNAL source-exclude:: SOME_HOST action:: accept } """ GOOD_TERM_18_DST = """ term address-exclusions { destination-address:: INTERNAL destination-exclude:: SOME_HOST action:: accept } """ GOOD_TERM_19 = """ term minimize-prefix-list { source-address:: INCLUDES source-exclude:: EXCLUDES action:: accept } """ GOOD_TERM_V6_HOP_LIMIT = """ term good-term-v6-hl { hop-limit:: 25 action:: accept } """ GOOD_TERM_20_V6 = """ term good-term-20-v6 { protocol-except:: icmpv6 action:: accept } """ GOOD_TERM_21 = """ term good_term_21 { ttl:: 10 action:: accept } """ GOOD_TERM_22 = """ term good_term_22 { protocol:: tcp source-port:: DNS dscp-set:: b111000 action:: accept } """ GOOD_TERM_23 = """ term good_term_23 { protocol:: tcp source-port:: DNS dscp-set:: af42 dscp-match:: af41-af42 5 dscp-except:: be action:: accept } """ GOOD_TERM_24 = """ term good_term_24 { protocol:: tcp source-port:: DNS qos:: af1 action:: accept } """ GOOD_TERM_25 = """ term good_term_25 { protocol:: tcp source-port:: DNS action:: accept } """ GOOD_TERM_26 = """ term good_term_26 { protocol:: tcp source-port:: DNS action:: deny } """ GOOD_TERM_26_V6 = """ term good_term_26-v6 { protocol:: tcp source-port:: DNS action:: deny } """ GOOD_TERM_26_V6_REJECT = """ term good_term_26-v6 { protocol:: tcp source-port:: DNS action:: reject } """ GOOD_TERM_27 = """ term good_term_27 { forwarding-class:: Floop action:: deny } """ GOOD_TERM_28 = """ term good_term_28 { next-ip:: TEST_NEXT } """ GOOD_TERM_29 = """ term multiple-forwarding-class { forwarding-class:: floop fluup fleep action:: deny } """ GOOD_TERM_30 = """ term good-term-30 { source-prefix-except:: foo_prefix_list destination-prefix-except:: bar_prefix_list action:: accept } """ GOOD_TERM_31 = """ term good-term-31 { source-prefix:: foo_prefix source-prefix-except:: foo_except destination-prefix:: bar_prefix destination-prefix-except:: bar_except action:: accept } """ GOOD_TERM_32 = """ term good_term_32 { forwarding-class-except:: floop action:: deny } """ GOOD_TERM_33 = """ term multiple-forwarding-class-except { forwarding-class-except:: floop fluup fleep action:: deny } """ GOOD_TERM_34 = """ term good_term_34 { traffic-class-count:: floop action:: deny } """ GOOD_TERM_35 = """ term good_term_35 { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ GOOD_TERM_36 = """ term good-term-36 { protocol:: tcp destination-address:: SOME_HOST destination-address:: SOME_HOST option:: inactive action:: accept } """ GOOD_TERM_37 = """ term good-term-37 { destination-address:: SOME_HOST restrict-address-family:: inet action:: accept } """ GOOD_TERM_COMMENT = """ term good-term-comment { comment:: "This is a COMMENT" action:: accept } """ GOOD_TERM_FILTER = """ term good-term-filter { comment:: "This is a COMMENT" filter-term:: my-filter } """ BAD_TERM_1 = """ term bad-term-1 { protocol:: tcp udp source-port:: DNS option:: tcp-established action:: accept } """ ESTABLISHED_TERM_1 = """ term established-term-1 { protocol:: tcp source-port:: DNS option:: established action:: accept } """ OPTION_TERM_1 = """ term option-term { protocol:: tcp source-port:: SSH option:: is-fragment action:: accept } """ BAD_ICMPTYPE_TERM_1 = """ term icmptype-mismatch { comment:: "error when icmpv6 paired with inet filter" protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ BAD_ICMPTYPE_TERM_2 = """ term icmptype-mismatch { comment:: "error when icmp paired with inet6 filter" protocol:: icmp 
icmp-type:: echo-request echo-reply action:: accept } """ DEFAULT_TERM_1 = """ term default-term-1 { action:: deny } """ ENCAPSULATE_GOOD_TERM_1 = """ term good-term-1 { protocol:: tcp encapsulate:: template-name } """ ENCAPSULATE_GOOD_TERM_2 = """ term good-term-2 { protocol:: tcp encapsulate:: template-name counter:: count-name } """ ENCAPSULATE_BAD_TERM_1 = """ term bad-term-1 { protocol:: tcp encapsulate:: template-name action:: accept } """ ENCAPSULATE_BAD_TERM_2 = """ term bad-term-2 { protocol:: tcp encapsulate:: template-name routing-instance:: instance-name } """ DECAPSULATE_GOOD_TERM_1 = """ term good-term-1 { protocol:: udp decapsulate:: template-name } """ DECAPSULATE_GOOD_TERM_2 = """ term good-term-2 { protocol:: udp decapsulate:: template-name counter:: count-name } """ DECAPSULATE_BAD_TERM_1 = """ term bad-term-1 { protocol:: udp decapsulate:: template-name action:: accept } """ DECAPSULATE_BAD_TERM_2 = """ term bad-term-2 { protocol:: udp decapsulate:: template-name routing-instance:: instance-name } """ DECAPSULATE_BAD_TERM_3 = """ term bad-term-2 { protocol:: udp decapsulate:: template-name encapsulate:: something } """ PORTMIRROR_GOOD_TERM_1 = """ term good-term-1 { protocol:: tcp port-mirror:: true } """ PORTMIRROR_GOOD_TERM_2 = """ term good-term-2 { protocol:: tcp port-mirror:: true counter:: count-name action:: deny } """ LONG_COMMENT_TERM_1 = """ term long-comment-term-1 { comment:: "this is very very very very very very very very very very very comment:: "very very very very very very very long." action:: deny } """ LONG_POLICER_TERM_1 = """ term long-policer-term-1 { policer:: this-is-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-very-long action:: deny } """ HOPOPT_TERM = """ term good-term-1 { protocol:: hopopt action:: accept } """ HOPOPT_TERM_EXCEPT = """ term good-term-1 { protocol-except:: hopopt action:: accept } """ FRAGOFFSET_TERM = """ term good-term-1 { fragment-offset:: 1-7 action:: accept } """ GOOD_FLEX_MATCH_TERM = """ term flex-match-term-1 { protocol:: tcp flexible-match-range:: bit-length 8 flexible-match-range:: range 0x08 flexible-match-range:: match-start payload flexible-match-range:: byte-offset 16 flexible-match-range:: bit-offset 7 action:: deny } """ BAD_FLEX_MATCH_TERM_1 = """ term flex-match-term-1 { protocol:: tcp flexible-match-range:: bit-length 36 flexible-match-range:: range 0x08 flexible-match-range:: match-start payload flexible-match-range:: byte-offset 16 flexible-match-range:: bit-offset 7 action:: deny } """ BAD_FLEX_MATCH_TERM_2 = """ term flex-match-term-1 { protocol:: tcp flexible-match-range:: bit-length 8 flexible-match-range:: range 0x08 flexible-match-range:: match-start wrong flexible-match-range:: byte-offset 16 flexible-match-range:: bit-offset 7 action:: deny } """ BAD_FLEX_MATCH_TERM_3 = """ term flex-match-term-1 { protocol:: tcp flexible-match-range:: bit-length 8 flexible-match-range:: range 0x08 flexible-match-range:: match-start payload flexible-match-range:: byte-offset 260 flexible-match-range:: bit-offset 7 action:: deny } """ BAD_FLEX_MATCH_TERM_4 = """ term flex-match-term-1 { protocol:: tcp flexible-match-range:: bit-length 8 flexible-match-range:: range 0x08 flexible-match-range:: match-start payload flexible-match-range:: byte-offset 16 flexible-match-range:: bit-offset 8 action:: deny } """ BAD_TERM_FILTER = """ term bad_term_filter { filter-term:: my-filter action:: deny } """ MIXED_TESTING_TERM = """ 
term good-term { protocol:: tcp source-address:: SOME_HOST destination-port:: SMTP destination-address:: SOME_OTHER_HOST action:: accept } """ SUPPORTED_TOKENS = frozenset([ 'action', 'address', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_prefix', 'destination_prefix_except', 'dscp_except', 'dscp_match', 'dscp_set', 'decapsulate', 'encapsulate', 'ether_type', 'expiration', 'filter_term', 'flexible_match_range', 'forwarding_class', 'forwarding_class_except', 'fragment_offset', 'hop_limit', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'loss_priority', 'name', 'next_ip', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'policer', 'port', 'port_mirror', 'precedence', 'protocol', 'protocol_except', 'qos', 'restrict_address_family', 'routing_instance', 'source_address', 'source_address_exclude', 'source_port', 'source_prefix', 'source_prefix_except', 'traffic_class_count', 'traffic_type', 'translated', 'ttl', 'verbatim']) SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'inactive', 'is-fragment', '.*', # not actually a lex token! 'sample', 'tcp-established', 'tcp-initial'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
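# With a value of 2, any term expiring within two weeks of today triggers the 'expires in less than two weeks' info message exercised by testExpiringTerm below.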
EXP_INFO = 2 class JuniperTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testOptions(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['80'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) output = str(jcl) self.assertIn('destination-port 1024-65535;', output, output) # Verify that tcp-established; doesn't get duplicated if both 'established' # and 'tcp-established' options are included in term self.assertEqual(output.count('tcp-established;'), 1) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp') def testTermAndFilterName(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('term good-term-1 {', output, output) self.assertIn('replace: filter test-filter {', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testBadFilterType(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(BAD_HEADER_2 + GOOD_TERM_1, self.naming) self.assertRaises(aclgenerator.UnsupportedAFError, juniper.Juniper, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testBridgeFilterType(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('ip-protocol tcp;', output, output) self.assertNotIn(' destination-address {', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testCommentShrinking(self): long_comment = ' this is a very descriptive comment ' * 10 expected = ( ' ' * 24 + '/* this is a very descriptive comment this is a\n' + ' ' * 25 + '** very descriptive comment this is a very\n' + ' ' * 25 + '** descriptive comment this is a very descript */' ) self.naming.GetNetAddr.return_value = ( [nacaddr.IPv4('10.0.0.0/8', comment=long_comment)]) self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn(expected, output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testDefaultDeny(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + DEFAULT_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertNotIn('from {', output, output) def testEncapsulate(self): jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + ENCAPSULATE_GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('encapsulate template-name;', output, output) jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + ENCAPSULATE_GOOD_TERM_2, self.naming), EXP_INFO) output = str(jcl) self.assertIn('encapsulate template-name;', output, output) self.assertIn('count count-name;', output, output) def 
testFailEncapsulate(self): jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + ENCAPSULATE_BAD_TERM_1, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperMultipleTerminatingActionError, str, jcl) jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + ENCAPSULATE_BAD_TERM_2, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperMultipleTerminatingActionError, str, jcl) def testDecapsulate(self): jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + DECAPSULATE_GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('decapsulate template-name;', output, output) jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + DECAPSULATE_GOOD_TERM_2, self.naming), EXP_INFO) output = str(jcl) self.assertIn('decapsulate template-name;', output, output) self.assertIn('count count-name;', output, output) def testFailDecapsulate(self): jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + DECAPSULATE_BAD_TERM_1, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperMultipleTerminatingActionError, str, jcl) jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + DECAPSULATE_BAD_TERM_2, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperMultipleTerminatingActionError, str, jcl) jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + DECAPSULATE_BAD_TERM_3, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperMultipleTerminatingActionError, str, jcl) def testPortMirror(self): jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + PORTMIRROR_GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('port-mirror;', output, output) jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + PORTMIRROR_GOOD_TERM_2, self.naming), EXP_INFO) output = str(jcl) self.assertIn('port-mirror;', output, output) self.assertIn('count count-name;', output, output) self.assertIn('discard;', output, output) def testIcmpType(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) output = str(jcl) # verify proper translation from policy icmp-type text to juniper-esque self.assertIn(' icmp-type [', output, output) self.assertIn(' 0 ', output, output) self.assertIn(' 15 ', output, output) self.assertIn(' 10 ', output, output) self.assertIn(' 13 ', output, output) self.assertIn(' 16 ', output, output) self.assertIn('];', output, output) def testIcmpCode(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_35, self.naming), EXP_INFO) output = str(jcl) self.assertIn('icmp-code [ 3 4 ];', output, output) def testInactiveTerm(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_36, self.naming), EXP_INFO) output = str(jcl) self.assertIn('inactive: term good-term-36 {', output) def testInet6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::/33')] self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_1_V6, self.naming), EXP_INFO) output = str(jcl) self.assertTrue('next-header icmp6;' in output and 'next-header tcp;' in output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testNotInterfaceSpecificHeader(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER_NOT_INTERFACE_SPECIFIC + GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) 
self.assertNotIn('interface-specific;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testInterfaceSpecificHeader(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('interface-specific;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testFilterEnhancedModeHeader(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper( policy.ParsePolicy(GOOD_FILTER_ENHANCED_MODE_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('enhanced-mode;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testHopLimit(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_V6_HOP_LIMIT, self.naming), EXP_INFO) output = str(jcl) self.assertIn('hop-limit 25;', output, output) def testHopLimitInet(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_V6_HOP_LIMIT, self.naming), EXP_INFO) output = str(jcl) self.assertNotIn('hop-limit 25;', output, output) def testProtocolExcept(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_7, self.naming), EXP_INFO) output = str(jcl) self.assertIn('next-header-except tcp;', output, output) def testIcmpv6Except(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_20_V6, self.naming), EXP_INFO) output = str(jcl) self.assertIn('next-header-except icmp6;', output, output) def testProtocolCase(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5, self.naming), EXP_INFO) output = str(jcl) self.assertIn('protocol [ icmp tcp ];', output, output) def testPrefixList(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_8, self.naming), EXP_INFO) spfx_re = re.compile(r'source-prefix-list {\W+foo_prefix_list;\W+}') dpfx_re = re.compile( r'destination-prefix-list {\W+bar_prefix_list;\W+baz_prefix_list;\W+}') output = str(jcl) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testPrefixListExcept(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_30, self.naming), EXP_INFO) spfx_re = re.compile(r'source-prefix-list {\W+foo_prefix_list except;\W+}') dpfx_re = re.compile( r'destination-prefix-list {\W+bar_prefix_list except;\W+}') output = str(jcl) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testPrefixListMixed(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_31, self.naming), EXP_INFO) spfx_re = re.compile(r'source-prefix-list {\W+foo_prefix;\W+' r'foo_except except;\W+}') dpfx_re = re.compile(r'destination-prefix-list {\W+bar_prefix;\W+' r'bar_except except;\W+}') output = str(jcl) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testEtherType(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_9, self.naming), EXP_INFO) output = str(jcl) self.assertIn('ether-type arp;', output, output) def testTrafficType(self): jcl = 
juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_10, self.naming), EXP_INFO) output = str(jcl) self.assertIn('traffic-type unknown-unicast;', output, output) def testVerbatimTerm(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11, self.naming), EXP_INFO) output = str(jcl) self.assertIn('mary had a little lamb', output, output) # check if other platforms verbatim shows up in output self.assertNotIn('mary had a second lamb', output, output) self.assertNotIn('mary had a third lamb', output, output) def testDscpByte(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_22 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('dscp b111000;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testDscpClass(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_23 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('dscp af42;', output, output) self.assertIn('dscp [ af41-af42 5 ];', output, output) self.assertIn('dscp-except [ be ];', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testDscpIPv6(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER_V6 + GOOD_TERM_23 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('traffic-class af42;', output, output) self.assertIn('traffic-class [ af41-af42 5 ];', output, output) self.assertIn('traffic-class-except [ be ];', output, output) self.assertNotIn('dscp', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testSimplifiedThenStatement(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_24 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('forwarding-class af1', output, output) self.assertIn('accept', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testSimplifiedThenStatementWithSingleAction(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_25 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('then accept;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testSimplifiedThenStatementWithSingleActionDiscardIPv4(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_26 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('then {', output, output) self.assertIn('discard;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testSimplifiedThenStatementWithSingleActionDiscardIPv6(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER_V6 + GOOD_TERM_26_V6 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('then discard;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testSimplifiedThenStatementWithSingleActionRejectIPv6(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER_V6 + GOOD_TERM_26_V6_REJECT jcl = juniper.Juniper(policy.ParsePolicy(policy_text, 
self.naming), EXP_INFO) output = str(jcl) self.assertIn('then {', output, output) self.assertIn('reject;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testTcpEstablished(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + ESTABLISHED_TERM_1 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('tcp-established', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testNonTcpWithTcpEstablished(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + BAD_TERM_1 pol_obj = policy.ParsePolicy(policy_text, self.naming) jcl = juniper.Juniper(pol_obj, EXP_INFO) self.assertRaises(juniper.TcpEstablishedWithNonTcpError, str, jcl) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')]) def testMixedFilterInetType(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('127.0.0.1'), nacaddr.IPv6('::1/128')] jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_12, self.naming), EXP_INFO) output = str(jcl) self.assertIn('test-filter4', output, output) self.assertIn('127.0.0.1', output, output) self.assertIn('test-filter6', output, output) self.assertIn('::1/128', output, output) self.naming.GetNetAddr.assert_called_once_with('LOCALHOST') def testRestrictAddressFamilyType(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('127.0.0.1'), nacaddr.IPv6('::1/128')] jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_37, self.naming), EXP_INFO) output = str(jcl) self.assertIn('127.0.0.1', output, output) self.assertNotIn('::1/128', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testBridgeFilterInetType(self): self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('127.0.0.1'), nacaddr.IPv6('::1/128')] jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER_BRIDGE + GOOD_TERM_12, self.naming), EXP_INFO) output = str(jcl) self.assertNotIn('::1/128', output, output) self.naming.GetNetAddr.assert_called_once_with('LOCALHOST') def testNoVerboseV4(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IP('192.168.' + str(octet) + '.64/27') addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper( policy.ParsePolicy( GOOD_NOVERBOSE_V4_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn('192.168.0.64/27;', str(jcl)) self.assertNotIn('COMMENT', str(jcl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testNoVerboseV6(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IPv6('2001:db8:1010:' + str(octet) + '::64/64', strict=False) addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper( policy.ParsePolicy( GOOD_NOVERBOSE_V6_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn('2001:db8:1010:90::/61;', str(jcl)) self.assertNotIn('COMMENT', str(jcl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testDsmo(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IP('192.168.' 
+ str(octet) + '.64/27') addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_DSMO_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) self.assertIn('192.168.0.64/255.255.0.224;', str(jcl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testDsmoJuniperFriendly(self): addr_list = [nacaddr.IP('192.168.%d.0/24' % octet) for octet in range(256)] self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_DSMO_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) self.assertIn('192.168.0.0/16;', str(jcl)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testDsmoExclude(self): big = nacaddr.IPv4('0.0.0.0/1') ip1 = nacaddr.IPv4('192.168.0.64/27') ip2 = nacaddr.IPv4('192.168.1.64/27') terms = (GOOD_TERM_18_SRC, GOOD_TERM_18_DST) self.naming.GetNetAddr.side_effect = [[big], [ip1, ip2]] * len(terms) mock_calls = [] for term in terms: jcl = juniper.Juniper( policy.ParsePolicy(GOOD_DSMO_HEADER + term, self.naming), EXP_INFO) self.assertIn('192.168.0.64/255.255.254.224 except;', str(jcl)) mock_calls.append(mock.call('INTERNAL')) mock_calls.append(mock.call('SOME_HOST')) self.naming.GetNetAddr.assert_has_calls(mock_calls) def testTermTypeIndexKeys(self): # ensure an _INET entry for each _TERM_TYPE entry self.assertEqual(sorted(juniper.Term._TERM_TYPE.keys()), sorted(juniper.Term.AF_MAP.keys())) def testRoutingInstance(self): jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_13, self.naming), EXP_INFO) output = str(jcl) self.assertIn('routing-instance EXTERNAL-NAT;', output, output) def testLossPriority(self): jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_14, self.naming), EXP_INFO) output = str(jcl) self.assertIn('loss-priority low;', output, output) def testPrecedence(self): self.naming.GetServiceByProto.return_value = ['22'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_15, self.naming), EXP_INFO) output = str(jcl) self.assertIn('precedence 7;', output, output) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testMultiplePrecedence(self): self.naming.GetServiceByProto.return_value = ['22'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_16, self.naming), EXP_INFO) output = str(jcl) self.assertIn('precedence [ 5 7 ];', output, output) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testFilterTerm(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_FILTER, self.naming), EXP_INFO) output = str(jcl) self.assertIn('filter my-filter;', output, output) def testFilterActionTerm(self): with self.assertRaises(policy.InvalidTermActionError): policy.ParsePolicy(GOOD_HEADER + BAD_TERM_FILTER, self.naming) def testArbitraryOptions(self): self.naming.GetServiceByProto.return_value = ['22'] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + OPTION_TERM_1, self.naming), EXP_INFO) output = str(jcl) self.assertIn('is-fragment;', output, output) self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') @mock.patch.object(juniper.logging, 'debug') def testIcmpv6InetMismatch(self, mock_debug): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + BAD_ICMPTYPE_TERM_1, self.naming), EXP_INFO) # output happens in __str_ 
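# (Nothing is asserted on the rendered string itself; only the debug log matters here.)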
str(jcl) mock_debug.assert_called_once_with( 'Term icmptype-mismatch will not be rendered,' ' as it has icmpv6 match specified but ' 'the ACL is of inet address family.') @mock.patch.object(juniper.logging, 'debug') def testIcmpInet6Mismatch(self, mock_debug): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_V6 + BAD_ICMPTYPE_TERM_2, self.naming), EXP_INFO) # output happens in __str_ str(jcl) mock_debug.assert_called_once_with( 'Term icmptype-mismatch will not be rendered,' ' as it has icmp match specified but ' 'the ACL is of inet6 address family.') @mock.patch.object(juniper.logging, 'warning') def testExpiredTerm(self, mock_warn): _ = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and will ' 'not be rendered.', 'is_expired', 'test-filter') @mock.patch.object(juniper.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) _ = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'test-filter') def testOwnerTerm(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_17, self.naming), EXP_INFO) output = str(jcl) self.assertIn(' /*\n' ' ** Owner: foo@google.com\n' ' */', output, output) def testAddressExclude(self): big = nacaddr.IPv4('0.0.0.0/1') ip1 = nacaddr.IPv4('10.0.0.0/8') ip2 = nacaddr.IPv4('172.16.0.0/12') terms = (GOOD_TERM_18_SRC, GOOD_TERM_18_DST) self.naming.GetNetAddr.side_effect = [[big, ip1, ip2], [ip1]] * len(terms) mock_calls = [] for term in terms: jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + term, self.naming), EXP_INFO) output = str(jcl) self.assertIn('10.0.0.0/8 except;', output, output) self.assertNotIn('10.0.0.0/8;', output, output) self.assertIn('172.16.0.0/12;', output, output) self.assertNotIn('172.16.0.0/12 except;', output, output) mock_calls.append(mock.call('INTERNAL')) mock_calls.append(mock.call('SOME_HOST')) self.naming.GetNetAddr.assert_has_calls(mock_calls) def testMinimizePrefixes(self): includes = ['1.0.0.0/8', '2.0.0.0/8'] excludes = ['1.1.1.1/32', '2.0.0.0/8', '3.3.3.3/32'] expected = ['1.0.0.0/8;', '1.1.1.1/32 except;'] unexpected = ['2.0.0.0/8;', '2.0.0.0/8 except;', '3.3.3.3/32'] self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4(ip) for ip in includes], [nacaddr.IPv4(ip) for ip in excludes]] jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_19, self.naming), EXP_INFO) output = str(jcl) for result in expected: self.assertIn(result, output, 'expected "%s" in %s' % (result, output)) for result in unexpected: self.assertNotIn(result, output, 'unexpected "%s" in %s' % (result, output)) self.naming.GetNetAddr.assert_has_calls([ mock.call('INCLUDES'), mock.call('EXCLUDES')]) def testNoMatchReversal(self): includes = ['10.0.0.0/8', '10.0.0.0/10'] excludes = ['10.0.0.0/9'] expected = ['10.0.0.0/8;', '10.0.0.0/10;', '10.0.0.0/9 except;'] self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4(ip) for ip in includes], [nacaddr.IPv4(ip) for ip in excludes]] jcl = juniper.Juniper( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_19, self.naming), EXP_INFO) output = str(jcl) for result in expected: self.assertIn(result, output) def testConfigHelper(self): config = juniper.Config() config.Append('test {') config.Append('blah {') config.Append('foo;') 
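# Still two levels deep here (test { blah {); the indentation in the expected output is added by Config, not by the caller.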
config.Append('bar;') config.Append('}') # close blah{} config.Append(' Mr. T Pities the fool!', verbatim=True) # haven't closed everything yet self.assertRaises(juniper.JuniperIndentationError, lambda: str(config)) config.Append('}') # close test{} self.assertMultiLineEqual(str(config), 'test {\n' ' blah {\n' ' foo;\n' ' bar;\n' ' }\n' ' Mr. T Pities the fool!\n' '}') # one too many '}' self.assertRaises(juniper.JuniperIndentationError, lambda: config.Append('}')) def testForwardingClass(self): policy_text = GOOD_HEADER + GOOD_TERM_27 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('forwarding-class Floop;', output, output) def testForwardingClassExcept(self): policy_text = GOOD_HEADER + GOOD_TERM_32 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('forwarding-class-except floop;', output, output) def testTrafficClassCount(self): policy_text = GOOD_HEADER + GOOD_TERM_34 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('traffic-class-count floop;', output, output) def testFragmentOffset(self): policy_text = GOOD_HEADER + FRAGOFFSET_TERM jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('fragment-offset 1-7;', output, output) def testMultipleForwardingClass(self): policy_text = GOOD_HEADER + GOOD_TERM_29 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('forwarding-class [ floop fluup fleep ];', output, output) def testMultipleForwardingClassExcept(self): policy_text = GOOD_HEADER + GOOD_TERM_33 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(jcl) self.assertIn('forwarding-class-except [ floop fluup fleep ];', output, output) def testLongPolicer(self): with mock.patch.object(juniper.logging, 'warning', spec=logging.warn) as warn: policy_text = GOOD_HEADER + LONG_POLICER_TERM_1 jcl = juniper.Juniper(policy.ParsePolicy(policy_text, self.naming), EXP_INFO) _ = str(jcl) warn.assert_called_with('WARNING: %s is longer than %d bytes. 
Due to' ' limitation in JUNOS, OIDs longer than %dB' ' can cause SNMP timeout issues.', 'this-is-very' '-very-very-very-very-very-very-very-very-very' '-very-very-very-very-very-very-very-very-very' '-very-very-very-very-very-very-very-very-very' '-very-very-long', 128, 128) def testNextIp(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) output = str(jcl) self.assertIn( ('next-ip 10.1.1.1/32'), output) self.naming.GetNetAddr.assert_called_once_with('TEST_NEXT') def testTTL(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_21, self.naming), EXP_INFO) output = str(jcl) self.assertIn('ttl 10;', output) def testTTLInet6(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_21, self.naming), EXP_INFO) output = str(jcl) self.assertNotIn('ttl 10;', output) def testNextIpFormat(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) output = str(jcl) self.assertIn( (' then {\n' ' next-ip 10.1.1.1/32;\n' ' }'), output) self.naming.GetNetAddr.assert_called_once_with('TEST_NEXT') def testNextIpv6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::/128')] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) output = str(jcl) self.assertIn( ('next-ip6 2001::/128;'), output) self.naming.GetNetAddr.assert_called_once_with('TEST_NEXT') def testFailNextIpMultipleIP(self): self.naming.GetNetAddr.return_value = [ nacaddr.IP('10.1.1.1/32'), nacaddr.IP('192.168.1.1/32')] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperNextIpError, str, jcl) self.naming.GetNetAddr.assert_called_once_with('TEST_NEXT') def testFailNextIpNetworkIP(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/26', strict=False)] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) self.assertRaises(juniper.JuniperNextIpError, str, jcl) self.naming.GetNetAddr.assert_called_once_with('TEST_NEXT') def testBuildTokens(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/26', strict=False)] jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) st, sst = jcl._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_28, self.naming), EXP_INFO) st, sst = jcl._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testHopOptProtocol(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + HOPOPT_TERM, self.naming), EXP_INFO) output = str(jcl) self.assertIn('protocol hop-by-hop;', output, output) self.assertNotIn('protocol hopopt;', output, output) def testHopOptProtocolExcept(self): jcl = juniper.Juniper(policy.ParsePolicy(GOOD_HEADER + HOPOPT_TERM_EXCEPT, self.naming), EXP_INFO) output = str(jcl) self.assertIn('protocol-except hop-by-hop;', output, output) self.assertNotIn('protocol-except hopopt;', output, output) def testFlexibleMatch(self): jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER + GOOD_FLEX_MATCH_TERM, self.naming), EXP_INFO) output = str(jcl) flexible_match_expected = [ 'flexible-match-range {', 'bit-length 8;', 'range 0x08;', 'match-start 
payload;', 'byte-offset 16;', 'bit-offset 7;' ] self.assertEqual(all([x in output for x in flexible_match_expected]), True) def testFlexibleMatchIPv6(self): jcl = juniper.Juniper(policy.ParsePolicy( GOOD_HEADER_V6 + GOOD_FLEX_MATCH_TERM, self.naming), EXP_INFO) output = str(jcl) flexible_match_expected = [ 'flexible-match-range {', 'bit-length 8;', 'range 0x08;', 'match-start payload;', 'byte-offset 16;', 'bit-offset 7;' ] self.assertEqual(all([x in output for x in flexible_match_expected]), True) def testFailIsFragmentInV6(self): self.naming.GetServiceByProto.return_value = ['22'] pol = policy.ParsePolicy(GOOD_HEADER_V6 + OPTION_TERM_1, self.naming) self.assertRaises(juniper.JuniperFragmentInV6Error, juniper.Juniper, pol, EXP_INFO) def testFailFlexibleMatch(self): # bad bit-length self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER + BAD_FLEX_MATCH_TERM_1, self.naming) self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER_V6 + BAD_FLEX_MATCH_TERM_1, self.naming) # bad match-start self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER + BAD_FLEX_MATCH_TERM_2, self.naming) self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER_V6 + BAD_FLEX_MATCH_TERM_2, self.naming) # bad byte-offset self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER + BAD_FLEX_MATCH_TERM_3, self.naming) self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER_V6 + BAD_FLEX_MATCH_TERM_3, self.naming) # bad bit-offset self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER + BAD_FLEX_MATCH_TERM_4, self.naming) self.assertRaises(policy.FlexibleMatchError, policy.ParsePolicy, GOOD_HEADER_V6 + BAD_FLEX_MATCH_TERM_4, self.naming) @parameterized.named_parameters( ('MIXED_TO_V4', [[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 0.0.0.0/1;\n' + ' }\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }' ], ['2001::/33']), ('V4_TO_MIXED', [ [nacaddr.IPv4('192.168.0.0/24')], [nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], ], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 192.168.0.0/24;\n' + ' }\n' + ' destination-address {\n' + ' 0.0.0.0/1;\n' + ' }' ], ['2001::/33']), ('MIXED_TO_V6', [[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 2001::/33;\n' + ' }\n' + ' destination-address {\n' + ' 2201::/48;\n' + ' }' ], ['0.0.0.0/1']), ('V6_TO_MIXED', [[ nacaddr.IPv6('2201::/48') ], [nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')]], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 2201::/48;\n' + ' }\n' + ' destination-address {\n' + ' 2001::/33;\n' + ' }' ], ['0.0.0.0/1']), ('MIXED_TO_MIXED', [[ nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33') ], [nacaddr.IPv4('192.168.0.0/24'), nacaddr.IPv6('2201::/48')]], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 0.0.0.0/1;\n' + ' }\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }', ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 2001::/33;\n' + ' }\n' + ' destination-address {\n' + ' 2201::/48;\n' + ' }' ], []), ('V4_TO_V4', [[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv4('192.168.0.0/24')]], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 0.0.0.0/1;\n' + ' }\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + 
' }' ], []), ('V6_TO_V6', [[nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], [ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 2001::/33;\n' + ' }\n' + ' destination-address {\n' + ' 2201::/48;\n' + ' }' ], []), ( 'V4_TO_V6', [[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv6('2201::/48')]], [], ['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'], ), ( 'V6_TO_V4', [[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], [], ['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'], ), ( 'PARTLY_UNSPECIFIED', [[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], ['term good_term_25 '], [ '0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48', 'term good-term-both-icmp-and-icmpv6-' ], ), ) def testMixed(self, addresses, expected, notexpected): self.naming.GetNetAddr.side_effect = addresses self.naming.GetServiceByProto.return_value = ['25'] jcl = juniper.Juniper( policy.ParsePolicy( GOOD_HEADER_MIXED + MIXED_TESTING_TERM + GOOD_TERM_25, self.naming), EXP_INFO) output = str(jcl) for expect in expected: self.assertIn(expect, output, output) for notexpect in notexpected: self.assertNotIn(notexpect, output, output) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/juniperevo_test.py000066400000000000000000000345121437377527500206640ustar00rootroot00000000000000# Copyright 2022 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for juniper evo acl rendering module.""" from unittest import mock from absl.testing import absltest from absl.testing import parameterized from capirca.lib import juniperevo from capirca.lib import naming from capirca.lib import policy GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: juniperevo test-filter inet6 ingress } """ GOOD_HEADER_2 = """ header { comment:: "this is a test acl" target:: juniperevo test-filter inet6 ingress loopback } """ GOOD_HEADER_3 = """ header { comment:: "this is a test acl" target:: juniperevo test-filter inet6 egress physical } """ GOOD_HEADER_4 = """ header { comment:: "this is a test acl" target:: juniperevo test-filter inet6 egress loopback } """ BAD_HEADER_1 = """ header { comment:: "this is a test acl" target:: juniperevo test-filter inet6 } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: hopopt action:: deny } """ GOOD_TERM_2 = """ term good-term-2 { protocol-except:: hopopt action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: fragment action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { protocol-except:: fragment action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { protocol:: tcp action:: accept } """ GOOD_TERM_6 = """ term good-term-6 { protocol-except:: tcp action:: accept } """ SUPPORTED_TOKENS = frozenset([ 'action', 'address', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_prefix', 'destination_prefix_except', 'dscp_except', 'dscp_match', 'dscp_set', 'encapsulate', 'ether_type', 'expiration', 'filter_term', 'flexible_match_range', 'forwarding_class', 'forwarding_class_except', 'fragment_offset', 'hop_limit', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'loss_priority', 'name', 'next_ip', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'policer', 'port', 'port_mirror', 'precedence', 'protocol', 'protocol_except', 'qos', 'restrict_address_family', 'routing_instance', 'source_address', 'source_address_exclude', 'source_port', 'source_prefix', 'source_prefix_except', 'traffic_class_count', 'traffic_type', 'translated', 'ttl', 'verbatim']) SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'inactive', 'is-fragment', '.*', # not actually a lex token! 
'sample', 'tcp-established', 'tcp-initial'} } # Print an info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class JuniperEvoTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testIPv6HopOptProtocolIngressPhysical(self): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO)) self.assertIn('next-header hop-by-hop;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('payload-protocol hop-by-hop;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('next-header 0;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('payload-protocol 0;', output, 'missing or incorrect HOPOPT specification') def testIPv6HopOptProtocolExceptIngressPhysical(self): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO)) self.assertIn('next-header-except hop-by-hop;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('payload-protocol-except hop-by-hop;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('next-header-except 0;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('payload-protocol-except 0;', output, 'missing or incorrect HOPOPT specification') def testIPv6FragmentProtocolIngressPhysical(self): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO)) self.assertIn('next-header fragment;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('payload-protocol fragment;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('next-header 44;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('payload-protocol 44;', output, 'missing or incorrect IPv6-Frag specification') def testIPv6FragmentProtocolExceptIngressPhysical(self): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_4, self.naming), EXP_INFO)) self.assertIn('next-header-except fragment;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('payload-protocol-except fragment;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('next-header-except 44;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('payload-protocol-except 44;', output, 'missing or incorrect IPv6-Frag specification') def testIPv6TcpProtocolIngressPhysical(self): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_5, self.naming), EXP_INFO)) self.assertIn('payload-protocol tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('payload-protocol 6;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header 6;', output, 'missing or incorrect TCP specification') def testIPv6TcpProtocolExceptIngressPhysical(self): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_6, self.naming), EXP_INFO)) self.assertIn('payload-protocol-except tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header-except tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('payload-protocol-except 6;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header-except 6;', output,
'missing or incorrect TCP specification') @parameterized.named_parameters( ('IngressLoopback', GOOD_HEADER_2), ('EgressPhysical', GOOD_HEADER_3), ('EgressLoopback', GOOD_HEADER_4), ) def testIPv6HopOptProtocol(self, header): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( header + GOOD_TERM_1, self.naming), EXP_INFO)) self.assertIn('payload-protocol 0;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('next-header 0;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('payload-protocol hop-by-hop;', output, 'missing or incorrect HOPOPT specification') self.assertNotIn('next-header hop-by-hop;', output, 'missing or incorrect HOPOPT specification') @parameterized.named_parameters( ('IngressLoopback', GOOD_HEADER_2), ('EgressPhysical', GOOD_HEADER_3), ('EgressLoopback', GOOD_HEADER_4), ) def testIPv6HopOptProtocolExcept(self, header): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( header + GOOD_TERM_2, self.naming), EXP_INFO)) self.assertIn('payload-protocol-except 0;', output, 'missing or incorrect HOPOPT specifications') self.assertNotIn('next-header-except 0;', output, 'missing or incorrect HOPOPT specifications') self.assertNotIn('payload-protocol-except hop-by-hop;', output, 'missing or incorrect HOPOPT specifications') self.assertNotIn('next-header-except hop-by-hop;', output, 'missing or incorrect HOPOPT specifications') @parameterized.named_parameters( ('IngressLoopback', GOOD_HEADER_2), ('EgressPhysical', GOOD_HEADER_3), ('EgressLoopback', GOOD_HEADER_4), ) def testIPv6FragmentProtocol(self, header): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( header + GOOD_TERM_3, self.naming), EXP_INFO)) self.assertIn('payload-protocol 44;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('next-header 44;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('payload-protocol fragment;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('next-header fragment;', output, 'missing or incorrect IPv6-Frag specification') @parameterized.named_parameters( ('IngressLoopback', GOOD_HEADER_2), ('EgressPhysical', GOOD_HEADER_3), ('EgressLoopback', GOOD_HEADER_4), ) def testIPv6FragmentProtocolExcept(self, header): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( header + GOOD_TERM_4, self.naming), EXP_INFO)) self.assertIn('payload-protocol-except 44;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('next-header-except 44;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('payload-protocol-except fragment;', output, 'missing or incorrect IPv6-Frag specification') self.assertNotIn('next-header-except fragment;', output, 'missing or incorrect IPv6-Frag specification') @parameterized.named_parameters( ('IngressLoopback', GOOD_HEADER_2), ('EgressPhysical', GOOD_HEADER_3), ('EgressLoopback', GOOD_HEADER_4), ) def testIPv6TcpProtocol(self, header): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( header + GOOD_TERM_5, self.naming), EXP_INFO)) self.assertIn('payload-protocol tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('payload-protocol 6;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header 6;', output, 'missing or incorrect TCP specification') @parameterized.named_parameters( ('IngressLoopback', GOOD_HEADER_2), ('EgressPhysical', GOOD_HEADER_3), ('EgressLoopback', 
GOOD_HEADER_4), ) def testIPv6TcpProtocolExcept(self, header): output = str( juniperevo.JuniperEvo( policy.ParsePolicy( header + GOOD_TERM_6, self.naming), EXP_INFO)) self.assertIn('payload-protocol-except tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header-except tcp;', output, 'missing or incorrect TCP specification') self.assertNotIn('payload-protocol-except 6;', output, 'missing or incorrect TCP specification') self.assertNotIn('next-header-except 6;', output, 'missing or incorrect TCP specification') def testIPv6FilterWithNoDirection(self): evojcl = juniperevo.JuniperEvo( policy.ParsePolicy(BAD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) self.assertRaises(juniperevo.FilterDirectionError, str, evojcl) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/junipermsmpc_test.py000066400000000000000000002046641437377527500212210ustar00rootroot00000000000000# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unittest for junipermsmpc acl rendering module.""" import datetime import re from absl.testing import absltest from unittest import mock from absl.testing import parameterized from capirca.lib import junipermsmpc from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: msmpc test-filter inet } """ GOOD_HEADER_V6 = """ header { target:: msmpc test-filter inet6 } """ GOOD_HEADER_MIXED = """ header { target:: msmpc test-filter mixed } """ GOOD_HEADER_MIXED_IMPLICIT = """ header { target:: msmpc test-filter } """ GOOD_NOVERBOSE_V4_HEADER = """ header { target:: msmpc test-filter noverbose inet } """ GOOD_NOVERBOSE_V6_HEADER = """ header { target:: msmpc test-filter inet6 noverbose } """ GOOD_HEADER_INGRESS = """ header { comment:: "this is a test acl" target:: msmpc test-filter ingress inet } """ GOOD_HEADER_EGRESS = """ header { comment:: "this is a test acl" target:: msmpc test-filter egress inet } """ BAD_HEADER_DIRECTION = """ header { comment:: "this is a test acl" target:: msmpc test-filter ingress egress inet } """ BAD_HEADER = """ header { comment:: "this is a test acl" target:: cisco test-filter } """ BAD_HEADER_2 = """ header { target:: msmpc test-filter inetpoop } """ BAD_HEADER_3 = """ header { target:: msmpc test-filter inet inet6 } """ EXPIRED_TERM = """ term is_expired { expiration:: 2001-01-01 action:: accept } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } term good-term-2 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_1_V6 = """ term good-term-1 { protocol:: icmpv6 action:: accept } term good-term-2 { protocol:: tcp destination-port:: SMTP destination-address:: SOME_HOST action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: icmp icmp-type:: echo-reply information-reply information-request icmp-type:: router-solicitation 
timestamp-request action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { protocol:: icmp protocol:: tcp action:: accept } """ GOOD_TERM_8 = """ term good-term-8 { source-prefix:: foo_prefix_list destination-prefix:: bar_prefix_list baz_prefix_list action:: accept } """ GOOD_TERM_11 = """ term good-term-11 { verbatim:: msmpc "mary had a little lamb" verbatim:: iptables "mary had a second lamb" verbatim:: cisco "mary had a third lamb" verbatim:: juniper "mary had a fourth lamb" } """ GOOD_TERM_17 = """ term owner-term { owner:: foo@google.com action:: accept } """ GOOD_TERM_18_SRC = """ term address-exclusions { source-address:: INTERNAL source-exclude:: SOME_HOST action:: accept } """ GOOD_TERM_18_DST = """ term address-exclusions { destination-address:: INTERNAL destination-exclude:: SOME_HOST action:: accept } """ GOOD_TERM_19 = """ term minimize-prefix-list { source-address:: INCLUDES source-exclude:: EXCLUDES action:: accept } """ GOOD_TERM_25 = """ term good_term_25 { protocol:: tcp source-port:: DNS action:: accept } """ GOOD_TERM_26 = """ term good_term_26 { protocol:: tcp source-port:: DNS action:: deny } """ GOOD_TERM_26_V6 = """ term good_term_26-v6 { protocol:: tcp source-port:: DNS action:: deny } """ GOOD_TERM_26_V6_REJECT = """ term good_term_26-v6 { protocol:: tcp source-port:: DNS action:: reject } """ GOOD_TERM_30 = """ term good-term-30 { source-prefix-except:: foo_prefix_list destination-prefix-except:: bar_prefix_list action:: accept } """ GOOD_TERM_31 = """ term good-term-31 { source-prefix:: foo_prefix source-prefix-except:: foo_except destination-prefix:: bar_prefix destination-prefix-except:: bar_except action:: accept } """ GOOD_TERM_35 = """ term good_term_35 { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ GOOD_TERM_36 = """ term good-term-36 { protocol:: tcp destination-address:: SOME_HOST destination-address:: SOME_HOST option:: inactive action:: accept } """ GOOD_TERM_NUMERIC_PROTOCOL = """ term good-term-numeric { protocol:: %s action:: accept } """ GOOD_TERM_COMMENT = """ term good-term-comment { comment:: "This is a COMMENT" action:: accept } """ ESTABLISHED_TERM_1 = """ term established-term-1 { protocol:: tcp source-port:: DNS option:: established action:: accept } """ BAD_ICMPTYPE_TERM_1 = """ term icmptype-mismatch { comment:: "error when icmpv6 paired with inet filter" protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ BAD_ICMPTYPE_TERM_2 = """ term icmptype-mismatch { comment:: "error when icmp paired with inet6 filter" protocol:: icmp icmp-type:: echo-request echo-reply action:: accept } """ DEFAULT_TERM_1 = """ term default-term-1 { action:: deny } """ RANGE_PORTS_TERM = """ term ranged-ports-1 { protocol:: udp destination-port:: BOOTPS destination-port:: BOOTPC action:: accept } """ MIXED_TESTING_TERM = """ term good-term { protocol:: tcp source-address:: SOME_HOST destination-port:: SMTP destination-address:: SOME_OTHER_HOST action:: accept } """ MIXED_TESTING_TERM_ICMP = """ term good-term-icmp { protocol:: icmp source-address:: SOME_HOST destination-address:: SOME_OTHER_HOST action:: accept } term good-term-icmp-2 { protocol:: icmp action:: accept } term good-term-icmpv6 { protocol:: icmpv6 source-address:: SOME_HOST destination-address:: SOME_OTHER_HOST action:: accept } term good-term-icmpv6-2 { protocol:: icmpv6 action:: accept } term good-term-both-icmp-and-icmpv6 { protocol:: icmp protocol:: icmpv6 source-address:: SOME_HOST destination-address:: SOME_OTHER_HOST action:: accept } 
term good-term-both-icmp-and-icmpv6-2 { protocol:: icmp protocol:: icmpv6 source-address:: SOME_HOST destination-address:: SOME_OTHER_HOST action:: accept } """ LOGGING_TERM = """ term good-term-1 { protocol:: icmp action:: accept logging:: %s } """ TERM_NAME_COLLISION = """ term good-term-1%s { protocol:: icmp action:: accept } term hood-term-1%s { protocol:: tcp action:: accept } """ SUPPORTED_TOKENS = frozenset([ 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_prefix', 'destination_prefix_except', 'expiration', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'source_prefix', 'source_prefix_except', 'translated', 'verbatim' ]) SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': { 'established', 'inactive', '.*', # not actually a lex token! 'tcp-established', } } # Print an info message when a term is set to expire in that many weeks. # This is normally passed from command line.
EXP_INFO = 2 class JuniperMSMPCTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testTermAndFilterName(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('term good-term-1 {', output, output) self.assertIn('rule test-filter {', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testBadFilterType(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(BAD_HEADER_2 + GOOD_TERM_1, self.naming) self.assertRaises(junipermsmpc.UnsupportedHeaderError, junipermsmpc.JuniperMSMPC, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testMultipleFilterType(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(BAD_HEADER_3 + GOOD_TERM_1, self.naming) self.assertRaises(junipermsmpc.ConflictingTargetOptionsError, junipermsmpc.JuniperMSMPC, pol, EXP_INFO) def testMixedv4(self): self.naming.GetNetAddr.return_value = ([nacaddr.IPv4('192.168.0.0/24')]) self.naming.GetServiceByProto.return_value = ['25'] expected = (' term good-term-2 {\n' + ' from {\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }') msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expected, output, output) def testMixedv6(self): self.naming.GetNetAddr.return_value = ([nacaddr.IPv6('2001::/33')]) self.naming.GetServiceByProto.return_value = ['25'] expected = (' term good-term-2 {\n' + ' from {\n' + ' destination-address {\n' + ' 2001::/33;\n' + ' }') msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expected, output, output) def testMixedBoth(self): self.naming.GetNetAddr.return_value = ([ nacaddr.IPv4('192.168.0.0/24'), nacaddr.IPv6('2001::/33') ]) self.naming.GetServiceByProto.return_value = ['25'] expectedv4 = (' term good-term-2-inet {\n' + ' from {\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }') expectedv6 = (' term good-term-2-inet6 {\n' + ' from {\n' + ' destination-address {\n' + ' 2001::/33;\n' + ' }') msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expectedv4, output, output) self.assertIn(expectedv6, output, output) def testCommentShrinking(self): long_comment = ' this is a very descriptive comment ' * 10 expected = (' ' * 32 + '/* this is a very descriptive comment this\n' + ' ' * 33 + '** is a very descriptive comment this is a\n' + ' ' * 33 + '** very descriptive comment this is a very\n' + ' ' * 33 + '** descript */') self.naming.GetNetAddr.return_value = ([ nacaddr.IPv4('10.0.0.0/8', comment=long_comment) ]) self.naming.GetServiceByProto.return_value = ['25'] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expected, output, output) 
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testDefaultDeny(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + DEFAULT_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertNotIn('from {', output, output) self.assertIn('discard;', output, output) def testIcmpType(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) output = str(msmpc) # verify proper translation from policy icmp-type text to juniper-esque self.assertIn('icmp-type 0;', output, output) self.assertIn('icmp-type 15;', output, output) self.assertIn('icmp-type 10;', output, output) self.assertIn('icmp-type 13;', output, output) self.assertIn('icmp-type 16;', output, output) def testIcmpCode(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_35, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('icmp-code [ 3 4 ];', output, output) def testInactiveTerm(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_36, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('inactive: term good-term-36 {', output) def testInet6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('2001::/33')] self.naming.GetServiceByProto.return_value = ['25'] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_V6 + GOOD_TERM_1_V6, self.naming), EXP_INFO) output = str(msmpc) self.assertTrue('protocol icmp6;' in output and 'protocol tcp;' in output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testProtocolCase(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5, self.naming), EXP_INFO) output = str(msmpc) expected_output = ( ' application test-filtergood-term-5-app1 {\n' + ' protocol icmp;\n' + ' }\n' + ' application test-filtergood-term-5-app2 {\n' + ' protocol tcp;\n' + ' destination-port 1-65535;\n' + ' }') self.assertIn(expected_output, output, output) def testPrefixList(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_8, self.naming), EXP_INFO) spfx_re = re.compile(r'source-prefix-list foo_prefix_list;') dpfx_re = re.compile( r'destination-prefix-list bar_prefix_list;\W+destination-prefix-list baz_prefix_list;' ) output = str(msmpc) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testPrefixListExcept(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_30, self.naming), EXP_INFO) spfx_re = re.compile(r'source-prefix-list foo_prefix_list except;') dpfx_re = re.compile(r'destination-prefix-list bar_prefix_list except;') output = str(msmpc) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testPrefixListMixed(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_31, self.naming), EXP_INFO) spfx_re = re.compile(r'source-prefix-list foo_prefix;\W+' r'source-prefix-list foo_except except;') dpfx_re = re.compile(r'destination-prefix-list bar_prefix;\W+' r'destination-prefix-list bar_except except;') output = str(msmpc) self.assertTrue(spfx_re.search(output), output) self.assertTrue(dpfx_re.search(output), output) def testVerbatimTerm(self): msmpc = junipermsmpc.JuniperMSMPC( 
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('mary had a little lamb', output, output) # check if other platforms verbatim shows up in output self.assertNotIn('mary had a second lamb', output, output) self.assertNotIn('mary had a third lamb', output, output) self.assertNotIn('mary had a fourth lamb', output, output) def testAccept(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_25 msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('then {', output, output) self.assertIn('accept;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testDiscardIPv4(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + GOOD_TERM_26 msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('then {', output, output) self.assertIn('discard;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testDiscardIPv6(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER_V6 + GOOD_TERM_26_V6 msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('then {', output, output) self.assertIn('discard;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testRejectIPv6(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER_V6 + GOOD_TERM_26_V6_REJECT msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(msmpc) self.assertIn('then {', output, output) self.assertIn('reject;', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testTcpEstablished(self): self.naming.GetServiceByProto.return_value = ['53'] policy_text = GOOD_HEADER + ESTABLISHED_TERM_1 msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(policy_text, self.naming), EXP_INFO) output = str(msmpc) self.assertNotIn('term established-term-1', output, output) self.assertNotIn('tcp-established', output, output) self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testStatelessReply(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.1/32')] self.naming.GetServiceByProto.return_value = ['25'] ret = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming) _, terms = ret.filters[0] for term in terms: if term.protocol[0] == 'icmp': term.stateless_reply = True msmpc = junipermsmpc.JuniperMSMPC(ret, EXP_INFO) output = str(msmpc) self.assertNotIn('term good-term-1 {', output, output) self.assertIn('term good-term-2 {', output, output) def testNoVerboseV4(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IP('192.168.' 
+ str(octet) + '.64/27') addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ['25'] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy( GOOD_NOVERBOSE_V4_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn('192.168.0.64/27;', str(msmpc)) self.assertNotIn('COMMENT', str(msmpc)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testNoVerboseV6(self): addr_list = list() for octet in range(0, 256): net = nacaddr.IPv6( '2001:db8:1010:' + str(octet) + '::64/64', strict=False) addr_list.append(net) self.naming.GetNetAddr.return_value = addr_list self.naming.GetServiceByProto.return_value = ['25'] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy( GOOD_NOVERBOSE_V6_HEADER + GOOD_TERM_1 + GOOD_TERM_COMMENT, self.naming), EXP_INFO) self.assertIn('2001:db8:1010:90::/61;', str(msmpc)) self.assertNotIn('COMMENT', str(msmpc)) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testTermTypeIndexKeys(self): # ensure an _INET entry for each _TERM_TYPE entry self.assertCountEqual(junipermsmpc.Term._TERM_TYPE.keys(), junipermsmpc.Term.AF_MAP.keys()) @mock.patch.object(junipermsmpc.logging, 'debug') def testIcmpv6InetMismatch(self, mock_debug): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + BAD_ICMPTYPE_TERM_1, self.naming), EXP_INFO) # output happens in __str_ str(msmpc) mock_debug.assert_called_once_with( 'Term icmptype-mismatch will not be rendered,' ' as it has icmpv6 match specified but ' 'the ACL is of inet address family.') @mock.patch.object(junipermsmpc.logging, 'debug') def testIcmpInet6Mismatch(self, mock_debug): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_V6 + BAD_ICMPTYPE_TERM_2, self.naming), EXP_INFO) # output happens in __str_ str(msmpc) mock_debug.assert_called_once_with( 'Term icmptype-mismatch will not be rendered,' ' as it has icmp match specified but ' 'the ACL is of inet6 address family.') @mock.patch.object(junipermsmpc.logging, 'warning') def testExpiredTerm(self, mock_warn): _ = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and will ' 'not be rendered.', 'is_expired', 'test-filter') @mock.patch.object(junipermsmpc.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) _ = junipermsmpc.JuniperMSMPC( policy.ParsePolicy( GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'test-filter') def testOwnerTerm(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_17, self.naming), EXP_INFO) output = str(msmpc) self.assertIn( ' /*\n' ' ** Owner: foo@google.com\n' ' */', output, output) def testOwnerNoVerboseTerm(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_NOVERBOSE_V4_HEADER + GOOD_TERM_17, self.naming), EXP_INFO) output = str(msmpc) self.assertNotIn('** Owner: ', output, output) def testAddressExclude(self): big = nacaddr.IPv4('0.0.0.0/1', comment='half of everything') ip1 = nacaddr.IPv4('10.0.0.0/8', comment='RFC1918 10-net') ip2 = nacaddr.IPv4('172.16.0.0/12', comment='RFC1918 172-net') terms = 
(GOOD_TERM_18_SRC, GOOD_TERM_18_DST) self.naming.GetNetAddr.side_effect = [[big, ip1, ip2], [ip1]] * len(terms) mock_calls = [] for term in terms: msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + term, self.naming), EXP_INFO) output = str(msmpc) expected_output = ( ' ' + ('source' if term == GOOD_TERM_18_SRC else 'destination') + '-address {\n' + ' /* half of everything, RFC1918 ' '10-net */\n' + ' 0.0.0.0/1;\n' + ' /* RFC1918 172-net */\n' + ' 172.16.0.0/12;\n' + ' /* RFC1918 10-net */\n' + ' 10.0.0.0/8 except;\n' + ' }') self.assertIn(expected_output, output, output) self.assertNotIn('10.0.0.0/8;', output, output) self.assertNotIn('172.16.0.0/12 except;', output, output) mock_calls.append(mock.call('INTERNAL')) mock_calls.append(mock.call('SOME_HOST')) self.naming.GetNetAddr.assert_has_calls(mock_calls) def testMinimizePrefixes(self): includes = ['1.0.0.0/8', '2.0.0.0/8'] excludes = ['1.1.1.1/32', '2.0.0.0/8', '3.3.3.3/32'] expected = ['1.0.0.0/8;', '1.1.1.1/32 except;'] unexpected = ['2.0.0.0/8;', '2.0.0.0/8 except;', '3.3.3.3/32'] self.naming.GetNetAddr.side_effect = [[nacaddr.IPv4(ip) for ip in includes], [nacaddr.IPv4(ip) for ip in excludes]] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_19, self.naming), EXP_INFO) output = str(msmpc) for result in expected: self.assertIn(result, output, 'expected "%s" in %s' % (result, output)) for result in unexpected: self.assertNotIn(result, output, 'unexpected "%s" in %s' % (result, output)) self.naming.GetNetAddr.assert_has_calls( [mock.call('INCLUDES'), mock.call('EXCLUDES')]) def testNoMatchReversal(self): includes = ['10.0.0.0/8', '10.0.0.0/10'] excludes = ['10.0.0.0/9'] expected = ['10.0.0.0/8;', '10.0.0.0/10;', '10.0.0.0/9 except;'] self.naming.GetNetAddr.side_effect = [[nacaddr.IPv4(ip) for ip in includes], [nacaddr.IPv4(ip) for ip in excludes]] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_19, self.naming), EXP_INFO) output = str(msmpc) for result in expected: self.assertIn(result, output) def testBuildTokens(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_35, self.naming), EXP_INFO) st, sst = msmpc._BuildTokens() self.assertSetEqual(st, SUPPORTED_TOKENS) self.assertDictEqual(sst, SUPPORTED_SUB_TOKENS) def testRangedPorts(self): self.naming.GetServiceByProto.side_effect = [['67'], ['68']] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + RANGE_PORTS_TERM, self.naming), EXP_INFO) self.assertIn('destination-port 67-68;', str(msmpc)) def testNotRangedPorts(self): self.naming.GetServiceByProto.side_effect = [['67'], ['69']] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + RANGE_PORTS_TERM, self.naming), EXP_INFO) self.assertNotIn('destination-port 67-68;', str(msmpc)) self.assertIn('destination-port 67;', str(msmpc)) self.assertIn('destination-port 69;', str(msmpc)) def testApplicationSets(self): self.naming.GetServiceByProto.side_effect = [['67'], ['69']] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + RANGE_PORTS_TERM, self.naming), EXP_INFO) expected = (' applications {\n' ' application test-filterranged-ports-1-app1 {\n' ' protocol udp;\n' ' destination-port 67;\n' ' }\n' ' application test-filterranged-ports-1-app2 {\n' ' protocol udp;\n' ' destination-port 69;\n' ' }\n' ' application-set test-filterranged-ports-1-app {\n' ' application test-filterranged-ports-1-app1;\n' ' application test-filterranged-ports-1-app2;\n' ' }\n' ' }\n') self.assertIn(expected, 
str(msmpc)) def testGroup(self): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + DEFAULT_TERM_1, self.naming), EXP_INFO) self.assertEqual('b;', msmpc._Group(['B'])) self.assertEqual('B;', msmpc._Group(['B'], lc=False)) self.assertEqual('b;', msmpc._Group(['B'], lc=True)) self.assertEqual('100;', msmpc._Group([100])) self.assertEqual('100-200;', msmpc._Group([(100, 200)])) self.assertEqual('[ b a ];', msmpc._Group(['b', 'A'])) self.assertEqual('[ 99 101-199 ];', msmpc._Group([99, (101, 199)])) self.assertEqual('[ 99 101-199 ];', msmpc._Group([99, (101, 199)])) @parameterized.named_parameters( dict( testcase_name='MIXED_TO_V4', addresses=[[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[ ' term good-term-inet {\n' + ' from {\n' + ' source-address {\n' + ' 0.0.0.0/1;\n' + ' }\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }' ], notexpected=['2001::/33']), dict( testcase_name='V4_TO_MIXED', addresses=[ [nacaddr.IPv4('192.168.0.0/24')], [nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], ], expected=[ ' term good-term-inet {\n' + ' from {\n' + ' source-address {\n' + ' 192.168.0.0/24;\n' + ' }\n' + ' destination-address {\n' + ' 0.0.0.0/1;\n' + ' }' ], notexpected=['2001::/33']), dict( testcase_name='MIXED_TO_V6', addresses=[[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], expected=[ ' term good-term-inet6 {\n' + ' from {\n' + ' source-address {\n' + ' 2001::/33;\n' + ' }\n' + ' destination-address {\n' + ' 2201::/48;\n' + ' }' ], notexpected=['0.0.0.0/1']), dict( testcase_name='V6_TO_MIXED', addresses=[[nacaddr.IPv6('2201::/48')], [nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')]], expected=[ ' term good-term-inet6 {\n' + ' from {\n' + ' source-address {\n' + ' 2201::/48;\n' + ' }\n' + ' destination-address {\n' + ' 2001::/33;\n' + ' }' ], notexpected=['0.0.0.0/1']), dict( testcase_name='MIXED_TO_MIXED', addresses=[[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [ nacaddr.IPv4('192.168.0.0/24'), nacaddr.IPv6('2201::/48') ]], expected=[ ' term good-term-inet {\n' + ' from {\n' + ' source-address {\n' + ' 0.0.0.0/1;\n' + ' }\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }', ' term good-term-inet6 {\n' + ' from {\n' + ' source-address {\n' + ' 2001::/33;\n' + ' }\n' + ' destination-address {\n' + ' 2201::/48;\n' + ' }' ], notexpected=[]), dict( testcase_name='V4_TO_V4', addresses=[[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 0.0.0.0/1;\n' + ' }\n' + ' destination-address {\n' + ' 192.168.0.0/24;\n' + ' }' ], notexpected=[]), dict( testcase_name='V6_TO_V6', addresses=[[nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], expected=[ ' term good-term {\n' + ' from {\n' + ' source-address {\n' + ' 2001::/33;\n' + ' }\n' + ' destination-address {\n' + ' 2201::/48;\n' + ' }' ], notexpected=[]), dict( testcase_name='V4_TO_V6', addresses=[[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv6('2201::/48')]], expected=[], notexpected=['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'], ), dict( testcase_name='V6_TO_V4', addresses=[[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[], notexpected=['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'], ), dict( testcase_name='PARTLY_UNSPECIFIED', addresses=[[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], expected=['term good_term_25 '], notexpected=[ '0.0.0.0/1', 
'192.168.0.0/24', '2001::/33', '2201::/48', 'term good-term-both-icmp-and-icmpv6-' ], ), ) def testMixed(self, addresses, expected, notexpected): self.naming.GetNetAddr.side_effect = addresses self.naming.GetServiceByProto.return_value = ['25'] msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy( GOOD_HEADER_MIXED + MIXED_TESTING_TERM + GOOD_TERM_25, self.naming), EXP_INFO) output = str(msmpc) for expect in expected: self.assertIn(expect, output, output) for notexpect in notexpected: self.assertNotIn(notexpect, output, output) @parameterized.named_parameters( dict( testcase_name='MIXED_TO_V4', addresses=[[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[ ' term good-term-icmp-inet {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-inet {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2-inet {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=['2001::/33']), dict( testcase_name='V4_TO_MIXED', addresses=[ [nacaddr.IPv4('192.168.0.0/24')], [nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], ], expected=[ ' term good-term-icmp-inet {\n' ' from {\n' ' source-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' destination-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-inet {\n' ' from {\n' ' source-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' destination-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2-inet {\n' ' from {\n' ' source-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' destination-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=['2001::/33']), dict( testcase_name='MIXED_TO_V6', addresses=[[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], expected=[ ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-inet6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-inet6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2-inet6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' 
' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=['0.0.0.0/1']), dict( testcase_name='V6_TO_MIXED', addresses=[[nacaddr.IPv6('2201::/48')], [nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')]], expected=[ ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-inet6 {\n' ' from {\n' ' source-address {\n' ' 2201::/48;\n' ' }\n' ' destination-address {\n' ' 2001::/33;\n' ' }\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-inet6 {\n' ' from {\n' ' source-address {\n' ' 2201::/48;\n' ' }\n' ' destination-address {\n' ' 2001::/33;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2-inet6 {\n' ' from {\n' ' source-address {\n' ' 2201::/48;\n' ' }\n' ' destination-address {\n' ' 2001::/33;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=['0.0.0.0/1']), dict( testcase_name='MIXED_TO_MIXED', addresses=[[nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('2001::/33')], [ nacaddr.IPv4('192.168.0.0/24'), nacaddr.IPv6('2201::/48') ]], expected=[ ' term good-term-icmp-inet {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-inet6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-inet {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-inet6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2-inet {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2-inet6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=[]), dict( testcase_name='V4_TO_V4', addresses=[[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[ ' term good-term-icmp {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6 {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' 
destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2 {\n' ' from {\n' ' source-address {\n' ' 0.0.0.0/1;\n' ' }\n' ' destination-address {\n' ' 192.168.0.0/24;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=[]), dict( testcase_name='V6_TO_V6', addresses=[[nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], expected=[ ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ' term good-term-both-icmp-and-icmpv6-2 {\n' ' from {\n' ' source-address {\n' ' 2001::/33;\n' ' }\n' ' destination-address {\n' ' 2201::/48;\n' ' }\n' ' application-sets test-filterd-term-both-icmp-and-icmpv6-app;\n' ' }', ], notexpected=[]), dict( testcase_name='V4_TO_V6', addresses=[[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv6('2201::/48')]], expected=[ ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ], notexpected=['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'], ), dict( testcase_name='V6_TO_V4', addresses=[[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[ ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ], notexpected=['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'], ), dict( testcase_name='PARTLY_UNSPECIFIED', addresses=[[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], expected=[ ' term good-term-icmp-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmp-app;\n' ' }', ' term good-term-icmpv6-2 {\n' ' from {\n' ' application-sets test-filtergood-term-icmpv6-app;\n' ' }', ], notexpected=[ '0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48', 'term good-term-icmp-i', 'term good-term-icmpv6-i', 'term good-term-both-icmp-and-icmpv6-' ], ), ) def testMixedICMP(self, addresses, expected, notexpected): self.naming.GetNetAddr.side_effect = addresses * 4 msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED + MIXED_TESTING_TERM_ICMP, self.naming), EXP_INFO) output = str(msmpc) for expect in expected: self.assertIn(expect, output, output) for notexpect in notexpected: self.assertNotIn(notexpect, output, output) @parameterized.named_parameters( dict(testcase_name='true', option='true', want_logging=True), dict(testcase_name='True', option='True', want_logging=True), dict(testcase_name='syslog', option='syslog', want_logging=True), dict(testcase_name='local', option='local', want_logging=True), dict(testcase_name='disable', option='disable', want_logging=False), dict(testcase_name='log-both', option='log-both', want_logging=True), ) def testLogging(self, option, want_logging): self.naming.GetNetAddr.return_value = 
[nacaddr.IPv4('192.168.0.0/24')] self.naming.GetServiceByProto.return_value = ['25'] expected_output = ( ' test-filter {\n' + ' services {\n' + ' stateful-firewall {\n' + ' rule test-filter {\n' + ' match-direction input-output;\n' + ' term good-term-1 {\n' + ' from {\n' + ' application-sets ' 'test-filtergood-term-1-app;\n' + ' }\n' + ' then {\n' + ' accept;\n' + (' syslog;\n' if want_logging else '') + ' }\n' + ' }\n' + ' }\n' + ' }\n' + ' }\n' + ' applications {\n' + ' application test-filtergood-term-1-app1 {\n' + ' protocol icmp;\n' + ' }\n' + ' application-set test-filtergood-term-1-app {\n' + ' application test-filtergood-term-1-app1;\n' + ' }\n' + ' }\n' + ' }\n' + '}\n' + 'apply-groups test-filter;') msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED_IMPLICIT + (LOGGING_TERM % option), self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expected_output, output, output) @parameterized.named_parameters( dict( testcase_name='default', header=GOOD_HEADER, direction='input-output'), dict( testcase_name='ingress', header=GOOD_HEADER_INGRESS, direction='input'), dict( testcase_name='egress', header=GOOD_HEADER_EGRESS, direction='output')) def testDirection(self, header, direction): msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(header + GOOD_TERM_3, self.naming), EXP_INFO) output = str(msmpc) expected_output = (' rule test-filter {\n' + ' match-direction %s;') self.assertIn(expected_output % direction, output, output) def testBadDirectionCombo(self): pol = policy.ParsePolicy(BAD_HEADER_DIRECTION + GOOD_TERM_3, self.naming) self.assertRaises(junipermsmpc.ConflictingTargetOptionsError, junipermsmpc.JuniperMSMPC, pol, EXP_INFO) def testTermNameCollision(self): short_append = '1' * ( junipermsmpc.MAX_IDENTIFIER_LEN // 2 - len('?ood-term-1')) long_append = short_append + '1' not_too_long_name = (TERM_NAME_COLLISION % (short_append, short_append)) too_long_name = (TERM_NAME_COLLISION % (long_append, long_append)) pol = policy.ParsePolicy(GOOD_HEADER + too_long_name, self.naming) self.assertRaises(junipermsmpc.ConflictingApplicationSetsError, junipermsmpc.JuniperMSMPC, pol, EXP_INFO) _ = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER + not_too_long_name, self.naming), EXP_INFO) def testSlashZeroReplacement(self): self.naming.GetNetAddr.return_value = ([ nacaddr.IPv4('0.0.0.0/0'), nacaddr.IPv6('::/0') ]) self.naming.GetServiceByProto.return_value = ['25'] expectedv4 = (' term good-term-2-inet {\n' + ' from {\n' + ' destination-address {\n' + ' any-ipv4;\n' + ' }') expectedv6 = (' term good-term-2-inet6 {\n' + ' from {\n' + ' destination-address {\n' + ' any-ipv6;\n' + ' }') msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expectedv4, output, output) self.assertIn(expectedv6, output, output) def testV6SlashFourteenReplacement(self): self.naming.GetNetAddr.return_value = ([ nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv6('::/14') ]) self.naming.GetServiceByProto.return_value = ['25'] expectedv4 = (' term good-term-2-inet {\n' + ' from {\n' + ' destination-address {\n' + ' 0.0.0.0/1;\n' + ' }') expectedv6 = (' term good-term-2-inet6 {\n' + ' from {\n' + ' destination-address {\n' + ' ::/16;\n' + ' 1::/16;\n' + ' 2::/16;\n' + ' 3::/16;\n' + ' }') msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy(GOOD_HEADER_MIXED + GOOD_TERM_1, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expectedv4, output, output) self.assertIn(expectedv6, output, output) 
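  # The cases below check how protocols are rendered in the generated
  # applications: names supported by MS-MPC (e.g. tcp) are expected to be
  # emitted as-is, while others are emitted by protocol number
  # (hopopt -> 0, vrrp -> 112).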
@parameterized.named_parameters( dict(testcase_name='tcp', protoname='tcp', protonum='tcp'), dict(testcase_name='hopopt', protoname='hopopt', protonum='0'), dict(testcase_name='vrrp', protoname='vrrp', protonum='112'), ) def testProtocolAsNumber(self, protoname, protonum): expected = (' application test-filtergood-term-numeric-app1 {\n' + ' protocol %s;') % protonum msmpc = junipermsmpc.JuniperMSMPC( policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_TERM_NUMERIC_PROTOCOL % protoname, self.naming), EXP_INFO) output = str(msmpc) self.assertIn(expected, output, output) def testSupportedNamedProtocols(self): supported_as_names = junipermsmpc.Term._SUPPORTED_PROTOCOL_NAMES all_supported_protocols = junipermsmpc.Term.PROTO_MAP.keys() for want in supported_as_names: self.assertIn(want, all_supported_protocols) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/junipersrx_test.py000066400000000000000000002030671437377527500207120ustar00rootroot00000000000000# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit test for Juniper SRX acl rendering module.""" import copy import datetime import re from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import junipersrx from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust } """ GOOD_HEADER_2 = """ header { comment:: "This is a header from untrust to trust" target:: srx from-zone untrust to-zone trust } """ GOOD_HEADER_3 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust inet } """ GOOD_HEADER_4 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust inet6 } """ GOOD_HEADER_5 = """ header { target:: srx from-zone trust to-zone untrust inet apply-groups:: tcp-test1 tcp-test2 } """ GOOD_HEADER_6 = """ header { target:: srx from-zone trust to-zone untrust inet apply-groups-except:: tcp-test1 tcp-test2 } """ GOOD_HEADER_7 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust address-book-zone inet } """ GOOD_HEADER_8 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust address-book-zone inet6 } """ GOOD_HEADER_9 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust address-book-zone } """ GOOD_HEADER_10 = """ header { comment:: "This is a test acl with a global policy" target:: srx from-zone all to-zone all address-book-global } """ GOOD_HEADER_11 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone dmz } """ GOOD_HEADER_12 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone untrust to-zone trust address-book-zone inet } """ GOOD_HEADER_13 = """ header { comment:: 
"This is a test acl with a comment" target:: srx from-zone trust to-zone untrust inet expresspath } """ GOOD_HEADER_14 = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust expresspath inet } """ GOOD_HEADER_NOVERBOSE = """ header { comment:: "This is a test acl with a comment" target:: srx from-zone trust to-zone untrust noverbose } """ BAD_HEADER = """ header { target:: srx something } """ BAD_HEADER_1 = """ header { comment:: "This header has two address families" target:: srx from-zone trust to-zone untrust inet6 mixed } """ BAD_HEADER_3 = """ header { comment:: "This is a test acl with a global policy" target:: srx from-zone all to-zone all address-book-zone } """ BAD_HEADER_4 = """ header { comment:: "This is a test acl with a global policy" target:: srx from-zone test to-zone all } """ GOOD_TERM_1 = """ term good-term-1 { comment:: "This header is very very very very very very very very very very very very very very very very very very very very large" destination-address:: SOME_HOST destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { destination-address:: SOME_HOST destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { destination-address:: SOME_HOST protocol:: tcp action:: accept vpn:: good-vpn-3 } """ GOOD_TERM_4 = """ term good-term-4 { destination-address:: SOME_HOST protocol:: tcp action:: accept vpn:: good-vpn-4 policy-4 } """ GOOD_TERM_LOG_1 = """ term good-term-5 { action:: accept logging:: log-both } """ GOOD_TERM_LOG_2 = """ term good-term-5 { action:: deny logging:: log-both } """ GOOD_TERM_LOG_3 = """ term good-term-5 { action:: accept logging:: true } """ GOOD_TERM_LOG_4 = """ term good-term-5 { action:: deny logging:: true } """ GOOD_TERM_COUNT_1 = """ term good-term-6 { counter:: good-counter action:: accept } """ GOOD_TERM_COUNT_2 = """ term good-term-6 { counter:: good-counter action:: deny } """ GOOD_TERM_10 = """ term good-term-10 { destination-address:: SOME_HOST action:: accept dscp-set:: b111000 } """ GOOD_TERM_11 = """ term good-term-11 { destination-address:: SOME_HOST action:: accept dscp-set:: af42 dscp-match:: af41-af42 5 dscp-except:: be } """ GOOD_TERM_12 = """ term dup-of-term-1 { destination-address:: FOOBAR destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_13 = """ term dup-of-term-1 { destination-address:: FOOBAR SOME_HOST destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_14 = """ term term_to_split { source-address:: FOOBAR destination-address:: SOME_HOST destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_15 = """ term good-term-15 { destination-address:: SOME_HOST destination-port:: SMTP protocol:: tcp policer:: batman action:: accept } """ GOOD_TERM_16 = """ term good-term-16 { destination-address:: BAZ destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_17 = """ term term_to_split { destination-address:: FOOBAR SOME_HOST destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_18 = """ term good_term_18 { source-exclude:: SMALL protocol:: tcp action:: accept } """ GOOD_TERM_19 = """ term good_term_19 { source-address:: LARGE source-exclude:: SMALL protocol:: tcp action:: accept } """ GOOD_TERM_20 = """ term good_term_20 { destination-address:: FOO destination-port:: HTTP protocol:: tcp action:: accept } """ GOOD_TERM_21 = """ term good_term_21 { destination-address:: UDON destination-port:: HTTP protocol:: tcp 
action:: accept } """ GOOD_TERM_21 = """ term good_term_21 { destination-address:: FOO destination-port:: QUIC protocol:: udp action:: accept } """ GOOD_TERM_23 = """ term good_term_23 { action:: accept } """ BAD_TERM_1 = """ term bad-term-1 { destination-address:: SOME_HOST protocol:: tcp action:: deny vpn:: good-vpn-4 policy-4 } """ TCP_ESTABLISHED_TERM = """ term tcp-established-term { source-address:: SOME_HOST source-port:: SMTP protocol:: tcp option:: tcp-established action:: accept } """ UDP_ESTABLISHED_TERM = """ term udp-established-term { source-address:: FOO source-port:: QUIC protocol:: udp option:: established action:: accept } """ ICMP_RESPONSE_TERM = """ term icmp_response-term { protocol:: icmp icmp-type:: echo-reply action:: accept } """ EXPIRED_TERM_1 = """ term expired_test { expiration:: 2000-1-1 action:: deny } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ ICMP_TYPE_TERM_1 = """ term test-icmp { protocol:: icmp icmp-type:: echo-request echo-reply action:: accept } """ # For testing when the number of terms is at the 8 term application limit LONG_IPV6_ICMP_TERM = """ term accept-icmpv6-types { protocol:: icmpv6 icmp-type:: echo-request echo-reply neighbor-solicit icmp-type:: neighbor-advertisement router-advertisement packet-too-big icmp-type:: parameter-problem time-exceeded action:: accept } """ # For testing when the number of terms goes over the 8 term application limit LONG_IPV6_ICMP_TERM2 = """ term accept-icmpv6-types { protocol:: icmpv6 icmp-type:: echo-request echo-reply neighbor-solicit icmp-type:: neighbor-advertisement router-advertisement packet-too-big icmp-type:: parameter-problem time-exceeded destination-unreachable action:: accept } """ ICMP_ALL_TERM = """ term accept-icmp-types { protocol:: icmp icmp-type:: echo-reply unreachable source-quench redirect alternate-address icmp-type:: echo-request router-advertisement router-solicitation icmp-type:: time-exceeded parameter-problem timestamp-request icmp-type:: timestamp-reply information-request information-reply icmp-type:: mask-request mask-reply conversion-error mobile-redirect action:: accept } """ ICMP6_ALL_TERM = """ term accept-icmpv6-types { protocol:: icmpv6 icmp-type:: destination-unreachable packet-too-big time-exceeded icmp-type:: parameter-problem echo-request echo-reply icmp-type:: multicast-listener-query multicast-listener-report icmp-type:: multicast-listener-done router-solicit router-advertisement icmp-type:: neighbor-solicit neighbor-advertisement redirect-message icmp-type:: router-renumbering icmp-node-information-query icmp-type:: icmp-node-information-response icmp-type:: inverse-neighbor-discovery-solicitation icmp-type:: inverse-neighbor-discovery-advertisement icmp-type:: version-2-multicast-listener-report icmp-type:: home-agent-address-discovery-request icmp-type:: home-agent-address-discovery-reply mobile-prefix-solicitation icmp-type:: mobile-prefix-advertisement certification-path-solicitation icmp-type:: certification-path-advertisement multicast-router-advertisement icmp-type:: multicast-router-solicitation multicast-router-termination action:: accept } """ IPV6_ICMP_TERM = """ term test-ipv6_icmp { protocol:: icmpv6 icmp-type:: destination-unreachable packet-too-big icmp-type:: time-exceeded time-exceeded icmp-type:: echo-request echo-reply action:: accept } """ BAD_ICMP_TERM_1 = """ term test-icmp { icmp-type:: echo-request echo-reply action:: accept } """ ICMP_ONLY_TERM_1 = """ term test-icmp { protocol:: icmp action:: accept } """ 
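# NOTE: GOOD_TERM_21 is assigned twice above; the second definition
# (destination FOO, port QUIC, protocol udp) shadows the first and is the one
# the tests below exercise. Each term snippet is combined with a header and
# parsed, typically as:
#   pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming)
#   output = str(junipersrx.JuniperSRX(pol, EXP_INFO))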
OWNER_TERM = """ term owner-test { owner:: foo@google.com action:: accept } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ DEFAULT_TERM_1 = """ term default-term-1 { action:: deny } """ TIMEOUT_TERM = """ term timeout-term { protocol:: icmp icmp-type:: echo-request timeout:: 77 action:: accept } """ GLOBAL_ZONE_TERM = """ term global-zone-term { protocol:: icmp icmp-type:: echo-request source-zone:: szone2 szone1 destination-zone:: dzone2 dzone1 action:: accept } """ PLATFORM_EXCLUDE_TERM = """ term platform-exclude-term { protocol:: tcp udp platform-exclude:: srx action:: accept } """ PLATFORM_TERM = """ term platform-term { protocol:: tcp udp platform:: srx juniper action:: accept } """ PLATFORM_EXCLUDE_ADDRESS_TERM = """ term platform-exclude-term { protocol:: tcp udp source-address:: FOO platform-exclude:: srx action:: accept } """ SUPPORTED_TOKENS = frozenset({ 'action', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_zone', 'dscp_except', 'dscp_match', 'dscp_set', 'source_zone', 'expiration', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'timeout', 'translated', 'verbatim', 'vpn' }) SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'count', 'log', 'dscp'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 _IPSET = [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('2001:4860:8000::/33')] _IPSET2 = [nacaddr.IP('10.23.0.0/22'), nacaddr.IP('10.23.0.6/23', strict=False)] _IPSET3 = [nacaddr.IP('10.23.0.0/23')] _IPSET4 = [nacaddr.IP('10.0.0.0/20')] _IPSET5 = [nacaddr.IP('10.0.0.0/24')] class JuniperSRXTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testHeaderComment(self): pol = policy.ParsePolicy(GOOD_HEADER + ICMP_TYPE_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('This is a test acl with a comment', output, output) def testHeaderApplyGroups(self): pol = policy.ParsePolicy(GOOD_HEADER_5 + ICMP_TYPE_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('apply-groups [ tcp-test1 tcp-test2 ]', output, output) def testHeaderApplyGroupsExcept(self): pol = policy.ParsePolicy(GOOD_HEADER_6 + ICMP_TYPE_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('apply-groups-except [ tcp-test1 tcp-test2 ]', output, output) def testLongComment(self): expected_output = """ /* This header is very very very very very very very very very very very very very very very very very very very very large */""" self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(srx) self.assertIn(expected_output, output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testTermAndFilterName(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO) output = str(srx) self.assertIn('policy good-term-1 {', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testVpnWithoutPolicy(self): self.naming.GetNetAddr.return_value = _IPSET srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3, self.naming), EXP_INFO) output = str(srx) self.assertIn('ipsec-vpn good-vpn-3;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testVpnWithPolicy(self): self.naming.GetNetAddr.return_value = _IPSET srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4, self.naming), EXP_INFO) output = str(srx) self.assertIn('ipsec-vpn good-vpn-4;', output, output) self.assertIn('pair-policy policy-4;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testVpnWithDrop(self): self.naming.GetNetAddr.return_value = _IPSET srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + BAD_TERM_1, self.naming), EXP_INFO) output = str(srx) self.assertNotIn('ipsec-vpn good-vpn-4;', output, output) self.assertNotIn('pair-policy policy-4;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testDefaultDeny(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + DEFAULT_TERM_1, self.naming), EXP_INFO) output = str(srx) self.assertIn('deny;', output, output) def testIcmpTypes(self): pol = policy.ParsePolicy(GOOD_HEADER + ICMP_TYPE_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('application test-icmp-app;', output, output) self.assertIn('application test-icmp-app {', 
output, output) self.assertIn('term t1 protocol icmp icmp-type 0 inactivity-timeout 60', output, output) self.assertIn('term t2 protocol icmp icmp-type 8 inactivity-timeout 60', output, output) def testLongIcmpTypes(self): pol = policy.ParsePolicy(GOOD_HEADER + LONG_IPV6_ICMP_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # Make sure that the application isn't split into an application set # due to ICMP term usage up to 8 terms. self.assertNotIn('application-set accept-icmpv6-types-app', output) self.assertIn('application accept-icmpv6-types-app;', output) # Use regex to check for there being a single application with exactly 8 # terms in it. pattern = re.compile( r'application accept-icmpv6-types-app \{\s+(term t\d protocol icmp6 icmp6-type \d{1,3} inactivity-timeout 60;\s+){8}\}' ) self.assertTrue(pattern.search(output), output) def testLongSplitIcmpTypes(self): pol = policy.ParsePolicy(GOOD_HEADER + LONG_IPV6_ICMP_TERM2, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # Check the application was split into a set of many applications; 9 terms. pattern = re.compile( r'application-set accept-icmpv6-types-app \{\s+(application accept-icmpv6-types-app\d;\s+){9}\}') self.assertTrue(pattern.search(output), output) # Check that each of the 9 applications with 1 term each. pattern = re.compile( r'(application accept-icmpv6-types-app\d \{\s+(term t1 protocol icmp6 icmp6-type \d{1,3} inactivity-timeout 60;\s+)\}\s+){9}' ) self.assertTrue(pattern.search(output), output) def testAllIcmpTypes(self): pol = policy.ParsePolicy(GOOD_HEADER + ICMP_ALL_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # Check for split into application set of many applications; 18 terms. pattern = re.compile( r'application-set accept-icmp-types-app \{\s+(application accept-icmp-types-app\d{1,2};\s+){18}\}') self.assertTrue(pattern.search(output), output) # Check that each of the 18 applications have 1 term each. pattern = re.compile( r'(application accept-icmp-types-app\d{1,2} \{\s+(term t1 protocol icmp icmp-type \d{1,3} inactivity-timeout 60;\s+)\}\s+){18}' ) self.assertTrue(pattern.search(output), output) def testAllIcmp6Types(self): pol = policy.ParsePolicy(GOOD_HEADER + ICMP6_ALL_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # Check for 29 applications. pattern = re.compile( r'application-set accept-icmpv6-types-app \{\s+(application accept-icmpv6-types-app\d{1,2};\s+){29}\}' ) self.assertTrue(pattern.search(output), output) # Check that each of the 4 applications have between 1 and 8 terms. 
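    # (The regex below expects 29 applications, each carrying exactly one
    # icmp6-type term.)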
pattern = re.compile( r'(application accept-icmpv6-types-app\d{1,2} \{\s+(term t1 protocol icmp6 icmp6-type \d{1,3} inactivity-timeout 60;\s+)\}\s+){29}' ) self.assertTrue(pattern.search(output), output) def testLoggingBothAccept(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LOG_1, self.naming), EXP_INFO) output = str(srx) self.assertIn('session-init;', output) self.assertIn('session-close;', output) def testLoggingBothDeny(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LOG_2, self.naming), EXP_INFO) output = str(srx) self.assertIn('session-init;', output) self.assertIn('session-close;', output) def testLoggingTrueAccept(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LOG_3, self.naming), EXP_INFO) output = str(srx) self.assertIn('session-close;', output) self.assertNotIn('session-init;', output) def testLoggingTrueDeny(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_LOG_4, self.naming), EXP_INFO) output = str(srx) self.assertIn('session-init;', output) self.assertNotIn('session-close;', output) def testCounterAccept(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_COUNT_1, self.naming), EXP_INFO) output = str(srx) self.assertIn('count;', output) def testCounterDeny(self): srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_COUNT_2, self.naming), EXP_INFO) output = str(srx) self.assertIn('count;', output) def testOwnerTerm(self): pol = policy.ParsePolicy(GOOD_HEADER + OWNER_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn(' /*\n' ' Owner: foo@google.com\n' ' */', output, output) def testBadICMP(self): pol = policy.ParsePolicy(GOOD_HEADER + BAD_ICMP_TERM_1, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, junipersrx.JuniperSRX, pol, EXP_INFO) def testICMPProtocolOnly(self): pol = policy.ParsePolicy(GOOD_HEADER + ICMP_ONLY_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('protocol icmp;', output, output) def testMultipleProtocolGrouping(self): pol = policy.ParsePolicy(GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('application-set multi-proto-app {', output, output) self.assertIn('application multi-proto-app1;', output, output) self.assertIn('application multi-proto-app2;', output, output) self.assertIn('application multi-proto-app3;', output, output) self.assertIn('application multi-proto-app1 {', output, output) self.assertIn('term t1 protocol tcp;', output, output) self.assertIn('application multi-proto-app2 {', output, output) self.assertIn('term t2 protocol udp;', output, output) self.assertIn('application multi-proto-app3 {', output, output) self.assertIn('term t3 protocol icmp;', output, output) def testGlobalPolicyHeader(self): pol = policy.ParsePolicy(GOOD_HEADER_10 + MULTIPLE_PROTOCOLS_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertEqual(output.count('global {'), 2) self.assertNotIn('from-zone all to-zone all {', output) def testBadGlobalPolicyHeaderZoneBook(self): pol = policy.ParsePolicy(BAD_HEADER_3 + MULTIPLE_PROTOCOLS_TERM, self.naming) self.assertRaises(junipersrx.UnsupportedFilterError, junipersrx.JuniperSRX, pol, EXP_INFO) def testBadGlobalPolicyHeaderNameAll(self): pol = policy.ParsePolicy(BAD_HEADER_4 + MULTIPLE_PROTOCOLS_TERM, self.naming) self.assertRaises(junipersrx.UnsupportedFilterError, 
junipersrx.JuniperSRX, pol, EXP_INFO) def testBadHeaderType(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(BAD_HEADER + GOOD_TERM_1, self.naming) self.assertRaises(junipersrx.UnsupportedFilterError, junipersrx.JuniperSRX, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testBadHeaderMultiAF(self): # test for multiple address faimilies in header self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(BAD_HEADER_1 + GOOD_TERM_1, self.naming) self.assertRaises(junipersrx.ConflictingTargetOptionsError, junipersrx.JuniperSRX, pol, EXP_INFO) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') @mock.patch.object(junipersrx.logging, 'warning') def testExpiredTerm(self, mock_warn): _ = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + EXPIRED_TERM_1, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s>%s is expired.', 'expired_test', 'trust', 'untrust') @mock.patch.object(junipersrx.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) _ = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s>%s expires in ' 'less than two weeks.', 'is_expiring', 'trust', 'untrust') def testTimeout(self): pol = policy.ParsePolicy(GOOD_HEADER + TIMEOUT_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('timeout 77', output, output) def testZoneGlobal(self): pol = policy.ParsePolicy(GOOD_HEADER_10 + GLOBAL_ZONE_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('from-zone', output, output) self.assertIn('szone1 szone2', output, output) self.assertIn('to-zone', output, output) self.assertIn('dzone1 dzone2', output, output) def testZoneNonGlobal(self): pol = policy.ParsePolicy(GOOD_HEADER + GLOBAL_ZONE_TERM, self.naming) self.assertRaises(junipersrx.UnsupportedFilterError, junipersrx.JuniperSRX, pol, EXP_INFO) def testIcmpV6(self): pol = policy.ParsePolicy(GOOD_HEADER + IPV6_ICMP_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('protocol icmp6', output, output) self.assertIn('icmp6-type', output, output) def testReplaceStatement(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('replace: address-book', output, output) self.assertIn('replace: policies', output, output) self.assertIn('replace: applications', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testAdressBookBothAFs(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('replace: address-book {', output, output) self.assertIn('global {', output, output) self.assertIn('2001:4860:8000::/33', output, output) self.assertIn('10.0.0.0/8', output, 
output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testAdressBookIPv4(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('replace: address-book {', output, output) self.assertIn('global {', output, output) self.assertNotIn('2001:4860:8000::/33', output, output) self.assertIn('10.0.0.0/8', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testAdressBookIPv6(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('replace: address-book {', output, output) self.assertIn('global {', output, output) self.assertIn('2001:4860:8000::/33', output, output) self.assertNotIn('10.0.0.0/8', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testAddressBookContainsSmallerPrefix(self): _IPSET2[0].parent_token = 'FOOBAR' _IPSET2[1].parent_token = 'SOME_HOST' _IPSET3[0].parent_token = 'FOOBAR' self.naming.GetNetAddr.side_effect = [_IPSET2, _IPSET3] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1 + GOOD_HEADER_2 + GOOD_TERM_12, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('address FOOBAR_0 10.23.0.0/22;', output, output) self.naming.GetNetAddr.assert_has_calls([ mock.call('SOME_HOST'), mock.call('FOOBAR')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SMTP', 'tcp')] * 2) def testAddressBookContainsLargerPrefix(self): _IPSET2[0].parent_token = 'FOOBAR' _IPSET2[1].parent_token = 'SOME_HOST' _IPSET3[0].parent_token = 'FOOBAR' self.naming.GetNetAddr.side_effect = [_IPSET3, _IPSET2] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_12 + GOOD_HEADER + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('address FOOBAR_0 10.23.0.0/22;', output, output) self.naming.GetNetAddr.assert_has_calls([ mock.call('FOOBAR'), mock.call('SOME_HOST')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SMTP', 'tcp')] * 2) def testZoneAdressBookBothAFs(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_9 + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('security-zone untrust {', output, output) self.assertIn('replace: address-book {', output, output) self.assertIn('2001:4860:8000::/33', output, output) self.assertIn('10.0.0.0/8', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testZoneAdressBookIPv4(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_7 + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('security-zone untrust {', output, output) self.assertIn('replace: address-book {', output, output) 
self.assertNotIn('2001:4860:8000::/33', output, output) self.assertIn('10.0.0.0/8', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testZoneAdressBookIPv6(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_8 + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('security-zone untrust {', output, output) self.assertIn('replace: address-book {', output, output) self.assertIn('2001:4860:8000::/33', output, output) self.assertNotIn('10.0.0.0/8', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def assertFalseUnorderedAddressBook(self, address_book): # This is very naive check that expects addresses to be exact as returned # from _OutOfOrderAddresses method. If you modify one please modify this one # as well. for line in address_book: if '10.0.0.0/8' in line: self.fail('Addresses in address book are out of order.') elif '1.0.0.0/8' in line: break def _OutOfOrderAddresses(self): x = nacaddr.IP('10.0.0.0/8') x.parent_token = 'test' y = nacaddr.IP('1.0.0.0/8') y.parent_token = 'out_of_order' return x, y def testAddressBookOrderingSuccess(self): self.naming.GetNetAddr.return_value = self._OutOfOrderAddresses() self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_2, self.naming) p = junipersrx.JuniperSRX(pol, EXP_INFO) self.assertFalseUnorderedAddressBook(p._GenerateAddressBook()) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testAddressBookOrderingAlreadyOrdered(self): y, x = self._OutOfOrderAddresses() self.naming.GetNetAddr.return_value = [x, y] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_2, self.naming) p = junipersrx.JuniperSRX(pol, EXP_INFO) self.assertFalseUnorderedAddressBook(p._GenerateAddressBook()) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def _AssertOrder(self, strings, expected_order): order = copy.copy(expected_order) matcher = order.pop(0) for line in strings: if matcher in line: if not order: return matcher = order.pop(0) self.fail('Strings weren\'t in expected order.\nExpected:\n %s\n\nGot:\n%s' % ('\n '.join(expected_order), '\n'.join(strings))) def testApplicationsOrderingSuccess(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.side_effect = [['80', '80'], ['25', '25']] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_2 + GOOD_TERM_1, self.naming) p = junipersrx.JuniperSRX(pol, EXP_INFO) self._AssertOrder(p._GenerateApplications(), ['application good-term-1-app1', 'application good-term-2-app1', 'application-set good-term-1-app', 'application-set good-term-2-app']) self.naming.GetNetAddr.assert_has_calls( [mock.call('SOME_HOST')] * 2) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SMTP', 'tcp')] * 2) def testApplicationsOrderingAlreadyOrdered(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.side_effect = [['25', '25'], ['80', '80']] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_1 + GOOD_TERM_2, self.naming) p = junipersrx.JuniperSRX(pol, EXP_INFO) self._AssertOrder(p._GenerateApplications(), 
['application good-term-1-app1', 'application good-term-2-app1', 'application-set good-term-1-app', 'application-set good-term-2-app']) self.naming.GetNetAddr.assert_has_calls( [mock.call('SOME_HOST')] * 2) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SMTP', 'tcp')] * 2) def testDscpWithByte(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_10, self.naming), EXP_INFO) output = str(srx) self.assertIn('dscp b111000;', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testDscpWithClass(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11, self.naming), EXP_INFO) output = str(srx) self.assertIn('dscp af42;', output, output) self.assertIn('dscp [ af41-af42 5 ];', output, output) self.assertIn('dscp-except [ be ];', output, output) self.naming.GetNetAddr.assert_called_once_with('SOME_HOST') def testLargeTermSplitting(self): ips = list(nacaddr.IP('10.0.8.0/21').subnets(new_prefix=32)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 ips = list(nacaddr.IP('10.0.0.0/21').subnets(new_prefix=32)) prodcolos_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: prodcolos_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetNetAddr.side_effect = [mo_ips, prodcolos_ips] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_14, self.naming) srx = junipersrx.JuniperSRX(pol, EXP_INFO) self.assertEqual(len(srx.policy.filters[0][1]), 4) self.naming.GetNetAddr.assert_has_calls([ mock.call('FOOBAR'), mock.call('SOME_HOST')]) self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testLargeTermSplittingV6(self): ips = list(nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/119' ).subnets(new_prefix=128)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 ips = list(nacaddr.IP('2720:0:1000:3103:eca0:2c09:6b32:e000/119' ).subnets(new_prefix=128)) prodcolos_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: prodcolos_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetNetAddr.side_effect = [mo_ips, prodcolos_ips] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_14, self.naming) srx = junipersrx.JuniperSRX(pol, EXP_INFO) self.assertEqual(len(srx.policy.filters[0][1]), 4) self.naming.GetNetAddr.assert_has_calls([ mock.call('FOOBAR'), mock.call('SOME_HOST')]) self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testLargeTermSplitIgnoreV6(self): ips = list(nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/119' ).subnets(new_prefix=128)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 ips = list(nacaddr.IP('2720:0:1000:3103:eca0:2c09:6b32:e000/119' ).subnets(new_prefix=128)) ips.append(nacaddr.IPv4('10.0.0.1/32')) prodcolos_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: prodcolos_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetNetAddr.side_effect = [mo_ips, prodcolos_ips] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_14, self.naming) srx = junipersrx.JuniperSRX(pol, EXP_INFO) self.assertEqual(len(srx.policy.filters[0][1]), 1) def testDuplicateTermsInDifferentZones(self): self.naming.GetNetAddr.return_value = _IPSET 
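    # The same term name is reused under two different zone pairs but
    # resolves to different ports below, which should yield conflicting
    # application sets.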
self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2 + GOOD_HEADER_11 + GOOD_TERM_2, self.naming) self.assertRaises(junipersrx.ConflictingApplicationSetsError, junipersrx.JuniperSRX, pol, EXP_INFO) self.naming.GetNetAddr.assert_has_calls( [mock.call('SOME_HOST')] * 2) self.naming.GetServiceByProto.assert_has_calls( [mock.call('SMTP', 'tcp')] * 2) def testBuildTokens(self): self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol1 = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol1 = junipersrx.JuniperSRX(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_15, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testOptimizedGlobalAddressBook(self): foobar_ips = [nacaddr.IP('172.16.0.0/16', token='FOOBAR'), nacaddr.IP('172.17.0.0/16', token='FOOBAR'), nacaddr.IP('172.18.0.0/16', token='FOOBAR'), nacaddr.IP('172.19.0.0/16', token='FOOBAR'), nacaddr.IP('172.22.0.0/16', token='FOOBAR'), nacaddr.IP('172.23.0.0/16', token='FOOBAR'), nacaddr.IP('172.24.0.0/16', token='FOOBAR'), nacaddr.IP('172.25.0.0/16', token='FOOBAR'), nacaddr.IP('172.26.0.0/16', token='FOOBAR'), nacaddr.IP('172.27.0.0/16', token='FOOBAR'), nacaddr.IP('172.28.0.0/16', token='FOOBAR'), nacaddr.IP('172.29.0.0/16', token='FOOBAR'), nacaddr.IP('172.30.0.0/16', token='FOOBAR'), nacaddr.IP('172.31.0.0/16', token='FOOBAR')] some_host_ips = [nacaddr.IP('172.20.0.0/16', token='SOME_HOST'), nacaddr.IP('172.21.0.0/16', token='SOME_HOST'), nacaddr.IP('10.0.0.0/8', token='SOME_HOST')] self.naming.GetNetAddr.side_effect = [foobar_ips, some_host_ips, some_host_ips] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_17 + GOOD_HEADER_2 + GOOD_TERM_15, self.naming) srx = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('address FOOBAR_0 172.16.0.0/14', srx, srx) self.assertIn('address FOOBAR_1 172.22.0.0/15;', srx, srx) self.assertIn('address FOOBAR_2 172.24.0.0/13;', srx, srx) self.assertIn('address SOME_HOST_0 10.0.0.0/8;', srx, srx) self.assertIn('address SOME_HOST_1 172.20.0.0/15;', srx, srx) self.assertNotIn('/16', srx, srx) def testNakedExclude(self): small = [nacaddr.IP('10.0.0.0/24', 'SMALL', 'SMALL')] self.naming.GetNetAddr.side_effect = [small] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_18, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn( 'address GOOD_TERM_18_SRC_EXCLUDE_2 10.0.1.0/24;', output, output) self.assertIn( 'address GOOD_TERM_18_SRC_EXCLUDE_3 10.0.2.0/23;', output, output) self.assertIn( 'address GOOD_TERM_18_SRC_EXCLUDE_4 10.0.4.0/22;', output, output) self.assertIn( 'address GOOD_TERM_18_SRC_EXCLUDE_5 10.0.8.0/21;', output, output) self.assertNotIn('10.0.0.0', output) def testSourceExclude(self): large = [nacaddr.IP('10.0.0.0/20', 'LARGE', 'LARGE')] small = [nacaddr.IP('10.0.0.0/24', 'SMALL', 'SMALL')] self.naming.GetNetAddr.side_effect = [large, small] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_19, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn( 'address GOOD_TERM_19_SRC_EXCLUDE_0 10.0.1.0/24;', output, output) self.assertIn( 'address GOOD_TERM_19_SRC_EXCLUDE_1 10.0.2.0/23;', output, output) 
self.assertIn( 'address GOOD_TERM_19_SRC_EXCLUDE_2 10.0.4.0/22;', output, output) self.assertIn( 'address GOOD_TERM_19_SRC_EXCLUDE_3 10.0.8.0/21;', output, output) self.assertNotIn('10.0.0.0/24', output) def testPlatformExclude(self): large = [nacaddr.IP('10.0.0.0/20', 'LARGE', 'LARGE')] small = [nacaddr.IP('10.0.0.0/24', 'SMALL', 'SMALL')] self.naming.GetNetAddr.side_effect = [large, small] pol = policy.ParsePolicy(GOOD_HEADER + PLATFORM_EXCLUDE_TERM + GOOD_TERM_19, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('good_term_19', output, output) self.assertNotIn('platform-exclude-term', output) def testPlatformTerm(self): large = [nacaddr.IP('10.0.0.0/20', 'LARGE', 'LARGE')] small = [nacaddr.IP('10.0.0.0/24', 'SMALL', 'SMALL')] self.naming.GetNetAddr.side_effect = [large, small] pol = policy.ParsePolicy(GOOD_HEADER + PLATFORM_TERM + GOOD_TERM_19, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('good_term_19', output, output) self.assertIn('platform-term', output, output) def testPlatformExcludeWithSourceExclude(self): foo = [nacaddr.IP('192.1.0.0/20', 'FOO', 'FOO')] large = [nacaddr.IP('10.0.0.0/20', 'LARGE', 'LARGE')] small = [nacaddr.IP('10.0.0.0/24', 'SMALL', 'SMALL')] self.naming.GetNetAddr.side_effect = [foo, large, small] pol = policy.ParsePolicy( GOOD_HEADER + PLATFORM_EXCLUDE_ADDRESS_TERM + GOOD_TERM_19, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('address GOOD_TERM_19_SRC_EXCLUDE_0 10.0.1.0/24;', output, output) self.assertIn('address GOOD_TERM_19_SRC_EXCLUDE_1 10.0.2.0/23;', output, output) self.assertIn('address GOOD_TERM_19_SRC_EXCLUDE_2 10.0.4.0/22;', output, output) self.assertIn('address GOOD_TERM_19_SRC_EXCLUDE_3 10.0.8.0/21;', output, output) self.assertNotIn('10.0.0.0/24', output) self.assertNotIn('192.1.0.0/20', output) self.assertNotIn('platform-exclude-term', output) def testMixedVersionIcmp(self): pol = policy.ParsePolicy(GOOD_HEADER + ICMP_TYPE_TERM_1 + IPV6_ICMP_TERM, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('term t6 protocol icmp6 icmp6-type 129 ' 'inactivity-timeout 60;', output) self.assertIn('term t1 protocol icmp icmp-type 0 ' 'inactivity-timeout 60;', output) def testOptimizedApplicationset(self): some_host = [nacaddr.IP('10.0.0.1/32', token='SOMEHOST')] foo = [nacaddr.IP('10.0.0.2/32', token='FOO')] foobar = [nacaddr.IP('10.0.0.3/32', token='FOOBAR')] self.naming.GetNetAddr.side_effect = [some_host, foo, foobar, foobar, some_host] self.naming.GetServiceByProto.side_effect = [['25', '25'], ['80', '80'], ['25', '25'], ['25', '25']] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2 + GOOD_TERM_20 + GOOD_TERM_12 + GOOD_HEADER_2 + GOOD_TERM_14, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertNotIn('dup-of-term-1-app', output) def testExpressPath(self): some_host = [nacaddr.IP('10.0.0.1/32', token='SOMEHOST')] self.naming.GetNetAddr.side_effect = [some_host, some_host] self.naming.GetServiceByProto.side_effect = [['25', '25'], ['25', '25']] pol = policy.ParsePolicy(GOOD_HEADER_14 + GOOD_TERM_2 + DEFAULT_TERM_1 + GOOD_HEADER + GOOD_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertIn('services-offload;', output) self.assertIn('deny;', output) self.assertIn('permit;', output) def testDropEstablished(self): some_host = [nacaddr.IP('10.0.0.1/32', token='FOO')] self.naming.GetServiceByProto.side_effect = [['25', '25'], ['443', '443'], ['25', '25'], ['443', '443']] 
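    # Terms carrying the established/tcp-established option are presumably
    # redundant on the stateful SRX, so they are expected to be dropped;
    # the assertions below verify they never appear in the output.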
self.naming.GetNetAddr.side_effect = [some_host, some_host, some_host, some_host] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1 + GOOD_TERM_21 + DEFAULT_TERM_1 + GOOD_HEADER_2 + TCP_ESTABLISHED_TERM + UDP_ESTABLISHED_TERM + DEFAULT_TERM_1, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertNotIn('udp-established-term', output) self.assertNotIn('tcp-established-term', output) def testStatelessReply(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] ret = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1 + ICMP_RESPONSE_TERM, self.naming) _, terms = ret.filters[0] for term in terms: if term.protocol[0] == 'icmp': term.stateless_reply = True srx = junipersrx.JuniperSRX(ret, EXP_INFO) output = str(srx) self.assertIn('policy good-term-1 {', output, output) self.assertNotIn('policy icmp_response-term {', output, output) def testNoVerbose(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(GOOD_HEADER_NOVERBOSE + GOOD_TERM_1, self.naming) srx = junipersrx.JuniperSRX(pol, EXP_INFO) self.assertNotIn('This is a test acl with a comment', str(srx)) self.assertNotIn('very very very', str(srx)) def testDropUndefinedAddressbookTermsV4ForV6Render(self): # V4-only term should be dropped when rendering ACL as V6 - b/172933068 udon = [nacaddr.IP('10.0.0.2/32', token='UDON')] self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [udon] # GOOD_HEADER_4 specifies V6 rendering pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertNotIn('good_term_21', output) def testDeleteV4AddressEntriesForV6Render(self): # Confirm V4 address book entries are not generated when rendering as V6 udon = [nacaddr.IP('10.0.0.2/32', token='UDON')] self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [udon] # GOOD_HEADER_4 specifies V6 rendering pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertNotIn('10.0.0.2/32', output) def testDropUndefinedAddressbookTermsV6ForV4Render(self): # V6-only term should be dropped when rendering ACL as V4 - b/172933068 udon = [nacaddr.IP('2001:4860:8000::5/128', token='UDON')] self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [udon] # GOOD_HEADER_3 specifies V4 rendering pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertNotIn('good_term_21', output) def testDeleteV6AddressEntriesForV4Render(self): # Confirm V6 address book entries are not generated when rendering as V4 udon = [nacaddr.IP('2001:4860:8000::5/128', token='UDON')] self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [udon] # GOOD_HEADER_3 specifies V4 rendering pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) self.assertNotIn('2001:4860:8000::5/128', output) def testCreateV6AddressEntriesForMixedRender(self): # V6-only 1024+ IPs; MIXED rendering # Confirm that address set names used in policy are also created in # address book # TODO(nitb) Move multiple IP networks generation logic to separate method # and reuse in other tests overflow_ips = [ nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('3051:abd2:5400::9/128'), 
nacaddr.IP('aee2:37ba:3cc0::3/128'), nacaddr.IP('6f5d:abd2:1403::1/128'), nacaddr.IP('577e:5400:3051::6/128'), nacaddr.IP('af22:32d2:3f00::2/128') ] ips = list( nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/117').subnets( new_prefix=128)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips+mo_ips] # GOOD_HEADER = MIXED rendering pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there are exactly 9 terms in the ACL by checking if # partial_pruned_acl contains exactly 9 elements self.assertEqual(len(partial_pruned_acl), 9) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. is defined in the address book self.assertGreater(address_set_count, 1) def testCreateV6AddressEntriesForV6Render(self): # V6-only 1024+ IPs; V6 rendering overflow_ips = [ nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('3051:abd2:5400::9/128'), nacaddr.IP('aee2:37ba:3cc0::3/128'), nacaddr.IP('6f5d:abd2:1403::1/128'), nacaddr.IP('577e:5400:3051::6/128'), nacaddr.IP('af22:32d2:3f00::2/128') ] ips = list( nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/117').subnets( new_prefix=128)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips+mo_ips] # GOOD_HEADER_4 = V6 rendering pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there are exactly 9 terms in the ACL by checking if # partial_pruned_acl contains exactly 9 elements self.assertEqual(len(partial_pruned_acl), 9) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. 
is defined in the address book self.assertGreater(address_set_count, 1) def testEmptyACLEmptyAddressBookV6IpsV4Render(self): # V6-only 1024+ IPs; V4 rendering overflow_ips = [ nacaddr.IP('2001:4860:8000::5/128'), nacaddr.IP('3051:abd2:5400::9/128'), nacaddr.IP('aee2:37ba:3cc0::3/128'), nacaddr.IP('6f5d:abd2:1403::1/128'), nacaddr.IP('577e:5400:3051::6/128'), nacaddr.IP('af22:32d2:3f00::2/128') ] ips = list( nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/117').subnets( new_prefix=128)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips+mo_ips] # GOOD_HEADER_3 = V4 rendering pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) address_set_count = output.count('address') # verify acl is empty self.assertNotIn('policy', output) # verify address book is empty self.assertEqual(address_set_count, 1) def testCreateV4AddressEntriesForMixedRender(self): # V4-only 1024+ IPs; MIXED rendering overflow_ips = [ nacaddr.IP('23.2.3.3/32'), nacaddr.IP('54.2.3.4/32'), nacaddr.IP('76.2.3.5/32'), nacaddr.IP('132.2.3.6/32'), nacaddr.IP('197.2.3.7/32') ] ips = list(nacaddr.IP('10.0.8.0/21').subnets(new_prefix=32)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips+mo_ips] # GOOD_HEADER = MIXED rendering pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there are exactly 3 terms in the ACL by checking if # partial_pruned_acl contains exactly 3 elements self.assertEqual(len(partial_pruned_acl), 3) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. 
is defined in the address book self.assertGreater(address_set_count, 1) def testEmptyACLEmptyAddressBookV4IpsV6Render(self): # V4-only 1024+ IPs; V6 rendering overflow_ips = [ nacaddr.IP('23.2.3.3/32'), nacaddr.IP('54.2.3.4/32'), nacaddr.IP('76.2.3.5/32'), nacaddr.IP('132.2.3.6/32'), nacaddr.IP('197.2.3.7/32') ] ips = list(nacaddr.IP('10.0.8.0/21').subnets(new_prefix=32)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips+mo_ips] # GOOD_HEADER_4 = V6 rendering pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) address_set_count = output.count('address') # verify acl is empty self.assertNotIn('policy', output) # verify address book is empty self.assertEqual(address_set_count, 1) def testCreateV4AddressEntriesForV4Render(self): # V4-only 1024+ IPs; V4 rendering overflow_ips = [ nacaddr.IP('23.2.3.3/32'), nacaddr.IP('54.2.3.4/32'), nacaddr.IP('76.2.3.5/32'), nacaddr.IP('132.2.3.6/32'), nacaddr.IP('197.2.3.7/32') ] ips = list(nacaddr.IP('10.0.8.0/21').subnets(new_prefix=32)) mo_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: mo_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips+mo_ips] # GOOD_HEADER_3 = V4 rendering pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there are exactly 3 terms in the ACL by checking if # partial_pruned_acl contains exactly 3 elements self.assertEqual(len(partial_pruned_acl), 3) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. is defined in the address book self.assertGreater(address_set_count, 1) def testCreateMixedAddressEntriesForMixedRender(self): # 513V6 and 512V4 IPs; MIXED rendering overflow_ips = [ nacaddr.IP('2001:4860:8000::5/128') ] ips = list(nacaddr.IP('10.0.8.0/22').subnets(new_prefix=32)) v4_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: v4_ips.append(nacaddr.IP(ip)) counter += 1 ips = list( nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/118').subnets( new_prefix=128)) v6_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: v6_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips + v4_ips + v6_ips] # GOOD_HEADER = MIXED rendering pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there are exactly 6 terms in the ACL by checking if # partial_pruned_acl contains exactly 6 elements self.assertEqual(len(partial_pruned_acl), 6) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. 
is defined in the address book self.assertGreater(address_set_count, 1) def testCreateV6AddressEntriesForV6Render2(self): # 513V6 and 512V4 IPs; V6 rendering overflow_ips = [ nacaddr.IP('2001:4860:8000::5/128') ] ips = list(nacaddr.IP('10.0.8.0/22').subnets(new_prefix=32)) v4_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: v4_ips.append(nacaddr.IP(ip)) counter += 1 ips = list(nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/118').subnets(new_prefix=128)) v6_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: v6_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips + v4_ips + v6_ips] # GOOD_HEADER_4 = V6 rendering pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there are exactly 5 terms in the ACL by checking if # partial_pruned_acl contains exactly 5 elements self.assertEqual(len(partial_pruned_acl), 5) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. is defined in the address book self.assertGreater(address_set_count, 1) def testCreateV4AddressEntriesForV4Render2(self): # 513V6 and 512V4 IPs; V4 rendering overflow_ips = [ nacaddr.IP('2001:4860:8000::5/128') ] ips = list(nacaddr.IP('10.0.8.0/22').subnets(new_prefix=32)) v4_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: v4_ips.append(nacaddr.IP(ip)) counter += 1 ips = list(nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/118').subnets(new_prefix=128)) v6_ips = [] counter = 0 for ip in ips: if counter % 2 == 0: v6_ips.append(nacaddr.IP(ip)) counter += 1 self.naming.GetServiceByProto.side_effect = [['25', '25']] self.naming.GetNetAddr.side_effect = [overflow_ips + v4_ips + v6_ips] # GOOD_HEADER_3 = V4 rendering pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_21, self.naming) output = str(junipersrx.JuniperSRX(pol, EXP_INFO)) # extract address-set-names referenced in policy blocks partial_pruned_acl = output.split('replace: policies')[1].split( 'destination-address [ ')[1:] # verify that there is only one term in the ACL by checking if # partial_pruned_acl contains only one element self.assertEqual(len(partial_pruned_acl), 1) for text in partial_pruned_acl: address_set_name = text.split(' ];')[0] if address_set_name: address_set_count = output.count(address_set_name) # check if each addresssetname referenced in policy occurs more than # once i.e. is defined in the address book self.assertGreater(address_set_count, 1) def testEmptyApplications(self): self.naming.GetNetAddr.return_value = _IPSET # GOOD_HEADER_3 doesn't matter, any valid header should do pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_23, self.naming) p = junipersrx.JuniperSRX(pol, EXP_INFO) output = p._GenerateApplications() pattern = re.compile(r'delete: applications;') self.assertTrue(pattern.search(str(''.join(output))), ''.join(output)) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/k8s_test.py000066400000000000000000000533641437377527500172110ustar00rootroot00000000000000# Copyright 2015 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for K8s NetworkPolicy rendering module.""" from unittest import mock from absl.testing import absltest from absl.testing import parameterized from capirca.lib import aclgenerator from capirca.lib import k8s from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy import yaml GOOD_HEADER = """ header { comment:: "The general policy comment." target:: k8s } """ GOOD_HEADER_INGRESS = """ header { comment:: "The general policy comment." target:: k8s INGRESS } """ GOOD_HEADER_EGRESS = """ header { comment:: "The general policy comment." target:: k8s EGRESS } """ GOOD_TERM = """ term good-term-1 { owner:: myself comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_CUSTOM_NAME = """ term %s { owner:: myself comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_CUSTOM_PROTO = """ term custom-proto-term { owner:: myself comment:: "custom proto term" source-address:: CORP_EXTERNAL protocol:: %s action:: accept } """ GOOD_TERM_PROTO_ALL = """ term good-term-2 { owner:: myself comment:: "DNS access from corp." source-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp sctp action:: accept } """ GOOD_TERM_ALLOW_ALL_TCP = """ term good-term-3 { owner:: myself comment:: "DNS access from corp." source-address:: CORP_EXTERNAL protocol:: tcp action:: accept } """ GOOD_TERM_EGRESS = """ term good-term-4 { comment:: "DNS access from corp." destination-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_TERM_EXPIRED = """ term good-term-expired { comment:: "Management access from corp." expiration:: 2001-01-01 source-address:: CORP_EXTERNAL destination-tag:: ssh-servers destination-port:: SSH protocol:: tcp action:: accept } """ GOOD_TERM_EXCLUDE_SOURCE = """ term good-term-exclude-source { comment:: "term with source exclusions" source-address:: ANY_IPS source-exclude:: TEST_IPS protocol:: tcp action:: accept } """ GOOD_TERM_EXCLUDE_DEST = """ term good-term-exclude-destination { comment:: "term with destination exclusions" destination-address:: ANY_IPS destination-exclude:: TEST_IPS protocol:: tcp action:: accept } """ DEFAULT_DENY = """ term default-deny { comment:: "default_deny." 
action:: deny } """ BAD_TERM_DENY = """ term bad-term-1 { comment:: "explicit deny" source-address:: CORP_EXTERNAL protocol:: tcp action:: deny } """ BAD_TERM_INVALID_SOURCE_EXCLUDE = """ term bad-term-2 { comment:: "source exclude without source address" source-exclude:: CORP_EXTERNAL protocol:: tcp action:: accept } """ BAD_TERM_NO_ADDR = """ term bad-term-3 { comment:: "ingress no source" protocol:: tcp action:: accept } """ BAD_TERM_INGRESS_DESTINATION = """ term bad-term-4 { comment:: "source exclude without source address" source-address:: CORP_EXTERNAL destination-address:: CORP_EXTERNAL protocol:: tcp action:: accept } """ BAD_TERM_SOURCE_PORT = """ term bad-term-5 { comment:: "source port restriction" source-address:: CORP_EXTERNAL source-port:: DNS protocol:: udp action:: accept } """ BAD_TERM_EMPTY_SOURCE = """ term bad-term-6 { comment:: "empty source address after flattening" source-address:: CORP_EXTERNAL source-exclude:: CORP_EXTERNAL protocol:: tcp action:: accept } """ BAD_TERM_EMPTY_DEST = """ term bad-term-7 { comment:: "empty destination address after flattening" destination-address:: CORP_EXTERNAL destination-exclude:: CORP_EXTERNAL protocol:: tcp action:: accept } """ VALID_TERM_NAMES = [ 'gcp-to-gcp', 'default-deny', 'google-web', 'zo6hmxkfibardh6tgbiy7ua6', 'http.frontend.web.com', ] INVALID_TERM_NAMES = [ 'CAPS-ARE-NOT-VALID', '_underscores_', 'mIxEdCaSe', 'an-otherwise-valid-term-ending-in-a-dash-', ] SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_tag', 'expiration', 'stateless_reply', 'name', 'owner', 'protocol', 'source_address', 'source_address_exclude', 'translated', 'platform', 'platform_exclude', } SUPPORTED_SUB_TOKENS = {'action': {'accept', 'deny'}} SUPPORTED_PROTOS = ['tcp', 'udp', 'sctp'] UNSUPPORTED_PROTOS = ['igmp', 'pim', 'ah'] # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 TEST_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128')] TEST_INCLUDE_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('10.4.3.2/32')] TEST_EXCLUDE_IPS = [nacaddr.IP('10.4.3.2/32')] TEST_INCLUDE_RANGE = [nacaddr.IP('10.128.0.0/9')] TEST_EXCLUDE_RANGE = [nacaddr.IP('10.240.0.0/16')] ANY_IPS = [nacaddr.IP('0.0.0.0/0'), nacaddr.IP('::/0')] ANY_IPV4 = [nacaddr.IP('0.0.0.0/0')] ANY_IPV6 = [nacaddr.IP('::/0')] TEST_IPV4_ONLY = [nacaddr.IP('10.2.3.4/32')] TEST_IPV6_ONLY = [nacaddr.IP('2001:4860:8000::5/128')] class K8sTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testGenericTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] expected = { 'apiVersion': k8s.K8s._API_VERSION, 'kind': k8s.K8s._RESOURCE_KIND, 'items': [{ 'apiVersion': k8s.Term._API_VERSION, 'kind': k8s.Term._RESOURCE_KIND, 'metadata': { 'name': 'good-term-1', 'annotations': { 'owner': 'myself', 'comment': 'DNS access from corp.' 
}, }, 'spec': { 'podSelector': {}, 'policyTypes': ['Ingress'], 'ingress': [{ 'from': [{ 'ipBlock': { 'cidr': '10.2.3.4/32' } }, { 'ipBlock': { 'cidr': '2001:4860:8000::5/128' } }], 'ports': [{ 'port': 53, 'protocol': 'UDP' }, { 'port': 53, 'protocol': 'TCP' }], }] }, }] } acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) self.assertDictEqual(expected, policy_list) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testGenericEgressTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] expected = { 'apiVersion': k8s.K8s._API_VERSION, 'kind': k8s.K8s._RESOURCE_KIND, 'items': [{ 'apiVersion': k8s.Term._API_VERSION, 'kind': k8s.Term._RESOURCE_KIND, 'metadata': { 'name': 'good-term-4-e', 'annotations': { 'comment': 'DNS access from corp.' }, }, 'spec': { 'podSelector': {}, 'policyTypes': ['Egress'], 'egress': [{ 'to': [{ 'ipBlock': { 'cidr': '10.2.3.4/32' } }, { 'ipBlock': { 'cidr': '2001:4860:8000::5/128' } }], 'ports': [{ 'port': 53, 'protocol': 'UDP' }, { 'port': 53, 'protocol': 'TCP' }], }] }, }] } acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EGRESS, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) self.assertDictEqual(expected, policy_list) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testAllProtosTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53'], ['53']] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_PROTO_ALL, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertLen(policies, 1) net_policy = policies[0] self.assertLen(net_policy['spec']['ingress'], 1) ingress_rule = net_policy['spec']['ingress'][0] self.assertLen(ingress_rule['ports'], 3) unique_protos = { port_selector['protocol'] for port_selector in ingress_rule['ports'] } self.assertSetEqual({'UDP', 'TCP', 'SCTP'}, unique_protos) def testPortRangeTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['0-1024'] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertLen(policies, 1) net_policy = policies[0] self.assertLen(net_policy['spec']['ingress'], 1) ingress_rule = net_policy['spec']['ingress'][0] self.assertLen(ingress_rule['ports'], 2) expected = {'endPort': 1024, 'port': 0} for port_selector in ingress_rule['ports']: self.assertEqual(port_selector, {**port_selector, **expected}) def testAllowAllTcpTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['53'] expected_ingress_ports = [{'protocol': 'TCP'}] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_ALLOW_ALL_TCP, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertLen(policies, 1) net_policy = policies[0] self.assertLen(net_policy['spec']['ingress'], 1) ingress_rule = net_policy['spec']['ingress'][0] self.assertLen(ingress_rule['ports'], 1) self.assertSequenceEqual(ingress_rule['ports'], expected_ingress_ports) def testDefaultDenyTerm(self): expected = { 'apiVersion': 
k8s.Term._API_VERSION, 'kind': k8s.Term._RESOURCE_KIND, 'metadata': { 'name': 'default-deny', 'annotations': { 'comment': 'default_deny.' }, }, 'spec': { 'podSelector': {}, 'policyTypes': ['Ingress'] }, } acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + DEFAULT_DENY, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertDictEqual(expected, policies[0]) def testDefaultDenyEgressTerm(self): expected = { 'apiVersion': k8s.Term._API_VERSION, 'kind': k8s.Term._RESOURCE_KIND, 'metadata': { 'name': 'default-deny-e', 'annotations': { 'comment': 'default_deny.' }, }, 'spec': { 'podSelector': {}, 'policyTypes': ['Egress'] }, } acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER_EGRESS + DEFAULT_DENY, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertLen(policies, 1) self.assertDictEqual(expected, policies[0]) def testBadDenyTerm(self): self.assertRaisesRegex( k8s.K8sNetworkPolicyError, 'not support explicit deny', k8s.K8s, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_DENY, self.naming), EXP_INFO) def testBadSourceExclusionTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( k8s.K8sNetworkPolicyError, 'missing required field', k8s.K8s, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_INVALID_SOURCE_EXCLUDE, self.naming), EXP_INFO) def testBadIngressNoAddressTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( k8s.K8sNetworkPolicyError, 'missing required field.+source', k8s.K8s, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_NO_ADDR, self.naming), EXP_INFO) def testBadEgressNoAddressTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.assertRaisesRegex( k8s.K8sNetworkPolicyError, 'missing required field.+destination', k8s.K8s, policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_NO_ADDR, self.naming), EXP_INFO) @parameterized.named_parameters( { 'testcase_name': 'IPv4', 'ip_block_cidr': TEST_INCLUDE_RANGE, 'ip_block_exclude': TEST_EXCLUDE_RANGE, }, { 'testcase_name': 'IPv6', 'ip_block_cidr': ANY_IPV6, 'ip_block_exclude': TEST_IPV6_ONLY, }, { 'testcase_name': 'MultiExclude', 'ip_block_cidr': TEST_INCLUDE_IPS, 'ip_block_exclude': TEST_EXCLUDE_RANGE + TEST_EXCLUDE_IPS, }) def testGoodSourceAddressExcludeTerm(self, ip_block_cidr, ip_block_exclude): expected_peer_specs = [] expected_peer_spec_except = [str(ex) for ex in ip_block_exclude[::-1]] for ip in ip_block_cidr: expected_peer_specs.append( {'ipBlock': { 'cidr': str(ip), 'except': expected_peer_spec_except }}) expected = { 'apiVersion': k8s.Term._API_VERSION, 'kind': k8s.Term._RESOURCE_KIND, 'metadata': { 'name': 'good-term-exclude-source', 'annotations': { 'comment': 'term with source exclusions' }, }, 'spec': { 'ingress': [{ 'from': expected_peer_specs, 'ports': [{ 'protocol': 'TCP' }], }], 'podSelector': {}, 'policyTypes': ['Ingress'], }, } self.naming.GetNetAddr.side_effect = [ip_block_cidr, ip_block_exclude] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXCLUDE_SOURCE, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertDictEqual(expected, policies[0]) @parameterized.named_parameters( { 'testcase_name': 'IPv4', 'ip_block_cidr': TEST_INCLUDE_RANGE, 'ip_block_exclude': TEST_EXCLUDE_RANGE, }, { 'testcase_name': 'IPv6', 'ip_block_cidr': ANY_IPV6, 'ip_block_exclude': TEST_IPV6_ONLY, }, { 'testcase_name': 'MultiExclude', 'ip_block_cidr': TEST_INCLUDE_IPS, 'ip_block_exclude': TEST_EXCLUDE_RANGE + TEST_EXCLUDE_IPS, }) def 
testGoodDestAddressExcludeTerm(self, ip_block_cidr, ip_block_exclude): expected_peer_specs = [] expected_peer_spec_except = [str(ex) for ex in ip_block_exclude[::-1]] for ip in ip_block_cidr: expected_peer_specs.append( {'ipBlock': { 'cidr': str(ip), 'except': expected_peer_spec_except }}) expected = { 'apiVersion': k8s.Term._API_VERSION, 'kind': k8s.Term._RESOURCE_KIND, 'metadata': { 'name': 'good-term-exclude-destination-e', 'annotations': { 'comment': 'term with destination exclusions' }, }, 'spec': { 'egress': [{ 'to': expected_peer_specs, 'ports': [{ 'protocol': 'TCP' }], }], 'podSelector': {}, 'policyTypes': ['Egress'], }, } self.naming.GetNetAddr.side_effect = [ip_block_cidr, ip_block_exclude] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM_EXCLUDE_DEST, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) policies = policy_list['items'] self.assertDictEqual(expected, policies[0]) def testBadSourceAddressExcludeTerm(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + BAD_TERM_EMPTY_SOURCE, self.naming), EXP_INFO) self.assertEqual(str(acl), '') def testBadDestinationAddressExcludeTerm(self): self.naming.GetNetAddr.return_value = TEST_IPV4_ONLY acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER_EGRESS + BAD_TERM_EMPTY_DEST, self.naming), EXP_INFO) self.assertEqual(str(acl), '') def testBadSourcePortTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53']] self.assertRaisesRegex( k8s.K8sNetworkPolicyError, 'not support source port', k8s.K8s, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_SOURCE_PORT, self.naming), EXP_INFO) def testBadIngressDestinationTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53']] self.assertRaisesRegex( k8s.K8sNetworkPolicyError, '[Ii]ngress rules cannot include.+destination', k8s.K8s, policy.ParsePolicy(GOOD_HEADER + BAD_TERM_INGRESS_DESTINATION, self.naming), EXP_INFO) def testBadEgressSourceTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.assertRaisesRegex( k8s.K8sNetworkPolicyError, '[Ee]gress rules cannot include.+source', k8s.K8s, policy.ParsePolicy(GOOD_HEADER_EGRESS + GOOD_TERM, self.naming), EXP_INFO) def testValidTermNames(self): for name in VALID_TERM_NAMES: self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_CUSTOM_NAME % name, self.naming) acl = k8s.K8s(pol, EXP_INFO) self.assertIsNotNone(str(acl)) def testInvalidTermNames(self): for name in INVALID_TERM_NAMES: self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_CUSTOM_NAME % name, self.naming) self.assertRaisesRegex(k8s.K8sNetworkPolicyError, 'name %s is not valid' % name, k8s.K8s, pol, EXP_INFO) def testSkipExpiredTerm(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_EXPIRED, self.naming), EXP_INFO) self.assertEqual(str(acl), '') self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testSkipStatelessReply(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] # Add stateless_reply to terms, 
there is no current way to include it in the # term definition. ret = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming) _, terms = ret.filters[0] for term in terms: term.stateless_reply = True acl = k8s.K8s(ret, EXP_INFO) self.assertEqual(str(acl), '') self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') self.naming.GetServiceByProto.assert_has_calls( [mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testValidTermProtos(self): for proto in SUPPORTED_PROTOS: self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['53'] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_CUSTOM_PROTO % proto, self.naming) acl = k8s.K8s(pol, EXP_INFO) self.assertIsNotNone(str(acl)) def testInvalidTermProtos(self): for proto in UNSUPPORTED_PROTOS: self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['53'] pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_CUSTOM_PROTO % proto, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, k8s.K8s, pol, EXP_INFO) def testMultipleTerms(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['53'] acl = k8s.K8s( policy.ParsePolicy(GOOD_HEADER + GOOD_TERM + GOOD_TERM_ALLOW_ALL_TCP, self.naming), EXP_INFO) policy_list = yaml.safe_load(str(acl)) self.assertLen(policy_list['items'], 2) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/nacaddr_test.py000066400000000000000000000311671437377527500200750ustar00rootroot00000000000000# Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for nacaddr.py module.""" from absl.testing import absltest from capirca.lib import nacaddr class NacaddrUnitTest(absltest.TestCase): """Unit Test for nacaddr.py. nacaddr class extends ipaddr by adding .text fields to allow comments for each of the IPv4 and IPv6 classes. """ def setUp(self): super().setUp() self.addr1 = nacaddr.IPv4(u'10.0.0.0/8', 'The 10 block') self.addr2 = nacaddr.IPv6('DEAD:BEEF:BABE:FACE:DEAF:FEED:C0DE:F001/64', 'An IPv6 Address', strict=False) def testCollapsing(self): ip1 = nacaddr.IPv4(u'1.1.0.0/24', 'foo') ip2 = nacaddr.IPv4(u'1.1.1.0/24', 'foo') ip3 = nacaddr.IPv4(u'1.1.2.0/24', 'baz') ip4 = nacaddr.IPv4(u'1.1.3.0/24') ip5 = nacaddr.IPv4(u'1.1.4.0/24') # stored in no particular order b/c we want CollapseAddr to call [].sort # and we want that sort to call nacaddr.IP.__cmp__() on our array members ip6 = nacaddr.IPv4(u'1.1.0.0/22') # check that addreses are subsumed properlly. collapsed = nacaddr.CollapseAddrList([ip1, ip2, ip3, ip4, ip5, ip6]) self.assertEqual(len(collapsed), 2) # test that the comments are collapsed properlly, and that comments aren't # added to addresses that have no comments. 
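# --------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream test suite): a
# minimal, standalone version of the comment-collapsing behaviour that the
# assertions below verify. It assumes only that capirca is importable, as in
# the imports at the top of this file; the helper name is hypothetical.
def _sketch_collapse_comments():
  """Sketch: two adjacent /24s with comments collapse into a single /23."""
  from capirca.lib import nacaddr

  left = nacaddr.IPv4(u'1.1.0.0/24', 'foo')
  right = nacaddr.IPv4(u'1.1.1.0/24', 'foo')
  merged = nacaddr.CollapseAddrList([left, right])
  # The adjacent networks are summarized into one supernet and the comment
  # text is carried over onto the collapsed entry, mirroring the assertions
  # in testCollapsing below.
  assert merged == [nacaddr.IPv4(u'1.1.0.0/23')]
  assert merged[0].text == 'foo'
  return merged
# --------------------------------------------------------------------------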
    self.assertListEqual([collapsed[0].text, collapsed[1].text],
                         ['foo, baz', ''])
    self.assertListEqual(collapsed, [nacaddr.IPv4(u'1.1.0.0/22'),
                                     nacaddr.IPv4(u'1.1.4.0/24')])

    # test that two addresses are supernet'ed properly
    collapsed = nacaddr.CollapseAddrList([ip1, ip2])
    self.assertEqual(len(collapsed), 1)
    self.assertEqual(collapsed[0].text, 'foo')
    self.assertListEqual(collapsed, [nacaddr.IPv4(u'1.1.0.0/23')])

    ip_same1 = ip_same2 = nacaddr.IPv4(u'1.1.1.1/32')
    self.assertListEqual(nacaddr.CollapseAddrList([ip_same1, ip_same2]),
                         [ip_same1])

    ip1 = nacaddr.IPv6(u'::2001:1/100', strict=False)
    ip2 = nacaddr.IPv6(u'::2002:1/120', strict=False)
    ip3 = nacaddr.IPv6(u'::2001:1/96', strict=False)
    # test that ipv6 addresses are subsumed properly.
    collapsed = nacaddr.CollapseAddrList([ip1, ip2, ip3])
    self.assertListEqual(collapsed, [ip3])

  def testNacaddrV4Comment(self):
    self.assertEqual(self.addr1.text, 'The 10 block')

  def testNacaddrV6Comment(self):
    self.assertEqual(self.addr2.text, 'An IPv6 Address')

  def testSupernetting(self):
    self.assertEqual(self.addr1.Supernet().text, 'The 10 block')
    self.assertEqual(self.addr2.Supernet().text, 'An IPv6 Address')
    self.assertEqual(self.addr1.Supernet().prefixlen, 7)
    self.assertEqual(self.addr2.Supernet().prefixlen, 63)
    token_ip = nacaddr.IP('1.1.1.0/24', token='FOO_TOKEN')
    self.assertEqual(token_ip.Supernet().token, 'FOO_TOKEN')
    self.assertEqual(nacaddr.IP('0.0.0.0/0').Supernet(),
                     nacaddr.IP('0.0.0.0/0'))
    self.assertEqual(nacaddr.IP('::0/0').Supernet(), nacaddr.IP('::0/0'))
    self.assertRaises(nacaddr.PrefixlenDiffInvalidError,
                      nacaddr.IP('1.1.1.0/24').Supernet, 25)
    self.assertRaises(nacaddr.PrefixlenDiffInvalidError,
                      nacaddr.IP('::1/64', strict=False).Supernet, 65)

  def testAddressListExclusion(self):
    a1 = nacaddr.IPv4('1.1.1.0/24')
    a2 = nacaddr.IPv4('10.0.0.0/24')
    b1 = nacaddr.IPv4('1.1.1.1/32')
    b2 = nacaddr.IPv4('10.0.0.25/32')
    b3 = nacaddr.IPv4('192.168.0.0/16')

    expected = [nacaddr.IPv4('1.1.1.0/32'), nacaddr.IPv4('1.1.1.2/31'),
                nacaddr.IPv4('1.1.1.4/30'), nacaddr.IPv4('1.1.1.8/29'),
                nacaddr.IPv4('1.1.1.16/28'), nacaddr.IPv4('1.1.1.32/27'),
                nacaddr.IPv4('1.1.1.64/26'), nacaddr.IPv4('1.1.1.128/25'),
                nacaddr.IPv4('10.0.0.0/28'), nacaddr.IPv4('10.0.0.16/29'),
                nacaddr.IPv4('10.0.0.24/32'), nacaddr.IPv4('10.0.0.26/31'),
                nacaddr.IPv4('10.0.0.28/30'), nacaddr.IPv4('10.0.0.32/27'),
                nacaddr.IPv4('10.0.0.64/26'), nacaddr.IPv4('10.0.0.128/25')]
    self.assertListEqual(nacaddr.AddressListExclude([a1, a2], [b1, b2, b3]),
                         expected)

    # [1,2,3] + [4,5,6] = [1,2,3,4,5,6]. this is basically the same test as
    # above but i think it's a little more readable
    expected_two = list(a1.address_exclude(b1))
    expected_two.extend(a2.address_exclude(b2))
    self.assertListEqual(nacaddr.AddressListExclude([a1, a2], [b1, b2, b3]),
                         sorted(expected_two))

  def testComplexAddressListExcludesion(self):
    # this is a big fugly test. there was a bug in AddressListExclude
    # which manifested itself when more than one member of the excludes
    # list was a part of the same superset token.
    #
    # for example, it used to be like so:
    #   excludes = ['1.1.1.1/32', '1.1.1.2/32']
    #   superset = ['1.1.1.0/30']
    #
    #   '1.1.1.0/30'.AddressExclude('1.1.1.1/32') ->
    #     ['1.1.1.0/32', '1.1.1.2/32', '1.1.1.3/32']
    #   '1.1.1.0/30'.AddressExclude('1.1.1.2/32') ->
    #     ['1.1.1.0/32', '1.1.1.1/32', '1.1.1.3/32']
    #
    # yet combining those two results gives you
    # ['1.1.1.0/32', '1.1.1.1/32', '1.1.1.2/32', '1.1.1.3/32'], or
    # '1.1.1.0/30', which clearly isn't right.
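# --------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream test suite): the
# corrected behaviour described in the comment above, shown standalone on the
# same tiny example. The helper name is hypothetical and the expected result
# is derived from the exclusion semantics exercised in this file.
def _sketch_exclude_shared_superset():
  """Sketch: both excludes are honoured against the same superset block."""
  from capirca.lib import nacaddr

  superset = [nacaddr.IPv4('1.1.1.0/30')]
  excludes = [nacaddr.IPv4('1.1.1.1/32'), nacaddr.IPv4('1.1.1.2/32')]
  result = nacaddr.AddressListExclude(superset, excludes)
  # Only the two untouched host routes should remain; the naive per-exclude
  # merge described above would incorrectly re-assemble the whole /30.
  assert result == [nacaddr.IPv4('1.1.1.0/32'), nacaddr.IPv4('1.1.1.3/32')]
  return result
# --------------------------------------------------------------------------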
excludes = [nacaddr.IPv4('10.0.0.0/23'), nacaddr.IPv4('10.1.0.0/16')] superset = [nacaddr.IPv4('0.0.0.0/0')] expected = [nacaddr.IPv4('0.0.0.0/5'), nacaddr.IPv4('8.0.0.0/7'), nacaddr.IPv4('10.0.2.0/23'), nacaddr.IPv4('10.0.4.0/22'), nacaddr.IPv4('10.0.8.0/21'), nacaddr.IPv4('10.0.16.0/20'), nacaddr.IPv4('10.0.32.0/19'), nacaddr.IPv4('10.0.64.0/18'), nacaddr.IPv4('10.0.128.0/17'), nacaddr.IPv4('10.2.0.0/15'), nacaddr.IPv4('10.4.0.0/14'), nacaddr.IPv4('10.8.0.0/13'), nacaddr.IPv4('10.16.0.0/12'), nacaddr.IPv4('10.32.0.0/11'), nacaddr.IPv4('10.64.0.0/10'), nacaddr.IPv4('10.128.0.0/9'), nacaddr.IPv4('11.0.0.0/8'), nacaddr.IPv4('12.0.0.0/6'), nacaddr.IPv4('16.0.0.0/4'), nacaddr.IPv4('32.0.0.0/3'), nacaddr.IPv4('64.0.0.0/2'), nacaddr.IPv4('128.0.0.0/1')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testAddressListExcludeCaseOne(self): # Small block eliminated by large block, and an extra block that stays. # For both IP versions. superset = [nacaddr.IPv4('200.0.0.0/24'), nacaddr.IPv4('10.1.0.0/24'), nacaddr.IPv6('200::/56'), nacaddr.IPv6('10:1::/56')] excludes = [nacaddr.IPv6('10::/16'), nacaddr.IPv4('10.0.0.0/8')] expected = [nacaddr.IPv4('200.0.0.0/24'), nacaddr.IPv6('200::/56')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testAddressListExcludeCaseTwo(self): # Two blocks out of the middle of a large block. superset = [nacaddr.IPv4('200.0.0.0/24'), nacaddr.IPv4('10.0.0.0/8'), nacaddr.IPv6('200::/56'), nacaddr.IPv6('10::/16')] excludes = [nacaddr.IPv6('10:8000::/18'), nacaddr.IPv6('10:4000::/18'), nacaddr.IPv4('10.128.0.0/10'), nacaddr.IPv4('10.64.0.0/10')] expected = [nacaddr.IPv4('10.0.0.0/10'), nacaddr.IPv4('10.192.0.0/10'), nacaddr.IPv4('200.0.0.0/24'), nacaddr.IPv6('10::/18'), nacaddr.IPv6('10:c000::/18'), nacaddr.IPv6('200::/56')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testAddressListExcludeCaseThree(self): # Two blocks off both ends of a large block. 
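# --------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream test suite): the
# IPv6 half of the "two blocks off both ends" case, shown standalone. The
# helper name is hypothetical; the expected /18s mirror the values asserted
# in testAddressListExcludeCaseThree below.
def _sketch_exclude_both_ends():
  """Sketch: trimming the first and last /18 out of a /16 leaves the middle."""
  from capirca.lib import nacaddr

  superset = [nacaddr.IPv6('10::/16')]
  excludes = [nacaddr.IPv6('10::/18'), nacaddr.IPv6('10:c000::/18')]
  result = nacaddr.AddressListExclude(superset, excludes)
  # The two middle /18 blocks remain once both ends are carved off.
  assert result == [nacaddr.IPv6('10:4000::/18'), nacaddr.IPv6('10:8000::/18')]
  return result
# --------------------------------------------------------------------------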
superset = [nacaddr.IPv4('200.0.0.0/24'), nacaddr.IPv4('10.0.0.0/8'), nacaddr.IPv6('200::/56'), nacaddr.IPv6('10::/16')] excludes = [nacaddr.IPv6('10::/18'), nacaddr.IPv6('10:c000::/18'), nacaddr.IPv4('10.0.0.0/10'), nacaddr.IPv4('10.192.0.0/10')] expected = [nacaddr.IPv4('10.64.0.0/10'), nacaddr.IPv4('10.128.0.0/10'), nacaddr.IPv4('200.0.0.0/24'), nacaddr.IPv6('10:4000::/18'), nacaddr.IPv6('10:8000::/18'), nacaddr.IPv6('200::/56')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testAddressListExcludeCaseFour(self): # IPv6 does not affect IPv4 superset = [nacaddr.IPv4('0.0.0.0/0')] excludes = [nacaddr.IPv6('::/0')] expected = [nacaddr.IPv4('0.0.0.0/0')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testAddressListExcludeCaseFive(self): # IPv6 does not affect IPv4 superset = [nacaddr.IPv6('::/0')] excludes = [nacaddr.IPv4('0.0.0.0/0')] expected = [nacaddr.IPv6('::/0')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testAddressListExcludeCaseSix(self): # IPv6 does not affect IPv4 superset = [nacaddr.IPv6('0::ffff:0.0.0.0/96')] excludes = [nacaddr.IPv4('0.0.0.0/0')] expected = [nacaddr.IPv6('0::ffff:0.0.0.0/96')] self.assertListEqual(nacaddr.AddressListExclude(superset, excludes), expected) def testCollapseAddrListPreserveTokens(self): addr_list = [nacaddr.IPv4('10.0.1.7/32', token='BIZ'), nacaddr.IPv4('192.168.1.10/32', token='ALSOUNDERSUPER'), nacaddr.IPv4('10.0.0.6/32', token='FOO'), nacaddr.IPv4('10.0.0.9/32', token='BAR'), nacaddr.IPv4('10.0.0.8/32', token='FOO'), nacaddr.IPv4('10.0.0.7/32', token='BAR'), nacaddr.IPv4('192.168.1.1/24', token='SUPER', strict=False), nacaddr.IPv4('10.0.1.6/32', token='BIZ'), nacaddr.IPv4('192.168.1.7/31', token='UNDERSUPER', strict=False) ] expected = [nacaddr.IPv4('10.0.0.7/32', token='BAR'), nacaddr.IPv4('10.0.0.9/32', token='BAR'), nacaddr.IPv4('10.0.1.6/31', token='BIZ'), nacaddr.IPv4('10.0.0.6/32', token='FOO'), nacaddr.IPv4('10.0.0.8/32', token='FOO'), nacaddr.IPv4('192.168.1.1/24', token='SUPER', strict=False)] collapsed = nacaddr.CollapseAddrListPreserveTokens(addr_list) self.assertListEqual(collapsed, expected) def testIsSupernet(self): addrs1 = [nacaddr.IPv4('10.0.1.7/32'), nacaddr.IPv4('10.0.1.2/32')] addrs2 = [nacaddr.IPv4('10.0.1.0/24')] addrs3 = [nacaddr.IPv4('10.0.1.7/32'), nacaddr.IPv4('10.1.1.2/32')] addrs4 = [nacaddr.IPv4('192.168.1.1/32', nacaddr.IPv4('172.0.0.1/32'))] addrs5 = [nacaddr.IPv4('192.168.1.1/24', strict=False), nacaddr.IPv4('10.0.1.0/24')] self.assertTrue(nacaddr.IsSuperNet(addrs2, addrs1)) self.assertFalse(nacaddr.IsSuperNet(addrs2, addrs3)) self.assertFalse(nacaddr.IsSuperNet(addrs2, addrs4)) self.assertFalse(nacaddr.IsSuperNet(addrs2, addrs5)) self.assertTrue(nacaddr.IsSuperNet(addrs5, addrs2)) def testSafeCollapsing(self): test_data = [([nacaddr.IPv4('10.0.0.0/8'), nacaddr.IPv4('10.0.0.0/10')], [nacaddr.IPv4('10.0.0.0/9')], [nacaddr.IPv4('10.0.0.0/8'), nacaddr.IPv4('10.0.0.0/10')]), ([nacaddr.IPv4('192.168.0.0/27'), nacaddr.IPv4('192.168.0.0/24')], [nacaddr.IPv4('192.168.1.0/24')], [nacaddr.IPv4('192.168.0.0/24')]), ([nacaddr.IPv6('10::/56'), nacaddr.IPv6('10::/128')], [nacaddr.IPv6('10::/64')], [nacaddr.IPv6('10::/56'), nacaddr.IPv6('10::/128')]), ([nacaddr.IPv6('10::/128'), nacaddr.IPv6('10::/56')], [nacaddr.IPv6('8::/64')], [nacaddr.IPv6('10::/56')]) ] for addresses, complement_addresses, result in test_data: self.assertEqual(nacaddr.CollapseAddrList(addresses, complement_addresses), result) if __name__ == 
'__main__': absltest.main() capirca-2.0.9/tests/lib/naming_test.py000066400000000000000000000170141437377527500177450ustar00rootroot00000000000000# Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for naming.py module.""" from absl.testing import absltest from capirca.lib import nacaddr from capirca.lib import naming class NamingUnitTest(absltest.TestCase): """Unit Test for naming.py. The Naming class allows us to specify if we want to use arrays of text instead of files. Most of the tests below create an empty Naming class. To populate the class with data, we simply pass our test data in arrays to the ParseList method, or in some cases, pass an io.BytesIO stream. """ def setUp(self): super().setUp() self.defs = naming.Naming(None) servicedata = [] servicedata.append('SVC1 = 80/tcp 81/udp 82/tcp') servicedata.append('SVC2 = 80/tcp 81/udp 82/tcp SVC2') servicedata.append('SVC3 = 80/tcp 81/udp') servicedata.append('SVC4 = 80/tcp # some service') servicedata.append('TCP_90 = 90/tcp') servicedata.append('SVC5 = TCP_90') servicedata.append('SVC6 = SVC1 SVC5') networkdata = [] networkdata.append('NET1 = 10.1.0.0/8 # network1') networkdata.append('NET2 = 10.2.0.0/16 # network2.0') networkdata.append(' NET1') networkdata.append('9OCLOCK = 1.2.3.4/32 # 9 is the time') networkdata.append('FOOBAR = 9OCLOCK') networkdata.append('FOO_V6 = ::FFFF:FFFF:FFFF:FFFF') networkdata.append('BAR_V6 = ::1/128') networkdata.append('BAZ = FOO_V6') networkdata.append(' BAR_V6') networkdata.append('BING = NET1 # foo') networkdata.append(' FOO_V6') self.defs.ParseServiceList(servicedata) self.defs.ParseNetworkList(networkdata) def testCommentedServices(self): self.assertEqual(self.defs.GetService('SVC4'), ['80/tcp']) self.assertListEqual(self.defs.GetServiceByProto('SVC4', 'tcp'), ['80']) def testBadGetRequest(self): """Test proper handling of a non-existant service request.""" self.assertRaises(naming.UndefinedServiceError, self.defs.GetService, 'FOO') self.assertRaises(naming.UndefinedServiceError, self.defs.GetServiceByProto, 'FOO', 'tcp') def testGetServiceRecursion(self): """Ensure we don't slip into recursion hell when object contains itself.""" self.assertListEqual(self.defs.GetService('SVC2'), ['80/tcp', '81/udp', '82/tcp']) def testGetService(self): """Verify proper results from a service lookup request.""" self.assertListEqual(self.defs.GetService('SVC1'), ['80/tcp', '81/udp', '82/tcp']) def testBadProtocol(self): """Test proper handling of a non-existant service request.""" self.assertListEqual(self.defs.GetServiceByProto('SVC1', 'fud'), []) def testGetServiceByProto(self): self.assertListEqual(self.defs.GetServiceByProto('SVC1', 'tcp'), ['80', '82']) def testGetServiceByProtoWithoutProtocols(self): """Ensure services with protocol are not returned when type is specified.""" self.assertListEqual(self.defs.GetServiceByProto('SVC3', 'tcp'), ['80']) def testNetworkComment(self): self.assertEqual(self.defs.GetNetAddr('NET1')[0].text, 'network1') def 
testNestedNetworkComment(self): self.assertEqual(self.defs.GetNetAddr('NET2')[1].text, 'network1') def testUndefinedAddress(self): self.assertRaises(naming.UndefinedAddressError, self.defs.GetNetAddr, 'FOO') def testNamespaceCollisionError(self): badservicedata = [] badservicedata.append('SVC1 = 80/tcp') badservicedata.append('SVC1 = 81/udp') testdefs = naming.Naming(None) self.assertRaises(naming.NamespaceCollisionError, testdefs.ParseServiceList, badservicedata) def testNetworkAddress(self): self.assertListEqual(self.defs.GetNetAddr('NET1'), [nacaddr.IPv4('10.0.0.0/8')]) def testInet6Address(self): self.assertListEqual(self.defs.GetNetAddr('BAZ'), [nacaddr.IPv6('::FFFF:FFFF:FFFF:FFFF'), nacaddr.IPv6('::1/128')]) def testMixedAddresses(self): self.assertListEqual(self.defs.GetNetAddr('BING'), [nacaddr.IPv4('10.0.0.0/8'), nacaddr.IPv6('::FFFF:FFFF:FFFF:FFFF')]) # same thing but letting nacaddr decide which v4 or v6. self.assertListEqual(self.defs.GetNetAddr('BING'), [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('::FFFF:FFFF:FFFF:FFFF')]) def testNestedServices(self): self.assertListEqual(self.defs.GetServiceByProto('SVC6', 'tcp'), ['80', '82', '90']) def testServiceParents(self): """SVC6 contains SVC5 which contains TCP_90 which contains 90/tcp.""" self.assertListEqual(self.defs.GetServiceParents('90/tcp'), ['TCP_90', 'SVC5', 'SVC6']) def testNetParents(self): """BIN & NET2 contain NET1, BING & BAZ contain FOO_V6.""" self.assertListEqual(sorted(self.defs.GetNetParents('NET1')), ['BING', 'NET2']) self.assertListEqual(sorted(self.defs.GetNetParents('FOO_V6')), ['BAZ', 'BING']) def testGetIpParents(self): """Ensure GetIpParents returns proper results.""" self.assertListEqual(self.defs.GetIpParents('10.11.12.13/32'), ['BING', 'NET1', 'NET2']) def testUndefinedTokenNesting(self): bad_servicedata = ['FOO = 7/tcp BAR'] bad_networkdata = ['NETGROUP = 10.0.0.0/8 FOOBAR'] baddefs = naming.Naming(None) baddefs.ParseServiceList(bad_servicedata) baddefs.ParseNetworkList(bad_networkdata) self.assertRaises(naming.UndefinedServiceError, baddefs._CheckUnseen, 'services') self.assertRaises(naming.UndefinedAddressError, baddefs._CheckUnseen, 'networks') def testParseNetFile(self): filedefs = naming.Naming(None) data = ['FOO = 127.0.0.1 # some network\n'] filedefs._ParseFile(data, 'networks') self.assertEqual(filedefs.GetNetAddr('FOO'), [nacaddr.IPv4('127.0.0.1')]) def testParseServiceFile(self): filedefs = naming.Naming(None) data = ['HTTP = 80/tcp\n'] filedefs._ParseFile(data, 'services') self.assertEqual(filedefs.GetService('HTTP'), ['80/tcp']) def testServiceIncorrectSyntax(self): badservicedata = [] badservicedata.append('SVC1 = 80//tcp 80/udp') badservicedata.append('SVC2 = 81/tcp') testdefs = naming.Naming(None) self.assertRaises(naming.NamingSyntaxError, testdefs.ParseServiceList, badservicedata) def testGetNetChildrenSingle(self): expected = ['NET1'] self.assertEqual(expected, self.defs.GetNetChildren('NET2')) def testGetNetChildrenMulti(self): expected = ['FOO_V6', 'BAR_V6'] self.assertEqual(expected, self.defs.GetNetChildren('BAZ')) def testGetNetChildrenQueryNotExist(self): self.assertEqual([], self.defs.GetNetChildren('IDONOTEXIST')) def testGetNetChildrenNoChild(self): self.assertEqual([], self.defs.GetNetChildren('NET1')) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/nftables_test.py000066400000000000000000000556001437377527500202750ustar00rootroot00000000000000# Copyright 2023 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for Nftables rendering module.""" import datetime import re from unittest import mock from absl import logging from absl.testing import absltest from absl.testing import parameterized from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import nftables from capirca.lib import policy class DictObj: """Helper class to use a dictionary of dictionaries to form an object. We can then specifically test using it. """ def __init__(self, in_dict: dict): assert isinstance(in_dict, dict) for key, val in in_dict.items(): if isinstance(val, (list, tuple)): setattr(self, key, [DictObj(x) if isinstance(x, dict) else x for x in val]) else: setattr(self, key, DictObj(val) if isinstance(val, dict) else val) # "logging" is not a token. SUPPORTED_TOKENS = frozenset({ 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'name', # obj attribute, not token 'option', 'protocol', 'platform', 'platform_exclude', 'source_interface', #input interface 'source_address', 'source_address_exclude', 'source_port', 'destination_interface', #ouput interface 'translated', # obj attribute, not token 'stateless_reply', }) SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny'}, 'option': {'established', 'tcp-established'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, } # IP address data, to be loaded onto policy and test rendering. 
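# --------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream test suite): the
# usual wiring pattern used by the tests in this file - a mocked Naming
# object feeds policy.ParsePolicy and the parsed policy is rendered by the
# nftables generator. The header/term text and the expiry-info value of 2
# mirror GOOD_HEADER_1, GOOD_TERM_1 and EXP_INFO defined later in this file;
# the _SKETCH_* names and helper are hypothetical.
_SKETCH_HEADER = """
header {
  target:: nftables inet6 INPUT
}
"""
_SKETCH_TERM = """
term good-term-1 {
  action:: accept
}
"""


def _sketch_render_nftables():
  """Sketch: render a minimal accept-all nftables ruleset from policy text."""
  from unittest import mock
  from capirca.lib import naming
  from capirca.lib import nftables
  from capirca.lib import policy

  # The term carries no addresses or services, so the autospec'd mock never
  # needs return values, just like testGoodHeader below.
  fake_naming = mock.create_autospec(naming.Naming)
  pol = policy.ParsePolicy(_SKETCH_HEADER + _SKETCH_TERM, fake_naming)
  return str(nftables.Nftables(pol, 2))  # 2 is the EXP_INFO value used here.
# --------------------------------------------------------------------------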
TEST_IPV4_ONLY = [nacaddr.IP('10.2.3.4/32')] TEST_IPV6_ONLY = [nacaddr.IP('2001:4860:8000::5/128')] TEST_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128')] HEADER_TEMPLATE = """ header { target:: nftables %s } """ HEAD_OVERRIDE_DEFAULT_ACTION = """ header { target:: nftables inet output ACCEPT } """ HEADER_COMMENT = """ header { comment:: "Noverbose + custom priority policy example" target:: nftables inet output ACCEPT } """ HEADER_MIXED_AF = """ header { target:: nftables mixed output } """ HEADER_IPV4_AF = """ header { target:: nftables inet output } """ HEADER_IPV6_AF = """ header { target:: nftables inet6 output } """ HEADER_NOVERBOSE = """ header { target:: nftables mixed output noverbose } """ GOOD_HEADER_1 = """ header { target:: nftables inet6 INPUT } """ GOOD_HEADER_2 = """ header { target:: nftables mixed output accept } """ GOOD_HEADER_3 = """ header { target:: nftables inet input } """ DENY_TERM = """ term deny-term { comment:: "Dual-stack IPv4/v6 deny all" action:: deny } """ # Input interface name test term. SOURCE_INTERFACE_TERM = """ term src-interface-term { source-interface:: eth123 protocol:: tcp action:: accept } """ # Output interface name test term. DESTINATION_INTERFACE_TERM = """ term dst-interface-term { destination-interface:: eth123 protocol:: tcp action:: accept } """ BAD_INTERFACE_TERM = """ term dst-interface-term { source-interface:: eth123 destination-interface:: eth123 protocol:: tcp action:: accept } """ ESTABLISHED_OPTION_TERM = """ term established-term { protocol:: udp option:: established action:: accept } """ TCP_ESTABLISHED_OPTION_TERM = """ term tcp-established-term { protocol:: tcp option:: tcp-established action:: accept } """ ICMP_TERM = """ term good-icmp { protocol:: icmp action:: accept } """ ICMPV6_TERM = """ term good-icmpv6 { protocol:: icmpv6 action:: accept } """ ICMPV6_MULTI_TERM = """ term good-icmpv6-type { comment:: "IPv6 ICMP accept many types" icmp-type:: router-solicit router-advertisement neighbor-advertisement neighbor-solicit protocol:: icmpv6 action:: accept } """ COMMENT_TERM = """ term good-icmpv6-type { comment:: "This term has a comment" protocol:: tcp action:: accept } """ NOCOMMENT_TERM = """ term good-icmpv6-type { protocol:: tcp action:: accept } """ LOGGING_TERM = """ term log-packets { logging:: true action:: accept } """ COUNTER_TERM = """ term count-packets { counter:: thisnameisignored action:: accept } """ COUNT_AND_LOG_TERM = """ term count-and-log-packets { logging:: true counter:: thisnameisignored action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: tcp action:: accept destination-port:: SSH destination-address:: TEST_NET } """ IPV6_ONLY_TERM = """ term ip6-only { destination-address:: TEST_IPV6_ONLY action:: accept } """ IPV6_SRCIP = """ term ip6-src-addr { source-address:: TEST_IPV6_ONLY action:: deny } """ IPV4_SRCIP = """ term ip4-src-addr { source-address:: TEST_IPV4_ONLY action:: deny } """ ALL_SRCIP = """ term all-src-addr { comment:: "All IP address families. v4/v6" source-address:: TEST_IPS action:: deny } """ EXCLUDE = {'ip6': [nacaddr.IP('::/3'), nacaddr.IP('::/0')]} # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 def IPhelper(addresses): """Helper for string to nacaddr.IP conversion for parametized tests.""" normalized = [] if not addresses: # if empty list of addresses. 
return addresses else: for addr in addresses: normalized.append(nacaddr.IP(addr)) return normalized class NftablesTest(parameterized.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) self.dummyterm = nftables.Term('', '', '') @parameterized.parameters(('ip protocol tcp', ' ip protocol tcp'), ('', '')) def testAdd(self, statement, expected_output): result = nftables.Add(statement) self.assertEqual(result, expected_output) @parameterized.parameters((2, 'chain acl_name', ' chain acl_name')) def testTabSpacer(self, num_spaces, statement, expected_output): result = nftables.TabSpacer(num_spaces, statement) self.assertEqual(result, expected_output) @parameterized.parameters( ('ip', ['200.1.1.3/32', '9782:b30a:e5c6:1aa4:29ff:e57c:44a0:1b84'], [ '200.1.1.3/32', '2606:4700:4700::1111' ], ['ip saddr 200.1.1.3/32 ip daddr 200.1.1.3/32']), ('ip', ['200.1.1.3/32', '200.1.1.4/32'], [ '200.1.1.3/32', '200.1.1.4/32' ], [ 'ip saddr { 200.1.1.3/32, 200.1.1.4/32 } ip daddr { 200.1.1.3/32, 200.1.1.4/32 }' ]), ('ip6', ['8.8.8.8', '9782:b30a:e5c6:1aa4:29ff:e57c:44a0:1b84'], [ '200.1.1.3/32', '2606:4700:4700::1111' ], [ 'ip6 saddr 9782:b30a:e5c6:1aa4:29ff:e57c:44a0:1b84/128 ip6 daddr 2606:4700:4700::1111/128' ]), ('ip6', ['2606:4700:4700::1111', '2606:4700:4700::1112'], [ '2606:4700:4700::1111', '2606:4700:4700::1112' ], [ 'ip6 saddr { 2606:4700:4700::1111/128, 2606:4700:4700::1112/128 } ip6 daddr { 2606:4700:4700::1111/128, 2606:4700:4700::1112/128 }' ]), ) def test_AddrStatement(self, af, src_addr, dst_addr, expected): # Necessary object format. src_obj = IPhelper(src_addr) dst_obj = IPhelper(dst_addr) result = self.dummyterm._AddrStatement(af, src_obj, dst_obj) self.assertEqual(result, expected) @parameterized.parameters( (['nd-router-advert', 'nd-neighbor-solicit', 'nd-neighbor-advert' ], '{ nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert }'), (['200.1.1.3/32'], '200.1.1.3/32'), (['1.1.1.1', '8.8.8.8'], '{ 1.1.1.1, 8.8.8.8 }'), (['tcp', 'udp', 'icmp'], '{ tcp, udp, icmp }'), (['80', '443'], '{ 80, 443 }'), ('53', '53'), ) def testCreateAnonymousSet(self, input_data, expected): result = self.dummyterm.CreateAnonymousSet(input_data) self.assertEqual(result, expected) @parameterized.parameters( ('',['ip6 saddr 2606:4700:4700::1111/128 ip6 daddr { 2001:4860:4860::8844/128, 2001:4860:4860::8888/128 }'], ['tcp sport 80 tcp dport 80'],'ct state { ESTABLISHED, RELATED } log prefix "combo_cnt_log_established" counter', 'accept', '', ['ip6 saddr 2606:4700:4700::1111/128 ip6 daddr { 2001:4860:4860::8844/128, 2001:4860:4860::8888/128 } tcp sport 80 tcp dport 80 ct state { ESTABLISHED, RELATED } log prefix "combo_cnt_log_established" counter accept' ]), ('',['ip daddr 8.8.8.8/32'], ['tcp sport 53 tcp dport 53'],'ct state new','accept', 'comment "this is a term with a comment"', ['ip daddr 8.8.8.8/32 tcp sport 53 tcp dport 53 ct state new accept comment "this is a term with a comment"']) ) def testGroupExpressions(self, int_str, address_expr, porst_proto_expr, opt, verdict, comment, expected_output): result = self.dummyterm.GroupExpressions(int_str, address_expr, porst_proto_expr, opt, verdict, comment) self.assertEqual(result, expected_output) def testBadInterfaceTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1 + BAD_INTERFACE_TERM, self.naming) with self.assertRaises(nftables.TermError): nftables.Nftables.__init__( nftables.Nftables.__new__(nftables.Nftables), pol, EXP_INFO) def testDuplicateTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + 
GOOD_TERM_1 + GOOD_TERM_1, self.naming) with self.assertRaises(nftables.TermError): nftables.Nftables.__init__( nftables.Nftables.__new__(nftables.Nftables), pol, EXP_INFO) @parameterized.parameters(([(80, 80)], '80'), ([(1024, 65535)], '1024-65535'), ([], '')) def testGroup(self, data, expected_output): """Test _Group function we use in Ports.""" result = self.dummyterm._Group(data) self.assertEqual(result, expected_output) @parameterized.parameters( ('ip', ['tcp'], [], [], [], ['ip protocol tcp']), ('ip', ['tcp'], [(3198, 3199)], [ (80, 80), (443, 443) ], [], ['tcp sport 3198-3199 tcp dport { 80, 443 }']), ('ip', ['tcp, udp'], [], [], [], ['ip protocol tcp, udp']), ('ip6', ['tcp'], [], [], [], ['meta l4proto tcp']), ('ip6', ['tcp'], [(3198, 3199)], [ (80, 80), (443, 443) ], [], ['tcp sport 3198-3199 tcp dport { 80, 443 }']), ('ip6', ['tcp', 'udp'], [], [], [], ['meta l4proto { tcp, udp }']), ) def testPortsAndProtocols(self, af, proto, src_p, dst_p, icmp_type, expected): result = self.dummyterm.PortsAndProtocols(af, proto, src_p, dst_p, icmp_type) self.assertEqual(result, expected) @parameterized.parameters( 'chain_name input 0 inet extraneous_target_option', 'ip6 OUTPUT 300 400' # pylint: disable=implicit-str-concat 'mixed input', 'ip forwarding', 'ip7 0 spaghetti', 'ip6 prerouting', 'chain_name', '', ) def testBadHeader(self, case): logging.info('Testing bad header case %s.', case) header = HEADER_TEMPLATE % case pol = policy.ParsePolicy(header + GOOD_TERM_1, self.naming) with self.assertRaises(nftables.HeaderError): nftables.Nftables.__init__( nftables.Nftables.__new__(nftables.Nftables), pol, EXP_INFO) @parameterized.parameters((HEADER_NOVERBOSE, False), (HEADER_COMMENT, True)) def testVerboseHeader(self, header_to_use, expected_output): pol = policy.ParsePolicy(header_to_use + GOOD_TERM_1, self.naming) data = nftables.Nftables(pol, EXP_INFO) for (_, _, _, _, _, _, verbose, _) in data.nftables_policies: result = verbose self.assertEqual(result, expected_output) def testGoodHeader(self): nftables.Nftables( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) nft = str( nftables.Nftables( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_HEADER_2 + IPV6_SRCIP, self.naming), EXP_INFO)) self.assertIn('type filter hook input', nft) def testStatefulFirewall(self): nftables.Nftables( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) nft = str( nftables.Nftables( policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_HEADER_2 + IPV6_SRCIP, self.naming), EXP_INFO)) self.assertIn('ct state established,related accept', nft) def testICMPv6type(self): nftables.Nftables( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) nft = str( nftables.Nftables( policy.ParsePolicy( GOOD_HEADER_1 + ICMPV6_MULTI_TERM, self.naming), EXP_INFO)) self.assertIn('icmpv6 type { nd-router-solicit, nd-router-advert, nd-neighbor-advert, nd-neighbor-solicit } accept', nft) def testOverridePolicyHeader(self): expected_output = 'accept' pol = policy.ParsePolicy(HEAD_OVERRIDE_DEFAULT_ACTION + GOOD_TERM_1, self.naming) data = nftables.Nftables(pol, EXP_INFO) for (_, _, _, _, _, default_policy, _, _) in data.nftables_policies: result = default_policy self.assertEqual(result, expected_output) @parameterized.parameters((['127.0.0.1', '8.8.8.8'], { 'ip': ['127.0.0.1/32', '8.8.8.8/32'] }), (['0.0.0.0/8', '2001:db8::/32'], { 'ip': ['0.0.0.0/8'], 'ip6': ['2001:db8::/32'] })) def testAddressClassifier(self, addr_to_classify, expected_output): result = 
nftables.Term._AddressClassifier(self, IPhelper(addr_to_classify)) self.assertEqual(result, expected_output) @parameterized.parameters( ('ip6', ['multicast-listener-query'], ['mld-listener-query']), ('ip6', ['echo-request', 'multicast-listener-query' ], ['echo-request', 'mld-listener-query']), ('ip6', [ 'router-solicit', 'multicast-listener-done', 'router-advertisement' ], ['nd-router-solicit', 'mld-listener-done', 'nd-router-advert']), ('ip4', ['echo-request', 'echo-reply'], ['echo-request', 'echo-reply']), ) def testMapICMPtypes(self, af, icmp_types, expected_output): result = self.dummyterm.MapICMPtypes(af, icmp_types) self.assertEqual(result, expected_output) @parameterized.parameters( ({ 'name': 'tcp_established', 'option': ['tcp-established', 'established'], 'icmp_type': None, 'counter': None, 'logging': [], 'protocol': ['tcp', 'icmp'], 'action': ['deny'], }, ''), ({ 'name': 'icmpv6_noconttrack', 'option': [], 'icmp_type': ['router-solicit'], 'counter': None, 'logging': [], 'protocol': ['icmpv6'], 'action': ['accept'], }, ''), ({ 'name': 'dont_render_tcp_established', 'option': ['tcp-established', 'established'], 'icmp_type': None, 'counter': None, 'logging': [], 'protocol': ['icmp'], 'action': ['accept'], }, 'ct state new'), ({ 'name': 'blank_option_donothing', 'option': [], 'icmp_type': None, 'counter': None, 'logging': [], 'protocol': ['icmp'], 'action': ['accept'], }, 'ct state new'), ({ 'name': 'syslog', 'option': [], 'icmp_type': None, 'counter': None, 'logging': ['syslog'], 'protocol': ['tcp'], 'action': ['accept'], }, 'ct state new log prefix "syslog"'), ({ 'name': 'logging_disabled', 'option': [], 'icmp_type': None, 'counter': None, 'logging': ['disable'], 'protocol': ['tcp'], 'action': ['accept'], }, 'ct state new'), ({ 'name': 'combo_logging_tcp_established', 'option': ['tcp-established'], 'icmp_type': None, 'counter': None, 'logging': ['true'], 'protocol': ['tcp'], 'action': ['accept'], }, 'ct state new log prefix "combo_logging_tcp_established"'), ({ 'name': 'combo_cnt_log_established', 'option': ['tcp-established'], 'icmp_type': None, 'counter': 'whatever-name-you-want', 'logging': ['true'], 'protocol': ['tcp'], 'action': ['deny'], }, 'log prefix "combo_cnt_log_established" counter'), ) def testOptionsHandler(self, term_dict, expected_output): term = DictObj(term_dict) result = self.dummyterm._OptionsHandler(term) self.assertEqual(result, expected_output) def testBuildTokens(self): self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol1 = nftables.Nftables( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) @parameterized.parameters( (ESTABLISHED_OPTION_TERM,'WARNING: Term established-term is a established term and will not be rendered.'), (TCP_ESTABLISHED_OPTION_TERM, 'WARNING: Term tcp-established-term is a tcp-established term and will not be rendered.') ) def testSkippedTerm(self, termdata, messagetxt): with self.assertLogs() as ctx: # run a policy object expected to be skipped and logged. 
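An aside on the logging pattern used in testSkippedTerm here: unittest's assertLogs context manager collects every record emitted while its block runs, and the captured records can then be inspected. A minimal self-contained sketch of that mechanism (an editor's illustration using only the standard library, not part of the capirca suite):

import logging
import unittest


class AssertLogsSketch(unittest.TestCase):

  def test_warning_is_captured(self):
    # assertLogs fails if nothing is logged inside the block; otherwise the
    # context object exposes the raw records and formatted output lines.
    with self.assertLogs(level='WARNING') as ctx:
      logging.warning('Term %s will not be rendered.', 'established-term')
    self.assertIn('established-term', ctx.records[0].getMessage())


if __name__ == '__main__':
  unittest.main()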
nft = nftables.Nftables( policy.ParsePolicy(GOOD_HEADER_1 + termdata, self.naming), EXP_INFO) # self.assertEqual(len(ctx.records), 2) record = ctx.records[1] self.assertEqual(record.message, messagetxt) @parameterized.parameters( (HEADER_MIXED_AF + ICMPV6_TERM, 'ip protocol icmp'), (HEADER_IPV4_AF + ICMPV6_TERM, 'meta l4proto icmpv6'), (HEADER_IPV6_AF + ICMP_TERM, 'ip protocol icmp'), ) def testRulesetGeneratorICMPmismatch(self, pol_data, doesnotcontain): # This test ensures that ICMPv6 only term isn't rendered in a mixed header. nftables.Nftables( policy.ParsePolicy(pol_data, self.naming), EXP_INFO) nft = str( nftables.Nftables( policy.ParsePolicy(pol_data, self.naming), EXP_INFO)) self.assertNotIn(doesnotcontain, nft) def testRulesetGeneratorUniqueChain(self): # This test is intended to verify that on mixed address family rulesets # no duplicate instance of a simple deny is rendered within a mixed chain. expected_term_rule = 'drop comment "Dual-stack IPv4/v6 deny all"' count = 0 nftables.Nftables( policy.ParsePolicy(HEADER_MIXED_AF + DENY_TERM, self.naming), EXP_INFO) nft = str( nftables.Nftables( policy.ParsePolicy( HEADER_MIXED_AF + DENY_TERM, self.naming), EXP_INFO)) matching_lines = re.findall(expected_term_rule, nft) for match in matching_lines: count += 1 self.assertEqual(count, 1) @parameterized.parameters( (GOOD_HEADER_1 + GOOD_TERM_2, 'inet6'), (GOOD_HEADER_1 + ICMPV6_TERM, 'inet6'), (GOOD_HEADER_1 + COMMENT_TERM, 'mixed'), (GOOD_HEADER_2 + GOOD_TERM_2, 'mixed'), (GOOD_HEADER_3 + GOOD_TERM_2, 'inet'), (GOOD_HEADER_3 + ICMP_TERM, 'inet'), ) def testRulesetGeneratorAF(self, policy_data: str, expected_inet: str): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.return_value = ['22'] nft = nftables.Nftables( policy.ParsePolicy(policy_data, self.naming), EXP_INFO) for header, terms in nft.policy.filters: filter_options = header.FilterOptions('nftables') nf_af, nf_hook, _, _, verbose = nft._ProcessHeader(filter_options) for term in terms: term_object = nftables.Term(term, nf_af, nf_hook, verbose) # Checks for address family consistency within terms ruleset_list = term_object.RulesetGenerator(term) self.assertNotEmpty(ruleset_list) for ruleset in ruleset_list: if expected_inet == 'inet': self.assertNotIn(str(TEST_IPV6_ONLY), ruleset) elif expected_inet == 'inet6': self.assertNotIn(str(TEST_IPV4_ONLY), ruleset) for rule in ruleset.split('\n'): if rule.startswith('ip '): self.assertNotIn('meta l4proto', rule) self.assertNotIn('icmpv6', rule) if rule.startswith('ip6 '): self.assertNotIn('ip protocol', rule) self.assertNotIn('icmp', rule) @parameterized.parameters( (GOOD_HEADER_1 + SOURCE_INTERFACE_TERM, TEST_IPS, ' iifname eth123 meta l4proto'), (GOOD_HEADER_1 + DESTINATION_INTERFACE_TERM, TEST_IPS, ' oifname eth123 meta l4proto'), (GOOD_HEADER_1 + LOGGING_TERM, TEST_IPS, 'log prefix "log-packets"'), (GOOD_HEADER_1 + COUNTER_TERM, TEST_IPS, 'counter'), (GOOD_HEADER_1 + COUNT_AND_LOG_TERM, TEST_IPS, 'log prefix "count-and-log-packets" counter'), (HEADER_MIXED_AF + IPV6_ONLY_TERM, TEST_IPS, 'ip6 daddr 2001:4860:8000::5/128 ct state new accept'), (HEADER_MIXED_AF + ALL_SRCIP, TEST_IPS, 'ip saddr 10.2.3.4/32 drop comment "All IP address families. 
v4/v6"'), ) def testRulesetGenerator(self, policy_data: str, IPs, contains: str): self.naming.GetNetAddr.return_value = IPs nft = str( nftables.Nftables( policy.ParsePolicy(policy_data, self.naming), EXP_INFO)) self.assertIn(contains, nft) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/nsxv_functtest.py000066400000000000000000000075541437377527500205420ustar00rootroot00000000000000# Copyright 2015 The Capirca Project Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Functional test class for nsxv.py.""" import copy import optparse from absl.testing import absltest from xml.etree import ElementTree as ET from capirca.lib import naming from capirca.lib import nsxv from capirca.lib import policy from capirca.tests.lib import nsxv_mocktest class NsxvFunctionalTest(absltest.TestCase): """Functional testing for NSXV.""" def setUp(self): """Call before every test case.""" super().setUp() parser = optparse.OptionParser() parser.add_option( '-d', '--def', dest='definitions', help='definitions directory', default='../def') (FLAGS, args) = _parser.parse_args() self.defs = naming.Naming(FLAGS.definitions) def tearDown(self): super().tearDown() pass def runTest(self): pass def test_nsxv_policy(self): pol = policy.ParsePolicy(nsxv_mocktest.POLICY, self.defs) exp_info = 2 nsx = copy.deepcopy(pol) fw = nsxv.Nsxv(nsx, exp_info) output = str(fw) # parse the xml root = ET.fromstring(output) # check section name section_name = {'id': '1007', 'name': 'POLICY_NAME'} self.assertEqual(root.attrib, section_name) # check name and action self.assertEqual(root.find('./rule/name').text, 'reject-imap-requests') self.assertEqual(root.find('./rule/action').text, 'reject') # check IPV4 destination exp_destaddr = ['200.1.1.4/31'] for destination in root.findall('./rule/destinations/destination'): self.assertEqual((destination.find('type').text), 'Ipv4Address') value = (destination.find('value').text) if value not in exp_destaddr: self.fail('IPv4Address destination not found in test_nsxv_str()') # check protocol protocol = int(root.find('./rule/services/service/protocol').text) self.assertEqual(protocol, 6) # check destination port destination_port = root.find('./rule/services/service/destinationPort').text self.assertEqual(destination_port, '143') def test_nsxv_nosectiondid(self): pol = policy.ParsePolicy(nsxv_mocktest.POLICY_NO_SECTION_ID, self.defs) exp_info = 2 nsx = copy.deepcopy(pol) fw = nsxv.Nsxv(nsx, exp_info) output = str(fw) # parse the xml root = ET.fromstring(output) # check section name section_name = {'name': 'POLICY_NO_SECTION_ID_NAME'} self.assertEqual(root.attrib, section_name) # check name and action self.assertEqual(root.find('./rule/name').text, 'accept-icmp') self.assertEqual(root.find('./rule/action').text, 'allow') # check protocol protocol = int(root.find('./rule/services/service/protocol').text) self.assertEqual(protocol, 1) def test_nsxv_nofiltertype(self): pol = policy.ParsePolicy(nsxv_mocktest.POLICY_NO_FILTERTYPE, self.defs) 
self.assertRaises(nsxv.UnsupportedNsxvAccessListError, nsxv.Nsxv(pol, 2)) def test_nsxv_incorrectfiltertype(self): pol = policy.ParsePolicy(nsxv_mocktest.POLICY_INCORRECT_FILTERTYPE, self.defs) self.assertRaises(nsxv.UnsupportedNsxvAccessListError, nsxv.Nsxv(pol, 2)) def test_nsxv_optionkywd(self): pol = policy.ParsePolicy(nsxv_mocktest.POLICY_OPTION_KYWD, self.defs) self.assertRaises(nsxv.NsxvAclTermError, str(nsxv.Nsxv(pol, 2))) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/nsxv_mocktest.py000066400000000000000000000060311437377527500203410ustar00rootroot00000000000000# Copyright 2015 The Capirca Project Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Nsxv Mock Test terms for nsxv module.""" INET_TERM = """\ term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } """ INET6_TERM = """\ term test-icmpv6 { protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ INET_FILTER = """\ header { comment:: "Sample inet NSXV filter" target:: nsxv INET_FILTER_NAME inet } term allow-ntp-request { comment::"Allow ntp request" source-address:: NTP_SERVERS source-port:: NTP destination-address:: INTERNAL destination-port:: NTP protocol:: udp action:: accept } """ INET6_FILTER = """\ header { comment:: "Sample inet6 NSXV filter" target:: nsxv INET6_FILTER_NAME inet6 } term test-icmpv6 { #destination-address:: WEB_SERVERS protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ MIXED_FILTER = """\ header { comment:: "Sample mixed NSXV filter" target:: nsxv MIXED_FILTER_NAME mixed 1009 } term accept-to-honestdns { comment:: "Allow name resolution using honestdns." destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } """ POLICY = """\ header { comment:: "Sample NSXV filter" target:: nsxv POLICY_NAME inet 1007 } term reject-imap-requests { destination-address:: MAIL_SERVERS destination-port:: IMAP protocol:: tcp action:: reject-with-tcp-rst } """ POLICY_NO_SECTION_ID = """\ header { comment:: "NSXV filter without section id" target:: nsxv POLICY_NO_SECTION_ID_NAME inet } term accept-icmp { protocol:: icmp action:: accept } """ POLICY_NO_FILTERTYPE = """\ header { comment:: "Sample NSXV filter" target:: nsxv POLICY_NO_FILTERTYPE_NAME } term accept-icmp { protocol:: icmp action:: accept } """ POLICY_INCORRECT_FILTERTYPE = """\ header { comment:: "Sample NSXV filter" target:: nsxv POLICY_INCORRECT_FILTERTYPE_NAME inet1 } term accept-icmp { protocol:: icmp action:: accept } """ POLICY_OPTION_KYWD = """\ header { comment:: "Sample NSXV filter" target:: nsxv POLICY_OPTION_KYWD_NAME inet 1009 } term accept-bgp-replies { comment:: "Allow inbound replies to BGP requests." source-port:: BGP protocol:: tcp option:: tcp-established action:: accept } """ capirca-2.0.9/tests/lib/nsxv_test.py000066400000000000000000000774701437377527500175060ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """UnitTest class for nsxv.py.""" from absl.testing import absltest from unittest import mock from xml.etree import ElementTree as ET from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import nsxv from capirca.lib import policy INET_TERM = """\ term permit-mail-services { destination-address:: MAIL_SERVERS protocol:: tcp destination-port:: MAIL_SERVICES action:: accept } """ INET6_TERM = """\ term test-icmpv6 { protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ INET_FILTER = """\ header { comment:: "Sample inet NSXV filter" target:: nsxv INET_FILTER_NAME inet } term allow-ntp-request { comment::"Allow ntp request" source-address:: NTP_SERVERS source-port:: NTP destination-address:: INTERNAL destination-port:: NTP protocol:: udp action:: accept } """ INET_FILTER_2 = """\ header { comment:: "Sample inet NSXV filter" target:: nsxv INET_FILTER2_NAME inet } term allow-ntp-request { comment::"Allow ntp request" source-address:: NTP_SERVERS source-port:: NTP destination-address:: INTERNAL destination-port:: NTP protocol:: udp policer:: batman action:: accept } """ INET_FILTER_WITH_ESTABLISHED = """\ header { comment:: "Sample inet NSXV filter" target:: nsxv INET_FILTER_WITH_ESTABLISHED_NAME inet } term allow-ntp-request { comment::"Allow ntp request" source-address:: NTP_SERVERS source-port:: NTP destination-address:: INTERNAL destination-port:: NTP protocol:: udp option:: tcp-established policer:: batman action:: accept } """ MIXED_HEADER = """\ header { comment:: "Sample mixed NSXV filter" target:: nsxv MIXED_HEADER_NAME mixed } """ INET_HEADER = """\ header { comment:: "Sample mixed NSXV filter" target:: nsxv INET_HEADER_NAME inet } """ MIXED_FILTER_INET_ONLY = MIXED_HEADER + INET_TERM INET_FILTER_NO_SOURCE = INET_HEADER + INET_TERM INET6_FILTER = """\ header { comment:: "Sample inet6 NSXV filter" target:: nsxv INET6_FILTER_NAME inet6 } term test-icmpv6 { #destination-address:: WEB_SERVERS protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ MIXED_FILTER = """\ header { comment:: "Sample mixed NSXV filter" target:: nsxv MIXED_FILTER_NAME mixed } term accept-to-honestdns { comment:: "Allow name resolution using honestdns." 
destination-address:: GOOGLE_DNS destination-port:: DNS protocol:: udp action:: accept } """ POLICY = """\ header { comment:: "Sample NSXV filter" target:: nsxv POLICY_NAME inet } term reject-imap-requests { destination-address:: MAIL_SERVERS destination-port:: IMAP protocol:: tcp action:: reject-with-tcp-rst } """ POLICY_WITH_SECURITY_GROUP = """\ header { comment:: "Sample filter with Security Group" target:: nsxv POLICY_WITH_SECURITY_GROUP_NAME inet 1010 securitygroup \ securitygroup-Id } term accept-icmp { protocol:: icmp action:: accept } """ HEADER_WITH_SECTIONID = """\ header { comment:: "Sample NSXV filter1" target:: nsxv HEADER_WITH_SECTIONID_NAME inet 1009 } """ HEADER_WITH_SECURITYGROUP = """\ header { comment:: "Sample NSXV filter2" target:: nsxv HEADER_WITH_SECURITYGROUP_NAME inet6 securitygroup \ securitygroup-Id1 } """ BAD_HEADER = """\ header { comment:: "Sample NSXV filter3" target:: nsxv BAD_HEADER_NAME inet 1011 securitygroup } """ BAD_HEADER_1 = """\ header { comment:: "Sample NSXV filter4" target:: nsxv BAD_HEADER_1_NAME 1012 } """ BAD_HEADER_2 = """\ header { comment:: "Sample NSXV filter5" target:: nsxv BAD_HEADER_2_NAME inet securitygroup } """ BAD_HEADER_3 = """\ header { comment:: "Sample NSXV filter6" target:: nsxv BAD_HEADER_3_NAME } """ BAD_HEADER_4 = """\ header { comment:: "Sample NSXV filter7" target:: nsxv BAD_HEADER_3_NAME inet 1234 securitygroup securitygroup \ securitygroupId1 } """ TERM = """\ term accept-icmp { protocol:: icmp action:: accept } """ MIXED_TO_V4 = """\ term mixed_to_v4 { source-address:: GOOGLE_DNS destination-address:: INTERNAL protocol:: tcp udp action:: accept } """ V4_TO_MIXED = """\ term v4_to_mixed { source-address:: INTERNAL destination-address:: GOOGLE_DNS protocol:: tcp udp action:: accept } """ MIXED_TO_V6 = """\ term mixed_to_v6 { source-address:: GOOGLE_DNS destination-address:: SOME_HOST action:: accept } """ V6_TO_MIXED = """\ term v6_to_mixed { source-address:: SOME_HOST destination-address:: GOOGLE_DNS action:: accept } """ MIXED_TO_MIXED = """\ term mixed_to_mixed { source-address:: GOOGLE_DNS destination-address:: GOOGLE_DNS action:: accept } """ MIXED_TO_ANY = """\ term mixed_to_any { source-address:: GOOGLE_DNS action:: accept } """ ANY_TO_MIXED = """\ term any_to_mixed { destination-address:: GOOGLE_DNS action:: accept } """ V4_TO_V4 = """\ term v4_to_v4 { source-address:: NTP_SERVERS destination-address:: INTERNAL action:: accept } """ V6_TO_V6 = """\ term v6_to_v6 { source-address:: SOME_HOST destination-address:: SOME_HOST action:: accept } """ V4_TO_V6 = """\ term v4_to_v6 { source-address:: INTERNAL destination-address:: SOME_HOST action:: accept } """ V6_TO_V4 = """\ term v6_to_v4 { source-address:: SOME_HOST destination-address:: INTERNAL action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'expiration', 'icmp_type', 'stateless_reply', 'logging', 'name', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'option', 'platform', 'platform_exclude', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 
'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 _PLATFORM = 'nsxv' class TermTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testInitForinet(self): """Test for Term._init_.""" inet_term = nsxv.Term(INET_TERM, 'inet') self.assertEqual(inet_term.af, 4) self.assertEqual(inet_term.filter_type, 'inet') def testInitForinet6(self): """Test for Term._init_.""" inet6_term = nsxv.Term(INET6_TERM, 'inet6', None, 6) self.assertEqual(inet6_term.af, 6) self.assertEqual(inet6_term.filter_type, 'inet6') def testServiceToStr(self): """Test for Term._ServiceToStr.""" proto = 6 icmp_types = [] dports = [(1024, 65535)] spots = [(123, 123)] nsxv_term = nsxv.Term(INET_TERM, 'inet') service = nsxv_term._ServiceToString(proto, spots, dports, icmp_types) self.assertEqual(service, '6' '1231024-65535' '') def testStrForinet(self): """Test for Term._str_.""" self.naming.GetNetAddr.side_effect = [ [nacaddr.IP('10.0.0.1'), nacaddr.IP('10.0.0.2')], [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('172.16.0.0/12'), nacaddr.IP('192.168.0.0/16')]] self.naming.GetServiceByProto.return_value = ['123'] pol = policy.ParsePolicy(INET_FILTER, self.naming, False) af = 4 for _, terms in pol.filters: nsxv_term = nsxv.Term(terms[0], af) rule_str = nsxv.Term.__str__(nsxv_term) # parse xml rule and check if the values are correct root = ET.fromstring(rule_str) # check name and action self.assertEqual(root.find('name').text, 'allow-ntp-request') self.assertEqual(root.find('action').text, 'allow') # check source address exp_sourceaddr = ['10.0.0.1', '10.0.0.2'] source_address = root.findall('./sources/source') self.assertNotEqual(len(source_address), 0) for source in source_address: self.assertEqual((source.find('type').text), 'Ipv4Address') value = (source.find('value').text) if value not in exp_sourceaddr: self.fail('IPv4Address source address not found in test_str_forinet()') # check destination address exp_destaddr = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'] destination_address = root.findall('./destinations/destination') self.assertNotEqual(len(destination_address), 0) for destination in destination_address: self.assertEqual((destination.find('type').text), 'Ipv4Address') value = (destination.find('value').text) if value not in exp_destaddr: self.fail('IPv4Address destination not found in test_str_forinet()') # check protocol protocol = int(root.find('./services/service/protocol').text) self.assertEqual(protocol, 17) # check source port source_port = root.find('./services/service/sourcePort').text self.assertEqual(source_port, '123') # check destination port destination_port = root.find('./services/service/destinationPort').text 
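The NSXV assertions in this file all follow the same ElementTree pattern: parse the rendered rule with ET.fromstring and walk child elements with find and findall. A small self-contained sketch of that pattern (an editor's illustration; the XML fragment is hypothetical, not generator output):

from xml.etree import ElementTree as ET

RULE_XML = (
    '<rule>'
    '<name>allow-ntp-request</name>'
    '<action>allow</action>'
    '<services><service><protocol>17</protocol></service></services>'
    '</rule>'
)

root = ET.fromstring(RULE_XML)
assert root.find('name').text == 'allow-ntp-request'
assert root.find('./action').text == 'allow'
# findall returns every element matching the relative XPath expression.
protocols = [int(s.find('protocol').text)
             for s in root.findall('./services/service')]
assert protocols == [17]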
self.assertEqual(destination_port, '123') # check notes notes = root.find('notes').text self.assertEqual(notes, 'Allow ntp request') self.naming.GetNetAddr.assert_has_calls( [mock.call('NTP_SERVERS'), mock.call('INTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('NTP', 'udp')] * 2) def testStrForinet6(self): """Test for Term._str_.""" pol = policy.ParsePolicy(INET6_FILTER, self.naming, False) af = 6 filter_type = 'inet6' for _, terms in pol.filters: nsxv_term = nsxv.Term(terms[0], filter_type, None, af) rule_str = nsxv.Term.__str__(nsxv_term) # parse xml rule and check if the values are correct root = ET.fromstring(rule_str) # check name and action self.assertEqual(root.find('name').text, 'test-icmpv6') self.assertEqual(root.find('action').text, 'allow') # check protocol and sub protocol exp_subprotocol = [128, 129] for service in root.findall('./services/service'): protocol = int(service.find('protocol').text) self.assertEqual(protocol, 58) sub_protocol = int(service.find('subProtocol').text) if sub_protocol not in exp_subprotocol: self.fail('subProtocol not matched in test_str_forinet6()') def testTranslatePolicy(self): """Test for Nsxv.test_TranslatePolicy.""" self.naming.GetNetAddr.side_effect = [ [nacaddr.IP('10.0.0.1'), nacaddr.IP('10.0.0.2')], [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('172.16.0.0/12'), nacaddr.IP('192.168.0.0/16')]] self.naming.GetServiceByProto.return_value = ['123'] pol = policy.ParsePolicy(INET_FILTER, self.naming, False) translate_pol = nsxv.Nsxv(pol, EXP_INFO) nsxv_policies = translate_pol.nsxv_policies for (_, filter_name, filter_list, terms) in nsxv_policies: self.assertEqual(filter_name, 'inet') self.assertEqual(filter_list, ['inet']) self.assertEqual(len(terms), 1) self.naming.GetNetAddr.assert_has_calls( [mock.call('NTP_SERVERS'), mock.call('INTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('NTP', 'udp')] * 2) def testTranslatePolicyMixedFilterInetOnly(self): """Test for Nsxv.test_TranslatePolicy. Testing Mixed filter with inet.""" self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(MIXED_FILTER_INET_ONLY, self.naming, False) translate_pol = nsxv.Nsxv(pol, EXP_INFO) nsxv_policies = translate_pol.nsxv_policies for (_, filter_name, filter_list, terms) in nsxv_policies: self.assertEqual(filter_name, 'mixed') self.assertEqual(filter_list, ['mixed']) self.assertEqual(len(terms), 1) self.assertIn('10.0.0.0/8', str(terms[0])) self.naming.GetNetAddr.assert_has_calls( [mock.call('MAIL_SERVERS')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('MAIL_SERVICES', 'tcp')] * 1) def testTranslatePolicyMixedFilterInet6Only(self): """Test for Nsxv.test_TranslatePolicy. 
Testing Mixed filter with inet6.""" self.naming.GetNetAddr.return_value = [nacaddr.IP('2001:4860:4860::8844')] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(MIXED_FILTER_INET_ONLY, self.naming, False) translate_pol = nsxv.Nsxv(pol, EXP_INFO) nsxv_policies = translate_pol.nsxv_policies for (_, filter_name, filter_list, terms) in nsxv_policies: self.assertEqual(filter_name, 'mixed') self.assertEqual(filter_list, ['mixed']) self.assertEqual(len(terms), 1) self.assertIn('2001:4860:4860::8844', str(terms[0])) self.naming.GetNetAddr.assert_has_calls( [mock.call('MAIL_SERVERS')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('MAIL_SERVICES', 'tcp')] * 1) def testTranslatePolicyMixedFilterInetMixed(self): """Test for Nsxv.test_TranslatePolicy. Testing Mixed filter with mixed.""" self.naming.GetNetAddr.return_value = [ nacaddr.IP('2001:4860:4860::8844'), nacaddr.IP('10.0.0.0/8') ] self.naming.GetServiceByProto.return_value = ['25'] pol = policy.ParsePolicy(MIXED_FILTER_INET_ONLY, self.naming, False) translate_pol = nsxv.Nsxv(pol, EXP_INFO) nsxv_policies = translate_pol.nsxv_policies for (_, filter_name, filter_list, terms) in nsxv_policies: self.assertEqual(filter_name, 'mixed') self.assertEqual(filter_list, ['mixed']) self.assertEqual(len(terms), 1) self.assertIn('2001:4860:4860::8844', str(terms[0])) self.assertIn('10.0.0.0/8', str(terms[0])) self.naming.GetNetAddr.assert_has_calls( [mock.call('MAIL_SERVERS')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('MAIL_SERVICES', 'tcp')] * 1) def testTranslatePolicyWithEstablished(self): """Test for Nsxv.test_TranslatePolicy.""" self.naming.GetNetAddr.side_effect = [ [nacaddr.IP('10.0.0.1'), nacaddr.IP('10.0.0.2')], [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('172.16.0.0/12'), nacaddr.IP('192.168.0.0/16')]] self.naming.GetServiceByProto.return_value = ['123'] pol = policy.ParsePolicy(INET_FILTER_WITH_ESTABLISHED, self.naming, False) translate_pol = nsxv.Nsxv(pol, EXP_INFO) nsxv_policies = translate_pol.nsxv_policies for (_, filter_name, filter_list, terms) in nsxv_policies: self.assertEqual(filter_name, 'inet') self.assertEqual(filter_list, ['inet']) self.assertEqual(len(terms), 1) self.assertNotIn('123123', str(terms[0])) self.naming.GetNetAddr.assert_has_calls( [mock.call('NTP_SERVERS'), mock.call('INTERNAL')]) self.naming.GetServiceByProto.assert_has_calls( [mock.call('NTP', 'udp')] * 2) def testNsxvStr(self): """Test for Nsxv._str_.""" self.naming.GetNetAddr.side_effect = [ [nacaddr.IP('8.8.4.4'), nacaddr.IP('8.8.8.8'), nacaddr.IP('2001:4860:4860::8844'), nacaddr.IP('2001:4860:4860::8888')]] self.naming.GetServiceByProto.return_value = ['53'] pol = policy.ParsePolicy(MIXED_FILTER, self.naming, False) target = nsxv.Nsxv(pol, EXP_INFO) # parse the output and seperate sections and comment section_tokens = str(target).split('6123' '1024-65535' '') def test_str_forinet(self): """Test for Term._str_.""" pol = policy.ParsePolicy(nsxv_mocktest.INET_FILTER, self.defs, False) af = 4 for _, terms in pol.filters: nsxv_term = nsxv.Term(terms[0], af) rule_str = nsxv.Term.__str__(nsxv_term) # parse xml rule and check if the values are correct root = ET.fromstring(rule_str) # check name and action self.assertEqual(root.find('name').text, 'allow-ntp-request') self.assertEqual(root.find('action').text, 'allow') # check source address exp_sourceaddr = ['10.0.0.1', '10.0.0.2'] for destination in root.findall('./sources/source'): self.assertEqual((destination.find('type').text), 'Ipv4Address') value = 
(destination.find('value').text) if value not in exp_sourceaddr: self.fail('IPv4Address source address not found in test_str_forinet()') # check destination address exp_destaddr = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'] for destination in root.findall('./destinations/destination'): self.assertEqual((destination.find('type').text), 'Ipv4Address') value = (destination.find('value').text) if value not in exp_destaddr: self.fail('IPv4Address destination not found in test_str_forinet()') # check protocol protocol = int(root.find('./services/service/protocol').text) self.assertEqual(protocol, 17) # check source port source_port = root.find('./services/service/sourcePort').text self.assertEqual(source_port, '123') # check destination port destination_port = root.find('./services/service/destinationPort').text self.assertEqual(destination_port, '123') # check notes notes = root.find('notes').text self.assertEqual(notes, 'Allow ntp request') def test_str_forinet6(self): """Test for Term._str_.""" pol = policy.ParsePolicy(nsxv_mocktest.INET6_FILTER, self.defs, False) af = 6 filter_type = 'inet6' for _, terms in pol.filters: nsxv_term = nsxv.Term(terms[0], filter_type, af) rule_str = nsxv.Term.__str__(nsxv_term) # parse xml rule and check if the values are correct root = ET.fromstring(rule_str) # check name and action self.assertEqual(root.find('name').text, 'test-icmpv6') self.assertEqual(root.find('action').text, 'allow') # check protocol and sub protocol exp_subprotocol = [128, 129] for service in root.findall('./services/service'): protocol = int(service.find('protocol').text) self.assertEqual(protocol, 58) sub_protocol = int(service.find('subProtocol').text) if sub_protocol not in exp_subprotocol: self.fail('subProtocol not matched in test_str_forinet6()') def test_TranslatePolicy(self): """Test for Nsxv.test_TranslatePolicy.""" # exp_info default is 2 exp_info = 2 pol = policy.ParsePolicy(nsxv_mocktest.INET_FILTER, self.defs, False) translate_pol = nsxv.Nsxv(pol, exp_info) nsxv_policies = translate_pol.nsxv_policies for (_, filter_name, filter_list, terms) in nsxv_policies: self.assertEqual(filter_name, 'inet') self.assertEqual(filter_list, ['inet']) self.assertEqual(len(terms), 1) def test_nsxv_str(self): """Test for Nsxv._str_.""" # exp_info default is 2 exp_info = 2 pol = policy.ParsePolicy(nsxv_mocktest.MIXED_FILTER, self.defs, False) target = nsxv.Nsxv(pol, exp_info) # parse the xml and check the values root = ET.fromstring(str(target)) # check section name section_name = {'id': '1009', 'name': 'MIXED_FILTER_NAME'} self.assertEqual(root.attrib, section_name) # check name and action self.assertEqual(root.find('./rule/name').text, 'accept-to-honestdns') self.assertEqual(root.find('./rule/action').text, 'allow') # check IPV4 and IPV6 destinations exp_ipv4dest = ['8.8.4.4', '8.8.8.8'] exp_ipv6dest = ['2001:4860:4860::8844', '2001:4860:4860::8888'] for destination in root.findall('./rule/destinations/destination'): obj_type = destination.find('type').text value = (destination.find('value').text) if 'Ipv4Address' in obj_type: if value not in exp_ipv4dest: self.fail('IPv4Address not found in test_nsxv_str()') else: if value not in exp_ipv6dest: self.fail('IPv6Address not found in test_nsxv_str()') # check protocol protocol = int(root.find('./rule/services/service/protocol').text) self.assertEqual(protocol, 17) # check destination port destination_port = root.find('./rule/services/service/destinationPort').text self.assertEqual(destination_port, '53') # check notes notes = 
root.find('./rule/notes').text self.assertEqual(notes, 'Allow name resolution using honestdns.') if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/openconfig_test.py000066400000000000000000000215371437377527500206300ustar00rootroot00000000000000# Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for OpenConfig rendering module.""" import json from absl.testing import absltest from unittest import mock from absl.testing import parameterized from capirca.lib import aclgenerator from capirca.lib import openconfig from capirca.lib import gcp from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy GOOD_HEADER = """ header { comment:: "The general policy comment." target:: openconfig inet } """ GOOD_SADDR = """ term good-term-1 { comment:: "Allow source address." source-address:: CORP_EXTERNAL action:: accept } """ GOOD_DADDR = """ term good-term-1 { comment:: "Allow destination address." destination-address:: CORP_EXTERNAL action:: accept } """ GOOD_SPORT = """ term good-term-1 { comment:: "Allow TCP 53 source." source-port:: DNS protocol:: tcp action:: accept } """ GOOD_DPORT = """ term good-term-1 { comment:: "Allow TCP 53 dest." destination-port:: DNS protocol:: tcp action:: accept } """ GOOD_MULTI_PROTO_DPORT = """ term good-term-1 { comment:: "Allow TCP & UDP 53." destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_EVERYTHING = """ term good-term-1 { comment:: "Allow TCP & UDP 53 with saddr/daddr." 
destination-address:: CORP_EXTERNAL source-address:: CORP_EXTERNAL destination-port:: DNS protocol:: udp tcp action:: accept } """ GOOD_JSON_SADDR = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "source-address": "10.2.3.4/32" } } } ] """ GOOD_JSON_V6_SADDR = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv6": { "config": { "source-address": "2001:4860:8000::5/128" } } } ] """ GOOD_JSON_DADDR = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "destination-address": "10.2.3.4/32" } } } ] """ GOOD_JSON_V6_DADDR = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv6": { "config": { "destination-address": "2001:4860:8000::5/128" } } } ] """ GOOD_JSON_MIXED_DADDR = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "destination-address": "10.2.3.4/32" } } }, { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv6": { "config": { "destination-address": "2001:4860:8000::5/128" } } } ] """ GOOD_JSON_SPORT = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "protocol": 6 }, "transport": { "config": { "source-port": 53} } } } ] """ GOOD_JSON_DPORT = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "protocol": 6 }, "transport": { "config": { "destination-port": 53} } } } ] """ GOOD_JSON_MULTI_PROTO_DPORT = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "protocol": 17 }, "transport": { "config": { "destination-port": 53} } } }, { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "protocol": 6 }, "transport": { "config": { "destination-port": 53} } } } ] """ GOOD_JSON_EVERYTHING = """ [ { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "destination-address": "10.2.3.4/32", "protocol": 17, "source-address": "10.2.3.4/32" }, "transport": { "config": { "destination-port": 53 } } } }, { "actions": { "config": { "forwarding-action": "ACCEPT" } }, "ipv4": { "config": { "destination-address": "10.2.3.4/32", "protocol": 6, "source-address": "10.2.3.4/32" }, "transport": { "config": { "destination-port": 53 } } } } ] """ GOOD_HEADER_INET6 = """ header { comment:: "The general policy comment." target:: openconfig inet6 } """ GOOD_HEADER_MIXED = """ header { comment:: "The general policy comment." target:: openconfig mixed } """ # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 TEST_IPS = [nacaddr.IP('10.2.3.4/32'), nacaddr.IP('2001:4860:8000::5/128')] _TERM_SOURCE_TAGS_LIMIT = 30 _TERM_TARGET_TAGS_LIMIT = 70 _TERM_PORTS_LIMIT = 256 class OpenConfigTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def _StripAclHeaders(self, acl): return '\n'.join([line for line in str(acl).split('\n') if not line.lstrip().startswith('#')]) def testSaddr(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER + GOOD_SADDR, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_SADDR) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') def testDaddr(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER + GOOD_DADDR, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_DADDR) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') def testSport(self): self.naming.GetNetAddr.return_value = TEST_IPS self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER + GOOD_SPORT, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_SPORT) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp')]) def testDport(self): self.naming.GetServiceByProto.side_effect = [['53'], ['53']] acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER + GOOD_DPORT, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_DPORT) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp')]) def testEverything(self): self.naming.GetServiceByProto.side_effect = [['53'], ['53']] self.naming.GetNetAddr.return_value = TEST_IPS acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER + GOOD_EVERYTHING, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_EVERYTHING) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'udp'), mock.call('DNS', 'tcp')]) def testV6Saddr(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_SADDR, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_V6_SADDR) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') def testV6Daddr(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_DADDR, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_V6_DADDR) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') def testMixedDaddr(self): self.naming.GetNetAddr.return_value = TEST_IPS acl = openconfig.OpenConfig(policy.ParsePolicy( GOOD_HEADER_MIXED + GOOD_DADDR, self.naming), EXP_INFO) expected = json.loads(GOOD_JSON_MIXED_DADDR) self.assertEqual(expected, json.loads(str(acl))) self.naming.GetNetAddr.assert_called_once_with('CORP_EXTERNAL') if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/packetfilter_test.py000066400000000000000000001014161437377527500211510ustar00rootroot00000000000000# Copyright 2013 Google Inc. All Rights Reserved. 
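The OpenConfig tests below compare the rendered ACL against the expected strings above by parsing both sides with json.loads, so key ordering and whitespace in the raw text are irrelevant. A minimal sketch of that comparison style (an editor's illustration; the literals are hypothetical):

import json

EXPECTED = '[{"actions": {"config": {"forwarding-action": "ACCEPT"}}}]'
RENDERED = '[ {"actions":{"config":{"forwarding-action":"ACCEPT"}}} ]'

# Comparing parsed structures ignores formatting differences in the raw text.
assert json.loads(EXPECTED) == json.loads(RENDERED)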
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for packetfilter rendering module.""" import datetime from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import packetfilter from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: packetfilter test-filter mixed } """ GOOD_HEADER_STATELESS = """ header { comment:: "this is a stateless test acl" target:: packetfilter test-filter mixed nostate } """ GOOD_HEADER_INET4 = """ header { comment:: "this is a test acl" target:: packetfilter test-filter } """ GOOD_HEADER_INET6 = """ header { comment:: "this is a test acl" target:: packetfilter test-filter inet6 } """ GOOD_HEADER_DIRECTIONAL = """ header { comment:: "this is a test acl" target:: packetfilter test-filter out mixed } """ GOOD_HEADER_DIRECTIONAL_STATELESS = """ header { comment:: "this is a test acl" target:: packetfilter test-filter out mixed nostate } """ GOOD_TERM_ICMP = """ term good-term-icmp { protocol:: icmp action:: accept } """ GOOD_TERM_ICMP_TYPES = """ term good-term-icmp-types { protocol:: icmp icmp-type:: echo-reply unreachable time-exceeded action:: deny } """ GOOD_TERM_ICMPV6 = """ term good-term-icmpv6 { protocol:: icmpv6 action:: accept } """ GOOD_TERM_ICMPV6_TYPES = """ term good-term-icmpv6-types { protocol:: icmpv6 icmp-type:: echo-reply action:: deny } """ BAD_TERM_ICMP = """ term test-icmp { icmp-type:: echo-request echo-reply action:: accept } """ BAD_TERM_ACTION = """ term bad-term-action { protocol:: icmp action:: reject-with-tcp-rst } """ GOOD_TERM_TCP = """ term good-term-tcp { comment:: "Test term 1" destination-address:: PROD_NETWORK destination-port:: SMTP protocol:: tcp action:: accept } """ DENY_TERM_TCP = """ term deny-term-tcp { protocol:: tcp action:: deny } """ GOOD_TERM_LOG = """ term good-term-log { protocol:: tcp logging:: true action:: accept } """ EXPIRED_TERM = """ term expired_test { expiration:: 2000-1-1 action:: deny } """ EXPIRED_TERM2 = """ term expired_test2 { expiration:: 2015-01-01 action:: deny } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ NEXT_TERM = """ term next { action:: next } """ NEXT_LOG_TERM = """ term next-log { logging:: true action:: next } """ PORTRANGE_TERM = """ term portrange { protocol:: tcp action:: accept destination-port:: HIGH_PORTS } """ FLAGS_TERM = """ term flags { protocol:: tcp action:: accept option:: syn fin } """ INVALID_FLAGS_TERM = """ term invalid-flags { protocol:: udp action:: accept option:: syn fin } """ MULTILINE_COMMENT = """ term multiline-comment { comment:: "This is a multiline comment" protocol:: tcp action:: accept } """ TCP_STATE_TERM = """ term tcp-established-only { protocol:: tcp option:: established action:: accept } """ TCP_GOOD_ESTABLISHED_TERM = """ term tcp-established-good { protocol:: tcp 
option:: established action:: accept } """ TCP_BAD_ESTABLISHED_TERM = """ term tcp-established-bad { protocol:: tcp option:: established syn action:: accept } """ UDP_ESTABLISHED_TERM = """ term udp-established { protocol:: udp option:: established action:: accept } """ MULTIPLE_NAME_TERM = """ term multiple-name { protocol:: tcp destination-address:: PROD_NETWORK destination-port:: SMTP source-address:: CORP_INTERNAL action:: accept } """ LONG_NAME_TERM_DNS_TCP = """ term multiple-name-dns-tcp { protocol:: tcp destination-address:: PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME destination-port:: DNS action:: accept } """ LONG_NAME_TERM_DNS_UDP = """ term multiple-name-dns-udp { protocol:: udp destination-address:: PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME destination-port:: DNS action:: accept } """ NON_SHORTENED_LONG_NAME_TERM_DNS_UDP = """ term multiple-name-dns-udp { protocol:: udp destination-address:: PROD_NETWORK_EXTREAMLY_LONG_VER destination-port:: DNS action:: accept } """ DUPLICATE_DIFFERENT_LONG_NAME_TERM = """ term multiple-name { protocol:: tcp destination-address:: PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME destination-port:: SMTP source-address:: PROD_NETWORK_EXTREAMLY_LONG_VERY_GOOD_NAME action:: accept } """ BAD_PROTO_TERM = """ term bad-proto { protocol:: hopopt action:: accept } """ GOOD_WARNING_TERM = """ term good-warning { protocol:: tcp policer:: batman action:: accept } """ SOURCE_INTERFACE_TERM = """ term src-intf { source-interface:: lo0 action:: accept } """ DESTINATION_INTERFACE_TERM = """ term dest-intf { destination-interface:: lo0 action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_interface', 'expiration', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'source_interface', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'established', 'tcp-established'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 class PacketFilterTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testTcp(self): ip = nacaddr.IP('10.0.0.0/8') ip.parent_token = 'PROD_NETWORK' self.naming.GetNetAddr.return_value = [ip] self.naming.GetServiceByProto.return_value = ['25'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-tcp', result, 'did not find comment for good-term-tcp') self.assertIn( 'pass quick proto { tcp } from { any } to { } port ' '{ 25 }', result, 'did not find actual term for good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWORK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testLog(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_LOG, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-log', result, 'did not find comment for good-term-log') self.assertIn( 'pass quick log proto { tcp } from { any } to { any } flags S/SA ' 'keep state\n', result, 'did not find actual term for good-term-log') def testIcmp(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-icmp', result, 'did not find comment for good-term-icmp') self.assertIn( 'pass quick proto { icmp } from { any } to { any } keep state\n', result, 'did not find actual term for good-term-icmp') def testIcmpTypes(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMP_TYPES, self.naming), EXP_INFO) acl_v6_header = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_ICMP_TYPES, self.naming), EXP_INFO) result = str(acl) result_v6_header = str(acl_v6_header) self.assertIn('# term good-term-icmp-types', result, 'did not find comment for good-term-icmp-types') self.assertIn('# term good-term-icmp-types', result_v6_header, 'did not find comment for good-term-icmp-types' 'in V6 header') self.assertIn( 'block drop quick proto { icmp } from { any } to { any } ' 'icmp-type { 0, 3, 11 }', result, 'did not find actual term for good-term-icmp-types') self.assertIn( 'block drop quick proto { icmp } from { any } to { any } ' 'icmp-type { 0, 3, 11 }', result, 'did not find actual term for good-term-icmp-types' 'in the acl with V6 header') def testIcmpv6(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-icmpv6', result, 'did not find comment for good-term-icmpv6') self.assertIn( 'pass quick proto { ipv6-icmp } from { any } to { any } keep state\n', result, 'did not find actual term for good-term-icmpv6') def testIcmpv6Types(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMPV6_TYPES, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-icmpv6-types', result, 'did not find comment for good-term-icmpv6-types') self.assertIn( 'block drop quick proto { ipv6-icmp } from { any } to { any } ' 'icmp6-type { 129 }', result, 'did not find actual term for good-term-icmpv6-types') def testBadIcmp(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + BAD_TERM_ICMP, self.naming), EXP_INFO) self.assertRaises(aclgenerator.UnsupportedFilterError, str, acl) @mock.patch.object(packetfilter.logging, 'warning') def testExpiredTerm(self, mock_warn): 
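The expiration tests below verify log output with a different technique than assertLogs: mock.patch.object swaps out the module-level logging function for the duration of the test, and the injected mock records the call for later assertion. A minimal sketch of that pattern (an editor's illustration; render and its message are hypothetical stand-ins):

import logging
import unittest
from unittest import mock


def render(term_name):
  # Stand-in for a generator that warns about an expired term.
  logging.warning('WARNING: Term %s is expired.', term_name)


class PatchObjectSketch(unittest.TestCase):

  @mock.patch.object(logging, 'warning')
  def test_warning_call_is_recorded(self, mock_warn):
    render('expired_test')
    mock_warn.assert_called_once_with(
        'WARNING: Term %s is expired.', 'expired_test')


if __name__ == '__main__':
  unittest.main()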
packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', 'expired_test', 'test-filter') @mock.patch.object(packetfilter.logging, 'warning') def testExpiredTerm2(self, mock_warn): packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + EXPIRED_TERM2, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', 'expired_test2', 'test-filter') @mock.patch.object(packetfilter.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'test-filter') def testMultiprotocol(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term multi-proto', result, 'did not find comment for multi-proto') self.assertIn( 'pass quick proto { tcp udp icmp } from { any } ' 'to { any } keep state\n', result, 'did not find actual term for multi-proto') def testNextTerm(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + NEXT_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term next', result, 'did not find comment for next') self.assertIn( 'pass from { any } to { any } flags S/SA keep state\n', result, 'did not find actual term for next-term') def testNextLogTerm(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + NEXT_LOG_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term next-log', result, 'did not find comment for next-log') self.assertIn( 'pass log from { any } to { any } flags S/SA keep state\n', result, 'did not find actual term for next-log-term') def testPortRange(self): self.naming.GetServiceByProto.return_value = ['12345-12354'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + PORTRANGE_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term portrange', result, 'did not find comment for portrange') self.assertIn( 'pass quick proto { tcp } from { any } to { any } ' 'port { 12345:12354 }', result, 'did not find actual term for portrange') self.naming.GetServiceByProto.assert_called_once_with( 'HIGH_PORTS', 'tcp') def testFlags(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + FLAGS_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term flags', result, 'did not find comment for flags') self.assertIn( 'pass quick proto { tcp } from { any } to { any } ' 'flags SF/SF', result, 'did not find actual term for flags') def testInvalidFlags(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + INVALID_FLAGS_TERM, self.naming), EXP_INFO) self.assertRaises(aclgenerator.UnsupportedFilterError, str, acl) def testMultilineComment(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + MULTILINE_COMMENT, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term multiline-comment', result, 'did not find comment for multiline-comment') self.assertIn('# This is a\n# multiline comment', result, 'did not find multiline comment for multiline-comment') def testStateless(self): ip = nacaddr.IP('10.0.0.0/8') 
ip.parent_token = 'PROD_NETWORK' self.naming.GetNetAddr.return_value = [ip] self.naming.GetServiceByProto.return_value = ['25'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_STATELESS + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-tcp', result, 'did not find comment for good-term-tcp') self.assertIn( 'pass quick proto { tcp } from { any } to { } port ' '{ 25 } no state', result, 'did not find actual term for good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWORK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testInet4(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_INET4 + GOOD_TERM_LOG, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-log', result, 'did not find comment for good-term-log') self.assertIn( 'pass quick log inet proto { tcp } from { any } to { any } flags S/SA ' 'keep state\n', result, 'did not find actual term for good-term-log') def testInet6(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_INET6 + GOOD_TERM_LOG, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-log', result, 'did not find comment for good-term-log') self.assertIn( 'pass quick log inet6 proto { tcp } from { any } to { any } flags S/SA ' 'keep state\n', result, 'did not find actual term for good-term-log') def testDirectional(self): ip = nacaddr.IP('10.0.0.0/8') ip.parent_token = 'PROD_NETWORK' self.naming.GetNetAddr.return_value = [ip] self.naming.GetServiceByProto.return_value = ['25'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-tcp', result, 'did not find comment for good-term-tcp') self.assertIn( 'pass out quick proto { tcp } from { any } to { } port ' '{ 25 }', result, 'did not find actual term for good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWORK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testDirectionalSourceInterface(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + SOURCE_INTERFACE_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term src-intf', result, 'did not find comment for src-intf') self.assertIn( 'pass in quick on lo0 from { any } to { any } flags S/SA keep state', result, 'did not find actual term for src-intf') def testDirectionalDestinationInterface(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + DESTINATION_INTERFACE_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term dest-intf', result, 'did not find comment for dest-intf') self.assertIn( 'pass out quick on lo0 from { any } to { any } flags S/SA keep state', result, 'did not find actual term for dest-intf') def testMultipleHeader(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_STATELESS + GOOD_TERM_LOG + GOOD_HEADER_INET6 + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'pass quick log proto { tcp } from { any } to { any } no state', result, 'did not find actual term for good-term-log') self.assertIn( 'pass quick inet6 proto { icmp } from { any } to { any } no state', result, 'did not find actual term for good-term-icmp') def testDirectionalStateless(self): ip = nacaddr.IP('10.0.0.0/8') ip.parent_token = 'PROD_NETWORK' self.naming.GetNetAddr.return_value = [ip] self.naming.GetServiceByProto.return_value 
= ['25'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL_STATELESS + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term good-term-tcp', result, 'did not find comment for good-term-tcp') self.assertIn( 'pass out quick proto { tcp } from { any } to { } port ' '{ 25 } no state', result, 'did not find actual term for good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWORK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testStatelessEstablished(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_STATELESS + TCP_STATE_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term tcp-established-only', result, 'did not find comment for tcp-established-only') self.assertIn( 'pass quick proto { tcp } from { any } to { any } flags A/A no state', result, 'did not find actual term for tcp-established-only') def testBadFlags(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + TCP_BAD_ESTABLISHED_TERM, self.naming), EXP_INFO) self.assertRaises(aclgenerator.UnsupportedFilterError, str, acl) # While "UDP stateless established" seems to be a strange combination it # actually makes sense: e.g., the state or nostate header is a global # header directive and indicates whether we do matching on established by # flags or proper connection tracking, and pf's permissiveness allows things # like: # proto { udp, tcp } flags A/A no state' # whereby the flags only apply to TCP protocol matches. However, the # following is invalid: # proto { udp } flags A/A no state' # check to make sure we don't output the latter for things like: # target:: packetfilter nostate # term foo { protocol:: udp option:: established } def testUdpStatelessEstablished(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_STATELESS + UDP_ESTABLISHED_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term udp-established', result, 'did not find comment for udp-established') self.assertIn( 'pass quick proto { udp } from { any } to { any } no state', result, 'did not find actual term for udp-established') def testStatefulBlock(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + DENY_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term deny-term-tcp', result, 'did not find comment for udp-established') self.assertIn( 'block drop quick proto { tcp } from { any } to { any } flags S/SA', result, 'did not find actual term for deny-term-tcp') def testTcpEstablished(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + TCP_GOOD_ESTABLISHED_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('# term tcp-established-good', result, 'did not find comment for tcp-established-good') self.assertIn( 'pass quick proto { tcp } from { any } to { any } flags A/A keep state', result, 'did not find actual term for udp-established') def testTableCreation(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK' corp_internal_one = nacaddr.IP('100.96.0.1/11', strict=False) corp_internal_one.parent_token = 'CORP_INTERNAL' corp_internal_two = nacaddr.IP('172.16.0.0/16') corp_internal_two.parent_token = 'CORP_INTERNAL' self.naming.GetNetAddr.side_effect = [ [prod_network], [corp_internal_one, corp_internal_two]] self.naming.GetServiceByProto.return_value = ['25'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + MULTIPLE_NAME_TERM, self.naming), EXP_INFO) result = str(acl) 
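    # A rough sketch of how the state handling discussed above shows up in the
    # rendered pf rules (term bodies paraphrased; the outputs mirror the
    # assertions in testStatelessEstablished and testUdpStatelessEstablished):
    #   protocol:: tcp option:: tcp-established, under a nostate filter
    #     -> pass quick proto { tcp } from { any } to { any } flags A/A no state
    #   protocol:: udp option:: established, under a nostate filter
    #     -> pass quick proto { udp } from { any } to { any } no state
    # i.e. the flags A/A match is emitted only for the TCP term, never for UDP.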
self.assertIn( 'table {10.0.0.0/8}', result, 'did not find PROD_NETWORKtable in header') self.assertIn( 'table {100.96.0.0/11,\\\n' '172.16.0.0/16}', result, 'did not find CORP_INTERNAL table in header') self.assertIn( 'pass quick proto { tcp } from { } to ' '{ } port { 25 } flags S/SA keep state', result, 'did not find actual term for multiple-name') self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWORK'), mock.call('CORP_INTERNAL')]) self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testTableNameShortened(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME' self.naming.GetNetAddr.return_value = [prod_network] self.naming.GetServiceByProto.return_value = ['53'] acl = packetfilter.PacketFilter( policy.ParsePolicy(GOOD_HEADER_DIRECTIONAL + LONG_NAME_TERM_DNS_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'table {10.0.0.0/8}', result, 'did not find shortened name in header.') self.assertIn( 'pass out quick proto { tcp } from { any } to ' '{ } ' 'port { 53 } flags S/SA keep state', result, 'did not find actual term for multiple-name') self.naming.GetNetAddr.assert_called_once_with( 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME') self.naming.GetServiceByProto.assert_called_once_with('DNS', 'tcp') def testTableDuplicateShortNameError(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME' prod_network_two = nacaddr.IP('172.0.0.0/8') prod_network_two.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_GOOD_NAME' self.naming.GetNetAddr.side_effect = [ [prod_network], [prod_network_two]] self.naming.GetServiceByProto.return_value = ['25'] self.assertRaises( packetfilter.DuplicateShortenedTableNameError, packetfilter.PacketFilter.__init__, packetfilter.PacketFilter.__new__(packetfilter.PacketFilter), policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + DUPLICATE_DIFFERENT_LONG_NAME_TERM, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME'), mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_GOOD_NAME')]) self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testTableSameLongNameSameFilter(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME' self.naming.GetNetAddr.return_value = [prod_network] self.naming.GetServiceByProto.return_value = ['53'] acl = packetfilter.PacketFilter( policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + LONG_NAME_TERM_DNS_TCP + LONG_NAME_TERM_DNS_UDP, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'table {10.0.0.0/8}', result, 'did not find shortened name in header.') self.assertIn( 'pass out quick proto { tcp } from { any } to ' '{ } ' 'port { 53 } flags S/SA keep state', result, 'did not find actual TCP term for multiple-name') self.assertIn( 'pass out quick proto { udp } from { any } to ' '{ } ' 'port { 53 } keep state', result, 'did not find actual UDP for multiple-name') self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME'), mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME')]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')]) def testTableSameLongNameDiffFilter(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME' self.naming.GetNetAddr.return_value = [prod_network] 
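    # A sketch of the name shortening exercised by these table tests (the exact
    # cut-off is an assumption inferred from the fixtures): a parent_token such
    # as PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME appears to be truncated
    # to roughly 31 characters, e.g. PROD_NETWORK_EXTREAMLY_LONG_VER, so two
    # distinct tokens that agree up to the cut-off collide and raise
    # DuplicateShortenedTableNameError.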
self.naming.GetServiceByProto.return_value = ['53'] acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + LONG_NAME_TERM_DNS_TCP + GOOD_HEADER_DIRECTIONAL + LONG_NAME_TERM_DNS_UDP, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'table {10.0.0.0/8}', result, 'did not find shortened name in header.') self.assertIn( 'pass out quick proto { tcp } from { any } to ' '{ } ' 'port { 53 } flags S/SA keep state', result, 'did not find actual TCP term for multiple-name') self.assertIn( 'pass out quick proto { udp } from { any } to ' '{ } ' 'port { 53 } keep state', result, 'did not find actual UDP for multiple-name') self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME'), mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME')]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')]) def testTableDiffObjectsShortenedAndNonShortened(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME' prod_network_two = nacaddr.IP('172.0.0.0/8') prod_network_two.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VER' self.naming.GetNetAddr.side_effect = [ [prod_network], [prod_network_two]] self.naming.GetServiceByProto.return_value = ['53'] self.assertRaises( packetfilter.DuplicateShortenedTableNameError, packetfilter.PacketFilter.__init__, packetfilter.PacketFilter.__new__(packetfilter.PacketFilter), policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + LONG_NAME_TERM_DNS_TCP + NON_SHORTENED_LONG_NAME_TERM_DNS_UDP, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME'), mock.call('PROD_NETWORK_EXTREAMLY_LONG_VER')]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')]) def testTableDuplicateShortNameErrorDiffFilter(self): prod_network = nacaddr.IP('10.0.0.0/8') prod_network.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME' prod_network_two = nacaddr.IP('172.0.0.0/8') prod_network_two.parent_token = 'PROD_NETWORK_EXTREAMLY_LONG_VER' self.naming.GetNetAddr.side_effect = [ [prod_network], [prod_network_two]] self.naming.GetServiceByProto.return_value = ['53'] self.assertRaises( packetfilter.DuplicateShortenedTableNameError, packetfilter.PacketFilter.__init__, packetfilter.PacketFilter.__new__(packetfilter.PacketFilter), policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + LONG_NAME_TERM_DNS_TCP + GOOD_HEADER_DIRECTIONAL + NON_SHORTENED_LONG_NAME_TERM_DNS_UDP, self.naming), EXP_INFO) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWORK_EXTREAMLY_LONG_VERY_NO_GOOD_NAME'), mock.call('PROD_NETWORK_EXTREAMLY_LONG_VER')]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')]) def testTermNameConflict(self): self.assertRaises( packetfilter.DuplicateTermError, packetfilter.PacketFilter.__init__, packetfilter.PacketFilter.__new__(packetfilter.PacketFilter), policy.ParsePolicy( GOOD_HEADER_DIRECTIONAL + GOOD_TERM_ICMP + GOOD_TERM_ICMP, self.naming), EXP_INFO) def testBadProtoError(self): acl = packetfilter.PacketFilter(policy.ParsePolicy( GOOD_HEADER + BAD_PROTO_TERM, self.naming), EXP_INFO) self.assertRaises(packetfilter.UnsupportedProtoError, str, acl) def testBuildTokens(self): ip = nacaddr.IP('10.0.0.0/8') ip.parent_token = 'PROD_NETWORK' self.naming.GetNetAddr.return_value = [ip] self.naming.GetServiceByProto.return_value = ['25'] pol1 = 
packetfilter.PacketFilter(policy.ParsePolicy(
        GOOD_HEADER + GOOD_TERM_TCP, self.naming), EXP_INFO)
    st, sst = pol1._BuildTokens()
    self.assertEqual(st, SUPPORTED_TOKENS)
    self.assertEqual(sst, SUPPORTED_SUB_TOKENS)

  def testBuildWarningTokens(self):
    pol1 = packetfilter.PacketFilter(
        policy.ParsePolicy(GOOD_HEADER + GOOD_WARNING_TERM, self.naming),
        EXP_INFO)
    st, sst = pol1._BuildTokens()
    self.assertEqual(st, SUPPORTED_TOKENS)
    self.assertEqual(sst, SUPPORTED_SUB_TOKENS)


if __name__ == '__main__':
  absltest.main()

capirca-2.0.9/tests/lib/paloaltofw_test.py

# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Palo Alto Firewalls acl rendering module."""

from absl.testing import absltest
from unittest import mock

from capirca.lib import aclgenerator
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import paloaltofw
from capirca.lib import policy

GOOD_HEADER_1 = """
header {
  comment:: "This is a test acl with a comment"
  target:: paloalto from-zone trust to-zone untrust
}
"""
GOOD_HEADER_2 = """
header {
  comment:: "This is a test acl with a comment"
  target:: paloalto from-zone all to-zone all
}
"""
GOOD_HEADER_INET6 = """
header {
  comment:: "This is a test acl with a comment"
  target:: paloalto from-zone trust to-zone untrust inet6
}
"""
GOOD_HEADER_MIXED = """
header {
  comment:: "This is a test acl with a comment"
  target:: paloalto from-zone trust to-zone untrust mixed
}
"""
GOOD_HEADER_TERM_PREFIXES_1 = """
header {
  comment:: "This is a test acl with a comment"
  target:: paloalto from-zone trust to-zone untrust mixed no-addr-obj unique-term-prefixes
}
"""
GOOD_HEADER_TERM_PREFIXES_2 = """
header {
  comment:: "This is a test acl with a comment"
  target:: paloalto from-zone trust to-zone trust mixed no-addr-obj unique-term-prefixes
}
"""
BAD_HEADER_1 = """
header {
  comment:: "This header has two address families"
  target:: paloalto from-zone trust to-zone untrust inet6 mixed
}
"""
GRE_PROTO_TERM = """
term test-gre-protocol {
  comment:: "allow GRE protocol to FOOBAR"
  destination-address:: FOOBAR
  protocol:: gre
  action:: accept
}
"""
AH_PROTO_TERM = """
term test-ah-protocol {
  comment:: "allow AH protocol to FOOBAR"
  destination-address:: FOOBAR
  protocol:: ah
  action:: accept
}
"""
AH_TCP_MIXED_PROTO_TERM = """
term test-mixed-protocol {
  source-address:: FOOBAR
  protocol:: ah tcp
  action:: accept
  comment:: "Applications and Services should be split into separate terms."
}
"""
ESP_PROTO_TERM = """
term test-esp-protocol {
  comment:: "allow ESP protocol to FOOBAR"
  destination-address:: FOOBAR
  protocol:: esp
  action:: accept
}
"""
ESP_TCP_MIXED_PROTO_TERM = """
term test-mixed-protocol {
  destination-address:: FOOBAR
  protocol:: esp tcp
  action:: accept
  comment:: "Applications and Services should be split into separate terms."
} """ GOOD_TERM_1 = """ term good-term-1 { comment:: "This header is very very very very very very very very very very very very very very very very very very very very large" destination-address:: FOOBAR destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_2 = """ term good-term-4 { destination-address:: SOME_HOST protocol:: tcp pan-application:: ssl http action:: accept } """ GOOD_TERM_3 = """ term only-pan-app { pan-application:: ssl action:: accept } """ GOOD_TERM_4_STATELESS_REPLY = """ term good-term-stateless-reply { comment:: "ThisIsAStatelessReply" destination-address:: SOME_HOST protocol:: tcp pan-application:: ssl http action:: accept } """ SVC_TERM_1 = """ term ssh-term-1 { comment:: "Allow SSH" destination-address:: FOOBAR destination-port:: SSH protocol:: tcp action:: accept } term smtp-term-1 { comment:: "Allow SMTP" destination-address:: FOOBAR destination-port:: SMTP protocol:: tcp action:: accept } """ SVC_TERM_2 = """ term smtp-term-1 { comment:: "Allow SMTP" destination-address:: FOOBAR destination-port:: SMTP protocol:: tcp action:: accept } """ TCP_ESTABLISHED_TERM = """ term tcp-established { destination-address:: SOME_HOST protocol:: tcp option:: tcp-established action:: accept } """ UDP_ESTABLISHED_TERM = """ term udp-established-term { destination-address:: SOME_HOST protocol:: udp option:: established action:: accept } """ UNSUPPORTED_OPTION_TERM = """ term unsupported-option-term { destination-address:: SOME_HOST protocol:: udp option:: inactive action:: accept } """ EXPIRED_TERM_1 = """ term expired_test { expiration:: 2000-1-1 action:: deny } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ ICMP_TYPE_TERM_1 = """ term test-icmp { protocol:: icmp icmp-type:: echo-request echo-reply unreachable action:: accept } """ ICMP_TYPE_TERM_2 = """ term test-icmp-2 { protocol:: icmp icmp-type:: echo-request echo-reply unreachable action:: accept } """ ICMPV6_ONLY_TERM = """ term test-icmpv6-only { protocol:: icmpv6 action:: accept } """ ICMPV6_TYPE_TERM = """ term test-icmpv6-types { protocol:: icmpv6 icmp-type:: echo-request echo-reply destination-unreachable action:: accept } """ ICMPV6_ABBREVIATION_TYPE_TERM = """ term test-icmpv6-abbreviation-types { protocol:: icmpv6 icmp-type:: echo-request echo-reply destination-unreachable inverse-neighbor-discovery-solicitation inverse-neighbor-discovery-advertisement version-2-multicast-listener-report home-agent-address-discovery-reply action:: accept } """ BAD_ICMPV6_TYPE_TERM = """ term test-icmp { protocol:: icmpv6 icmp-type:: echo-request echo-reply unreachable action:: accept comment:: "This is incorrect because unreachable is not an icmpv6-type." } """ ICMP_ONLY_TERM_1 = """ term test-icmp-only { protocol:: icmp action:: accept } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ DEFAULT_TERM_1 = """ term default-term-1 { action:: deny } """ TIMEOUT_TERM = """ term timeout-term { protocol:: icmp icmp-type:: echo-request timeout:: 77 action:: accept } """ LOGGING_DISABLED = """ term test-disabled-log { comment:: "Testing disabling logging for tcp." protocol:: tcp logging:: disable action:: accept } """ LOGGING_BOTH_TERM = """ term test-log-both { comment:: "Testing enabling log-both for tcp." protocol:: tcp logging:: log-both action:: accept } """ LOGGING_TRUE_KEYWORD = """ term test-true-log { comment:: "Testing enabling logging for udp with true keyword." 
protocol:: udp logging:: true action:: accept } """ LOGGING_PYTRUE_KEYWORD = """ term test-pytrue-log { comment:: "Testing enabling logging for udp with True keyword." protocol:: udp logging:: True action:: accept } """ LOGGING_SYSLOG_KEYWORD = """ term test-syslog-log { comment:: "Testing enabling logging for udp with syslog keyword." protocol:: udp logging:: syslog action:: accept } """ LOGGING_LOCAL_KEYWORD = """ term test-local-log { comment:: "Testing enabling logging for udp with local keyword." protocol:: udp logging:: local action:: accept } """ ACTION_ACCEPT_TERM = """ term test-accept-action { comment:: "Testing accept action for tcp." protocol:: tcp action:: accept } """ ACTION_COUNT_TERM = """ term test-count-action { comment:: "Testing unsupported count action for tcp." protocol:: tcp action:: count } """ ACTION_NEXT_TERM = """ term test-next-action { comment:: "Testing unsupported next action for tcp." protocol:: tcp action:: next } """ ACTION_DENY_TERM = """ term test-deny-action { comment:: "Testing deny action for tcp." protocol:: tcp action:: deny } """ ACTION_REJECT_TERM = """ term test-reject-action { comment:: "Testing reject action for tcp." protocol:: tcp action:: reject } """ ACTION_RESET_TERM = """ term test-reset-action { comment:: "Testing reset action for tcp." protocol:: tcp action:: reject-with-tcp-rst } """ PLATFORM_TERM = """ term test-accept-action { comment:: "Testing accept action for tcp." protocol:: tcp action:: accept platform:: paloalto } """ OTHER_PLATFORM_TERM = """ term test-accept-action { comment:: "Testing accept action for tcp." protocol:: tcp action:: accept platform:: juniper } """ PLATFORM_EXCLUDE_TERM = """ term test-accept-action { comment:: "Testing accept action for tcp." protocol:: tcp action:: accept platform-exclude:: paloalto } """ OTHER_PLATFORM_EXCLUDE_TERM = """ term test-accept-action { comment:: "Testing accept action for tcp." 
protocol:: tcp action:: accept platform-exclude:: junipersrx } """ HEADER_COMMENTS = """ header { comment:: "comment 1" comment:: "comment 2" target:: paloalto from-zone trust to-zone untrust } term policy-1 { pan-application:: ssh action:: accept } term policy-2 { pan-application:: web-browsing action:: accept } header { comment:: "comment 3" target:: paloalto from-zone trust to-zone dmz } term policy-3 { pan-application:: web-browsing action:: accept } header { # no comment target:: paloalto from-zone trust to-zone dmz-2 } term policy-4 { pan-application:: web-browsing action:: accept } """ ZONE_LEN_ERROR = """ header { target:: paloalto from-zone %s to-zone %s } term policy { pan-application:: web-browsing action:: accept } """ SUPPORTED_TOKENS = frozenset({ 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'logging', 'name', 'option', 'owner', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'stateless_reply', 'timeout', 'pan_application', 'translated', }) SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'reject-with-tcp-rst'}, 'option': {'established', 'tcp-established'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
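# For these unit tests the value is simply hard-coded below and handed to
# every generator under test. A minimal sketch of the pattern (the name
# "definitions" is only a stand-in for the mocked naming.Naming object):
#
#   acl = paloaltofw.PaloAltoFW(
#       policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, definitions), EXP_INFO)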
EXP_INFO = 2 _IPSET = [nacaddr.IP('10.0.0.0/8'), nacaddr.IP('2001:4860:8000::/33')] _IPSET2 = [nacaddr.IP('10.23.0.0/22'), nacaddr.IP('10.23.0.6/23', strict=False)] _IPSET3 = [nacaddr.IP('10.23.0.0/23')] PATH_VSYS = "./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']" PATH_RULES = PATH_VSYS + '/rulebase/security/rules' PATH_TAG = PATH_VSYS + '/tag' PATH_SERVICE = PATH_VSYS + '/service' PATH_ADDRESSES = PATH_VSYS + '/address' PATH_ADDRESS_GROUP = PATH_VSYS + '/address-group' class PaloAltoFWTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testTermAndFilterName(self): self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['25'] paloalto = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='good-term-1']") self.assertIsNotNone(x, output) self.naming.GetNetAddr.assert_called_once_with('FOOBAR') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testServiceMap(self): definitions = naming.Naming() definitions._ParseLine('SSH = 22/tcp', 'services') definitions._ParseLine('SMTP = 25/tcp', 'services') definitions._ParseLine('FOOBAR = 10.0.0.0/8', 'networks') definitions._ParseLine(' 2001:4860:8000::/33', 'networks') pol1 = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + SVC_TERM_1, definitions), EXP_INFO) self.assertEqual( pol1.service_map.entries, { ((), ('22',), 'tcp'): { 'name': 'service-ssh-term-1-tcp' }, ((), ('25',), 'tcp'): { 'name': 'service-smtp-term-1-tcp' } }, pol1.service_map.entries) pol2 = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + SVC_TERM_2, definitions), EXP_INFO) # The expectation is that there will be a single port mapped. self.assertEqual( pol2.service_map.entries, { ((), ('25',), 'tcp'): { 'name': 'service-smtp-term-1-tcp' } }, pol2.service_map.entries) def testDefaultDeny(self): paloalto = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + DEFAULT_TERM_1, self.naming), EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='default-term-1']/action") self.assertIsNotNone(x, output) self.assertEqual(x.text, 'deny', output) def testIcmpTypes(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ICMP_TYPE_TERM_1, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-icmp']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertCountEqual( ['icmp-echo-reply', 'icmp-echo-request', 'icmp-unreachable'], members, output) def testIcmpTypesMultiplePolicies(self): pol = policy.ParsePolicy( GOOD_HEADER_1 + ICMP_TYPE_TERM_1 + GOOD_HEADER_2 + ICMP_TYPE_TERM_2, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-icmp']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertCountEqual( ['icmp-echo-reply', 'icmp-echo-request', 'icmp-unreachable'], members, output) # Check second policy as well. 
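    # The PATH_RULES prefix defined above expands to
    #   ./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules
    # so the find() below addresses the <application> element of the rule
    # entry named 'test-icmp-2' in the generated PAN-OS configuration.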
x = paloalto.config.find(PATH_RULES + "/entry[@name='test-icmp-2']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertCountEqual( ['icmp-echo-reply', 'icmp-echo-request', 'icmp-unreachable'], members, output) def testIcmpV6Types(self): pol = policy.ParsePolicy(GOOD_HEADER_MIXED + ICMPV6_TYPE_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-icmpv6-types']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertCountEqual([ 'icmp6-echo-reply', 'icmp6-echo-request', 'icmp6-destination-unreachable' ], members, output) def testIcmpV6ApplicationAbbreviation(self): pol = policy.ParsePolicy(GOOD_HEADER_MIXED + ICMPV6_ABBREVIATION_TYPE_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find( PATH_RULES + "/entry[@name='test-icmpv6-abbreviation-types']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertCountEqual([ 'icmp6-echo-reply', 'icmp6-echo-request', 'icmp6-destination-unreachable', 'icmp6-INV-NBR-DSCVR-SOL', 'icmp6-INV-NBR-DSCVR-ADV', 'icmp6-version-2-MCAST-LSNR-repo', 'icmp6-home-agent-ADDR-DSCVR-RPL' ], members, output) def testBadICMP(self): POL = """ header { target:: paloalto from-zone trust to-zone untrust %s } term rule-1 { %s action:: accept }""" T = """ icmp-type:: echo-request echo-reply """ pol = policy.ParsePolicy(POL % ("", T), self.naming) self.assertRaises(paloaltofw.UnsupportedFilterError, paloaltofw.PaloAltoFW, pol, EXP_INFO) T = """ protocol:: udp icmp icmp-type:: echo-request echo-reply """ pol = policy.ParsePolicy(POL % ("inet", T), self.naming) self.assertRaises(paloaltofw.UnsupportedFilterError, paloaltofw.PaloAltoFW, pol, EXP_INFO) T = """ protocol:: icmpv6 icmp icmp-type:: echo-request """ pol = policy.ParsePolicy(POL % ("mixed", T), self.naming) self.assertRaises(paloaltofw.UnsupportedFilterError, paloaltofw.PaloAltoFW, pol, EXP_INFO) T = """ protocol:: icmpv6 icmp-type:: echo echo-reply """ self.assertRaises(policy.TermInvalidIcmpType, policy.ParsePolicy, POL % ("inet6", T), self.naming) def testBadICMPv6Type(self): pol = policy.ParsePolicy(GOOD_HEADER_MIXED + BAD_ICMPV6_TYPE_TERM, self.naming) self.assertRaises(paloaltofw.PaloAltoFWBadIcmpTypeError, paloaltofw.PaloAltoFW, pol, EXP_INFO) def testICMPProtocolOnly(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ICMP_ONLY_TERM_1, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-icmp-only']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertEqual(['icmp'], members, output) def testICMPv6ProtocolOnly(self): pol = policy.ParsePolicy(GOOD_HEADER_INET6 + ICMPV6_ONLY_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-icmpv6-only']/application") self.assertIsNotNone(x, output) members = [] for node in x: self.assertEqual(node.tag, 'member', output) members.append(node.text) self.assertEqual(['ipv6-icmp'], members, output) def testUniqueTermsMultiplePolicies(self): pol = 
policy.ParsePolicy( GOOD_HEADER_TERM_PREFIXES_1 + ICMP_TYPE_TERM_1 + GOOD_HEADER_TERM_PREFIXES_2 + ICMP_TYPE_TERM_1, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find( PATH_RULES + "/entry[@name='a5f554bb7a8276615edbd5de-test-icmp']") self.assertIsNotNone(x, output) # Check second policy as well. x = paloalto.config.find( PATH_RULES + "/entry[@name='e7d22ea748e04110eaf0495e-test-icmp']") self.assertIsNotNone(x, output) def testSkipStatelessReply(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_4_STATELESS_REPLY, self.naming) # Add stateless_reply to terms, there is no current way to include it in the # term definition. _, terms = pol.filters[0] for term in terms: term.stateless_reply = True paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='good-term-stateless-reply']") self.assertIsNone(x, output) def testSkipEstablished(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + TCP_ESTABLISHED_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='tcp-established']") self.assertIsNone(x, output) pol = policy.ParsePolicy(GOOD_HEADER_1 + UDP_ESTABLISHED_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='udp-established-term']") self.assertIsNone(x, output) def testUnsupportedOptions(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + UNSUPPORTED_OPTION_TERM, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, paloaltofw.PaloAltoFW, pol, EXP_INFO) def testBuildTokens(self): self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol1 = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testLoggingBoth(self): paloalto = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + LOGGING_BOTH_TERM, self.naming), EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-log-both']/log-start") self.assertEqual(x, 'yes', output) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-log-both']/log-end") self.assertEqual(x, 'yes', output) def testDisableLogging(self): paloalto = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + LOGGING_DISABLED, self.naming), EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-disabled-log']/log-start") self.assertEqual(x, 'no', output) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-disabled-log']/log-end") self.assertEqual(x, 'no', output) def testLogging(self): for term in [ LOGGING_SYSLOG_KEYWORD, LOGGING_LOCAL_KEYWORD, LOGGING_PYTRUE_KEYWORD, LOGGING_TRUE_KEYWORD ]: paloalto = paloaltofw.PaloAltoFW( policy.ParsePolicy(GOOD_HEADER_1 + term, self.naming), EXP_INFO) output = str(paloalto) # we don't have term name so match all elements with attribute # name at the entry level x = paloalto.config.findall(PATH_RULES + '/entry[@name]/log-start') self.assertEqual(len(x), 0, output) x = paloalto.config.findall(PATH_RULES + '/entry[@name]/log-end') self.assertEqual(len(x), 1, output) self.assertEqual(x[0].text, 'yes', output) def testAcceptAction(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ACTION_ACCEPT_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = 
str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-accept-action']/action") self.assertEqual(x, 'allow', output) def testDenyAction(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ACTION_DENY_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-deny-action']/action") self.assertEqual(x, 'deny', output) def testRejectAction(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ACTION_REJECT_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-reject-action']/action") self.assertEqual(x, 'reset-client', output) def testResetAction(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ACTION_RESET_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-reset-action']/action") self.assertEqual(x, 'reset-client', output) def testCountAction(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ACTION_COUNT_TERM, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, paloaltofw.PaloAltoFW, pol, EXP_INFO) def testNextAction(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ACTION_NEXT_TERM, self.naming) self.assertRaises(aclgenerator.UnsupportedFilterError, paloaltofw.PaloAltoFW, pol, EXP_INFO) def testPlatformTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + PLATFORM_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-accept-action']/action") self.assertEqual(x, 'allow', output) def testOtherPlatformTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + OTHER_PLATFORM_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-accept-action']/action") self.assertIsNone(x, output) def testPlatformExcludeTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + PLATFORM_EXCLUDE_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-accept-action']/action") self.assertIsNone(x, output) def testOtherPlatformExcludeTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + OTHER_PLATFORM_EXCLUDE_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='test-accept-action']/action") self.assertEqual(x, 'allow', output) def testGreProtoTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + GRE_PROTO_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-gre-protocol']/application") self.assertIsNotNone(x, output) self.assertEqual(len(x), 1, output) self.assertEqual(x[0].tag, 'member', output) self.assertEqual(x[0].text, 'gre', output) def testAhProtoTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + AH_PROTO_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-ah-protocol']/application") self.assertIsNotNone(x, output) self.assertEqual(len(x), 1, output) self.assertEqual(x[0].tag, 'member', output) self.assertEqual(x[0].text, 'ipsec-ah', output) def testAhTcpMixedProtoTerm(self): pol = policy.ParsePolicy( GOOD_HEADER_1 + AH_TCP_MIXED_PROTO_TERM, 
self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) svc = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-1']/service") self.assertIsNotNone(svc, output) self.assertEqual(len(svc), 1, output) self.assertEqual(svc[0].tag, 'member', output) self.assertEqual(svc[0].text, 'any-tcp', output) app = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-1']/application") self.assertIsNotNone(app, output) self.assertEqual(len(app), 1, output) self.assertEqual(app[0].tag, 'member', output) self.assertEqual(app[0].text, 'any', output) # Check second policy as well. svc = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-2']/service") self.assertIsNotNone(svc, output) self.assertEqual(len(svc), 1, output) self.assertEqual(svc[0].tag, 'member', output) self.assertEqual(svc[0].text, 'application-default', output) app = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-2']/application") self.assertIsNotNone(app, output) self.assertEqual(len(app), 1, output) self.assertEqual(app[0].tag, 'member', output) self.assertEqual(app[0].text, 'ipsec-ah', output) def testEspProtoTerm(self): pol = policy.ParsePolicy(GOOD_HEADER_1 + ESP_PROTO_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='test-esp-protocol']/application") self.assertIsNotNone(x, output) self.assertEqual(len(x), 1, output) self.assertEqual(x[0].tag, 'member', output) self.assertEqual(x[0].text, 'ipsec-esp', output) def testEspTcpMixedProtoTerm(self): pol = policy.ParsePolicy( GOOD_HEADER_1 + ESP_TCP_MIXED_PROTO_TERM, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) svc = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-1']/service") self.assertIsNotNone(svc, output) self.assertEqual(len(svc), 1, output) self.assertEqual(svc[0].tag, 'member', output) self.assertEqual(svc[0].text, 'any-tcp', output) app = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-1']/application") self.assertIsNotNone(app, output) self.assertEqual(len(app), 1, output) self.assertEqual(app[0].tag, 'member', output) self.assertEqual(app[0].text, 'any', output) # Check second policy as well. 
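    # As the fixture comment "Applications and Services should be split into
    # separate terms." indicates, a term mixing tcp with ah or esp is rendered
    # as two rules: test-mixed-protocol-1 carries the service side (any-tcp,
    # application any) and test-mixed-protocol-2 carries the application side
    # (ipsec-ah or ipsec-esp with service application-default), which is what
    # the assertions around this point verify.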
svc = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-2']/service") self.assertIsNotNone(svc, output) self.assertEqual(len(svc), 1, output) self.assertEqual(svc[0].tag, 'member', output) self.assertEqual(svc[0].text, 'application-default', output) app = paloalto.config.find( PATH_RULES + "/entry[@name='test-mixed-protocol-2']/application") self.assertIsNotNone(app, output) self.assertEqual(len(app), 1, output) self.assertEqual(app[0].tag, 'member', output) self.assertEqual(app[0].text, 'ipsec-esp', output) def testHeaderComments(self): pol = policy.ParsePolicy(HEADER_COMMENTS, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) tag = 'trust_untrust_policy-comment-1' x = paloalto.config.find(PATH_TAG + "/entry[@name='%s']/comments" % tag) self.assertIsNotNone(x, output) self.assertEqual(x.text, 'comment 1 comment 2', output) x = paloalto.config.find(PATH_RULES + "/entry[@name='policy-2']/tag") self.assertIsNotNone(x, output) self.assertEqual(len(x), 1, output) self.assertEqual(x[0].tag, 'member', output) self.assertEqual(x[0].text, tag, output) tag = 'trust_dmz_policy-comment-2' x = paloalto.config.find(PATH_TAG + "/entry[@name='%s']/comments" % tag) self.assertIsNotNone(x, output) self.assertEqual(x.text, 'comment 3', output) x = paloalto.config.find(PATH_RULES + "/entry[@name='policy-3']/tag") self.assertIsNotNone(x, output) self.assertEqual(len(x), 1, output) self.assertEqual(x[0].tag, 'member', output) self.assertEqual(x[0].text, tag, output) x = paloalto.config.find(PATH_RULES + "/entry[@name='policy-4']/tag") self.assertIsNone(x, output) def testZoneLen(self): ZONE_MAX_LEN = 'Z' * 31 ZONE_TOO_LONG = 'Z' * 32 # from pol = policy.ParsePolicy(ZONE_LEN_ERROR % (ZONE_MAX_LEN, 'dmz'), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='policy']/from/member") self.assertEqual(x, ZONE_MAX_LEN, output) pol = policy.ParsePolicy(ZONE_LEN_ERROR % (ZONE_TOO_LONG, 'dmz'), self.naming) self.assertRaisesRegex(paloaltofw.PaloAltoFWNameTooLongError, '^Source zone must be 31 characters max', paloaltofw.PaloAltoFW, pol, EXP_INFO) # to pol = policy.ParsePolicy(ZONE_LEN_ERROR % ('dmz', ZONE_MAX_LEN), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='policy']/to/member") self.assertEqual(x, ZONE_MAX_LEN, output) pol = policy.ParsePolicy(ZONE_LEN_ERROR % ('dmz', ZONE_TOO_LONG), self.naming) self.assertRaisesRegex(paloaltofw.PaloAltoFWNameTooLongError, '^Destination zone must be 31 characters max', paloaltofw.PaloAltoFW, pol, EXP_INFO) def test_ZonesRequired(self): BAD_HEADERS = [ 'header{target::paloalto}', 'header{target::paloalto from-zone x}', 'header{target::paloalto x x to-zone x}', ] msg = ('^Palo Alto Firewall filter arguments ' 'must specify from-zone and to-zone[.]$') for header in BAD_HEADERS: pol = policy.ParsePolicy(header + GOOD_TERM_3, self.naming) self.assertRaisesRegex(paloaltofw.UnsupportedFilterError, msg, paloaltofw.PaloAltoFW, pol, EXP_INFO) def test_LongComments(self): POL = """ header { comment:: "%s" target:: paloalto from-zone trust to-zone untrust } term rule-1 { comment:: "%s" pan-application:: ssl action:: accept }""" # get maximum lengths pol = policy.ParsePolicy(POL % ('C', 'C'), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) MAX_TAG_COMMENTS_LENGTH = paloalto._MAX_TAG_COMMENTS_LENGTH MAX_RULE_DESCRIPTION_LENGTH = 
paloalto._MAX_RULE_DESCRIPTION_LENGTH tag = 'trust_untrust_policy-comment-1' # maximum length pol = policy.ParsePolicy( POL % ('C' * MAX_TAG_COMMENTS_LENGTH, 'C' * MAX_RULE_DESCRIPTION_LENGTH), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_TAG + "/entry[@name='%s']/comments" % tag) self.assertEqual(x, 'C' * MAX_TAG_COMMENTS_LENGTH, output) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='rule-1']/description") self.assertEqual(x, 'C' * MAX_RULE_DESCRIPTION_LENGTH, output) # maximum length + 1 pol = policy.ParsePolicy( POL % ('C' * (MAX_TAG_COMMENTS_LENGTH + 1), 'C' * (MAX_RULE_DESCRIPTION_LENGTH + 1)), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) # verify warning with self.assertLogs(level='WARN') as log: output = str(paloalto) self.assertEqual(len(log.output), 2, log.output) self.assertIn('comments exceeds maximum length', log.output[0]) self.assertIn('description exceeds maximum length', log.output[1]) x = paloalto.config.findtext(PATH_TAG + "/entry[@name='%s']/comments" % tag) self.assertEqual(x, 'C' * MAX_TAG_COMMENTS_LENGTH, output) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='rule-1']/description") self.assertEqual(x, 'C' * MAX_RULE_DESCRIPTION_LENGTH, output) def testTermLen(self): TERM = """ term %s { pan-application:: ssl action:: accept } """ # get maximum length pol = policy.ParsePolicy(GOOD_HEADER_1 + TERM % 'T', self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) TERM_MAX_LENGTH = paloalto._TERM_MAX_LENGTH # maximum length term = 'T' * TERM_MAX_LENGTH pol = policy.ParsePolicy(GOOD_HEADER_1 + TERM % term, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.find(PATH_RULES + "/entry[@name='%s']" % term) self.assertIsNotNone(x, output) # maximum length + 1 term = 'T' * (TERM_MAX_LENGTH + 1) pol = policy.ParsePolicy(GOOD_HEADER_1 + TERM % term, self.naming) regex = '^Term .+ is too long[.] 
Limit is %d characters' % TERM_MAX_LENGTH self.assertRaisesRegex(aclgenerator.TermNameTooLongError, regex, paloaltofw.PaloAltoFW, pol, EXP_INFO) def testPanApplication(self): POL1 = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { action:: accept }""" POL2 = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { pan-application:: %s action:: accept %s }""" APPS = [ {'app1'}, {'app1', 'app2'}, {'app1', 'app2', 'app3'}, ] POL3 = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { pan-application:: web-browsing action:: accept%s }""" T0 = '' T1 = """ protocol:: tcp """ T2 = """ protocol:: tcp destination-port:: PORT1 PORT2 """ POL4 = """ header { target:: paloalto from-zone trust to-zone untrust %s } term rule-1 { pan-application:: web-browsing action:: accept protocol:: %s }""" POL5 = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { pan-application:: web-browsing action:: accept protocol:: icmp icmp-type:: echo-request }""" POL6 = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { pan-application:: web-browsing protocol:: tcp icmp destination-port:: PORT1 action:: accept }""" pol = policy.ParsePolicy(POL1, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='rule-1']/application/member") self.assertEqual(x, 'any', output) for i, app in enumerate(APPS): pol = policy.ParsePolicy(POL2 % (' '.join(app), ''), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/application/member") apps = {elem.text for elem in x} self.assertEqual(APPS[i], apps, output) for i, app in enumerate(APPS): pol = policy.ParsePolicy(POL2 % (' '.join(app), 'protocol:: tcp'), self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/application/member") apps = {elem.text for elem in x} self.assertEqual(APPS[i], apps, output) pol = policy.ParsePolicy(POL3 % T0, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='rule-1']/service/member") self.assertEqual(x, 'application-default', output) pol = policy.ParsePolicy(POL3 % T1, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='rule-1']/service/member") self.assertEqual(x, 'any-tcp', output) definitions = naming.Naming() definitions._ParseLine('PORT1 = 8080/tcp', 'services') definitions._ParseLine('PORT2 = 8081/tcp', 'services') pol = policy.ParsePolicy(POL3 % T2, definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findtext(PATH_RULES + "/entry[@name='rule-1']/service/member") self.assertEqual(x, 'service-rule-1-tcp', output) regex = ('^Term rule-1 contains non tcp, udp protocols ' 'with pan-application:') pol = policy.ParsePolicy(POL4 % ('inet', 'tcp icmp'), self.naming) self.assertRaisesRegex(paloaltofw.UnsupportedFilterError, regex, paloaltofw.PaloAltoFW, pol, EXP_INFO) pol = policy.ParsePolicy(POL4 % ('inet6', 'icmpv6'), self.naming) self.assertRaisesRegex(paloaltofw.UnsupportedFilterError, regex, paloaltofw.PaloAltoFW, pol, EXP_INFO) pol = policy.ParsePolicy(POL5, self.naming) self.assertRaisesRegex(paloaltofw.UnsupportedFilterError, regex, 
paloaltofw.PaloAltoFW, pol, EXP_INFO) self.assertRaisesRegex(policy.MixedPortandNonPortProtos, '^Term rule-1 contains mixed uses of protocols ' 'with and without port numbers', policy.ParsePolicy, POL6, definitions) def testPanPorts(self): POL = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { %s action:: accept }""" T = """ protocol:: udp destination-port:: NTP """ definitions = naming.Naming() definitions._ParseLine('NTP = 123/tcp 123/udp', 'services') definitions._ParseLine('DNS = 53/tcp 53/udp', 'services') pol = policy.ParsePolicy(POL % T, definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) name = "service-rule-1-udp" path = "/entry[@name='%s']/protocol/udp/port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "123", output) path = "/entry[@name='%s']/protocol/udp/source-port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertIsNone(x, output) T = """ protocol:: udp source-port:: NTP """ pol = policy.ParsePolicy(POL % T, definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) name = "service-rule-1-udp" path = "/entry[@name='%s']/protocol/udp/port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "0-65535", output) path = "/entry[@name='%s']/protocol/udp/source-port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "123", output) T = """ protocol:: tcp source-port:: NTP destination-port:: NTP DNS """ pol = policy.ParsePolicy(POL % T, definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) name = "service-rule-1-tcp" path = "/entry[@name='%s']/protocol/tcp/port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "53,123", output) path = "/entry[@name='%s']/protocol/tcp/source-port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "123", output) T = """ protocol:: tcp """ pol = policy.ParsePolicy(POL % T, definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) name = "any-tcp" path = "/entry[@name='%s']/protocol/tcp/port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "0-65535", output) path = "/entry[@name='%s']/protocol/tcp/source-port" % name x = paloalto.config.find(PATH_SERVICE + path) self.assertIsNone(x, output) T = """ protocol:: tcp udp """ pol = policy.ParsePolicy(POL % T, definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) name = "any-tcp" path = "/entry[@name='%s']/protocol/tcp/port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "0-65535", output) name = "any-udp" path = "/entry[@name='%s']/protocol/udp/port" % name x = paloalto.config.findtext(PATH_SERVICE + path) self.assertEqual(x, "0-65535", output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/service/member") services = {elem.text for elem in x} self.assertEqual({"any-tcp", "any-udp"}, services, output) def testPortLessNonPort(self): POL = """ header { target:: paloalto from-zone trust to-zone untrust } term rule-1 { %s action:: accept }""" T = """ protocol:: udp icmp """ pol = policy.ParsePolicy(POL % T, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1-1']/service/member") self.assertTrue(len(x) > 0, output) services = {elem.text for elem in x} self.assertEqual({"any-udp"}, services, output) x = 
paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1-2']/application/member") self.assertTrue(len(x) > 0, output) applications = {elem.text for elem in x} self.assertEqual({"icmp"}, applications, output) T = """ protocol:: udp tcp icmp gre """ pol = policy.ParsePolicy(POL % T, self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1-1']/service/member") self.assertTrue(len(x) > 0, output) services = {elem.text for elem in x} self.assertEqual({"any-udp", "any-tcp"}, services, output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1-2']/application/member") self.assertTrue(len(x) > 0, output) applications = {elem.text for elem in x} self.assertEqual({"icmp", "gre"}, applications, output) def testSrcAnyDstAnyAddressFamily(self): POL = """ header { target:: paloalto from-zone trust to-zone untrust %s } term rule-1 { action:: accept }""" pol = policy.ParsePolicy(POL % "mixed", self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) for srcdst in ["source", "destination"]: x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/%s/member" % srcdst) self.assertTrue(len(x) == 1, output) values = {elem.text for elem in x} self.assertEqual({"any"}, values, output) pol = policy.ParsePolicy(POL % "inet", self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) for srcdst in ["source", "destination"]: x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/%s/member" % srcdst) self.assertTrue(len(x) == 1, output) values = {elem.text for elem in x} self.assertEqual({"any-ipv4"}, values, output) x = paloalto.config.find(PATH_RULES + "/entry[@name='rule-1']/negate-source") self.assertIsNone(x, output) x = paloalto.config.find(PATH_RULES + "/entry[@name='rule-1']/negate-destination") self.assertIsNone(x, output) pol = policy.ParsePolicy(POL % "inet6", self.naming) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) for srcdst in ["source", "destination"]: x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/%s/member" % srcdst) self.assertTrue(len(x) == 1, output) values = {elem.text for elem in x} self.assertEqual({"any-ipv4"}, values, output) x = paloalto.config.find(PATH_RULES + "/entry[@name='rule-1']/negate-source") self.assertIsNotNone(x, output) x = paloalto.config.find(PATH_RULES + "/entry[@name='rule-1']/negate-destination") self.assertIsNotNone(x, output) def testNoAddrObj(self): definitions = naming.Naming() definitions._ParseLine('NET1 = 10.1.0.0/24', 'networks') definitions._ParseLine('NET2 = 10.2.0.0/24', 'networks') definitions._ParseLine('NET3 = 10.3.1.0/24', 'networks') definitions._ParseLine(' 10.3.2.0/24', 'networks') definitions._ParseLine('NET4 = 2001:db8:0:aa::/64', 'networks') definitions._ParseLine(' 2001:db8:0:bb::/64', 'networks') definitions._ParseLine('NET5 = NET3 NET4', 'networks') definitions._ParseLine('NET6 = 4000::/2', 'networks') definitions._ParseLine('NET7 = 8000::/1', 'networks') POL = """ header { target:: paloalto from-zone trust to-zone untrust %s no-addr-obj } term rule-1 { %s action:: accept protocol:: tcp }""" T = """ source-address:: NET1 destination-address:: NET2 NET3 """ pol = policy.ParsePolicy(POL % ("inet", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/source/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} 
self.assertEqual({"10.1.0.0/24"}, addrs, output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/destination/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"10.2.0.0/24", "10.3.1.0/24", "10.3.2.0/24"}, addrs, output) T = """ source-address:: NET4 """ pol = policy.ParsePolicy(POL % ("inet6", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/source/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"2001:db8:0:aa::/64", "2001:db8:0:bb::/64"}, addrs, output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/destination/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"any"}, addrs, output) T = """ destination-address:: NET5 """ pol = policy.ParsePolicy(POL % ("mixed", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/source/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"any"}, addrs, output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/destination/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"10.3.1.0/24", "10.3.2.0/24", "2001:db8:0:aa::/64", "2001:db8:0:bb::/64"}, addrs, output) T = """ source-address:: NET6 destination-address:: NET7 """ pol = policy.ParsePolicy(POL % ("mixed", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/source/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"4000::/3", "6000::/3"}, addrs, output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/destination/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"8000::/3", "a000::/3", "c000::/3", "e000::/3"}, addrs, output) POL = """ header { target:: paloalto from-zone trust to-zone untrust inet no-addr-obj } term rule-1 { source-address:: NET1 NET2 action:: accept protocol:: tcp } header { target:: paloalto from-zone trust to-zone untrust inet addr-obj } term rule-1 { destination-address:: NET3 action:: accept protocol:: tcp }""" pol = policy.ParsePolicy(POL, definitions) self.assertRaisesRegex(paloaltofw.UnsupportedHeaderError, '^Cannot mix addr-obj and no-addr-obj header ' 'option in a single policy file$', paloaltofw.PaloAltoFW, pol, EXP_INFO) def testAddrObj(self): definitions = naming.Naming() definitions._ParseLine('NET1 = 10.1.0.0/24', 'networks') definitions._ParseLine('NET2 = 10.2.0.0/24', 'networks') definitions._ParseLine('NET3 = 10.3.1.0/24', 'networks') definitions._ParseLine(' 10.3.2.0/24', 'networks') definitions._ParseLine('NET4 = 4000::/128', 'networks') definitions._ParseLine('NET5 = 4000::/2', 'networks') POL = """ header { target:: paloalto from-zone trust to-zone untrust %s addr-obj } term rule-1 { %s action:: accept protocol:: tcp }""" T = """ source-address:: NET1 destination-address:: NET2 NET3 """ pol = policy.ParsePolicy(POL % ("inet", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/source/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} 
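    # In addr-obj mode the rule references named objects rather than literal
    # prefixes: the rule's source member is the group "NET1", the
    # address-group entry "NET1" lists member "NET1_0", and the address entry
    # "NET1_0" carries the actual prefix 10.1.0.0/24; the assertions that
    # follow walk down exactly that chain.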
self.assertEqual({"NET1"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESS_GROUP + "/entry[@name='NET1']/static/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET1_0"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET1_0']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"10.1.0.0/24"}, addrs, output) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/destination/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET2", "NET3"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESS_GROUP + "/entry[@name='NET2']/static/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET2_0"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET2_0']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"10.2.0.0/24"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESS_GROUP + "/entry[@name='NET3']/static/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET3_0", "NET3_1"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET3_0']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"10.3.1.0/24"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET3_1']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"10.3.2.0/24"}, addrs, output) # These tests check that large IP ranges are broken into equivalent subnets. T = """ source-address:: NET5 """ pol = policy.ParsePolicy(POL % ("mixed", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/source/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET5"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESS_GROUP + "/entry[@name='NET5']/static/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET5_0", "NET5_1"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET5_0']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"4000::/3"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET5_1']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"6000::/3"}, addrs, output) T = """ source-address:: NET4 destination-address:: NET5 """ pol = policy.ParsePolicy(POL % ("mixed", T), definitions) paloalto = paloaltofw.PaloAltoFW(pol, EXP_INFO) output = str(paloalto) x = paloalto.config.findall(PATH_RULES + "/entry[@name='rule-1']/destination/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET5"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESS_GROUP + "/entry[@name='NET5']/static/member") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"NET5_0", "NET5_1"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + "/entry[@name='NET5_0']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"4000::/3"}, addrs, output) x = paloalto.config.findall(PATH_ADDRESSES + 
"/entry[@name='NET5_1']/ip-netmask") self.assertTrue(len(x) > 0, output) addrs = {elem.text for elem in x} self.assertEqual({"6000::/3"}, addrs, output) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/pcap_test.py000066400000000000000000000271741437377527500174270ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for pcap rendering module.""" import datetime from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import pcap from capirca.lib import policy GOOD_HEADER = """ header { comment:: "this is a test acl" target:: pcap test-filter } """ GOOD_HEADER_IN = """ header { comment:: "this is a test acl" target:: pcap test-filter in } """ GOOD_HEADER_OUT = """ header { comment:: "this is a test acl" target:: pcap test-filter out } """ GOOD_TERM_ICMP = """ term good-term-icmp { protocol:: icmp action:: accept } """ GOOD_TERM_ICMP_TYPES = """ term good-term-icmp-types { protocol:: icmp icmp-type:: echo-reply unreachable time-exceeded action:: deny } """ GOOD_TERM_ICMPV6 = """ term good-term-icmpv6 { protocol:: icmpv6 action:: accept } """ BAD_TERM_ICMP = """ term test-icmp { icmp-type:: echo-request echo-reply action:: accept } """ BAD_TERM_ACTION = """ term bad-term-action { protocol:: icmp action:: undefined } """ GOOD_TERM_TCP = """ term good-term-tcp { comment:: "Test term 1" destination-address:: PROD_NETWRK destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_WARNING_TERM = """ term good-warning-term { comment:: "Test term 1" destination-address:: PROD_NETWRK destination-port:: SMTP protocol:: tcp policer:: batman action:: accept } """ GOOD_TERM_LOG = """ term good-term-log { protocol:: tcp logging:: true action:: accept } """ GOOD_ICMP_CODE = """ term good_term { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ EXPIRED_TERM = """ term expired_test { expiration:: 2000-1-1 action:: deny } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ NEXT_TERM = """ term next { action:: next } """ NEXT_LOG_TERM = """ term next-log { logging:: true action:: next } """ ESTABLISHED_TERM = """ term accept-established { protocol:: tcp option:: tcp-established action:: accept } """ VRRP_TERM = """ term vrrp-term { protocol:: vrrp action:: accept } """ UNICAST_TERM = """ term unicast-term { destination-address:: ANY protocol:: tcp action:: accept } """ GOOD_TERM_HBH = """ term good-term-hbh { protocol:: hopopt action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'name', 'option', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 
'translated', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none', 'established', 'tcp-established'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class PcapFilter(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testTcp(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(dst net 10.0.0.0/8) and (proto \\tcp) and (dst port 25)', result, 'did not find actual term for good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWRK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testLog(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_LOG, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'proto \\tcp', result, 'did not find actual term for good-term-log') def testIcmp(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'proto \\icmp', result, 'did not find actual term for good-term-icmp') def testIcmpCode(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_ICMP_CODE, self.naming), EXP_INFO) result = str(acl) self.assertIn('and icmp[icmpcode] == 3', result, result) self.assertIn('and icmp[icmpcode] == 4', result, result) def testIcmpTypes(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMP_TYPES, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(proto \\icmp) and (icmp[icmptype] == 0 or icmp[icmptype] == 3' ' or icmp[icmptype] == 11)', result, 'did not find actual term for good-term-icmp-types') def testIcmpv6(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMPV6, self.naming), EXP_INFO) result = str(acl) self.assertIn( 'icmp6', result, 'did not find actual term for good-term-icmpv6') def testBadIcmp(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + BAD_TERM_ICMP, self.naming), EXP_INFO) self.assertRaises(aclgenerator.UnsupportedFilterError, str, acl) @mock.patch.object(pcap.logging, 'warning') def testExpiredTerm(self, mock_warn): 
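    # EXPIRED_TERM carries an expiration date far in the past (2000-1-1), so
    # rendering should skip the term and emit only the warning asserted below.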
pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', 'expired_test', 'test-filter') @mock.patch.object(pcap.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'test-filter') def testMultiprotocol(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(proto \\tcp or proto \\udp or proto \\icmp)', result, 'did not find actual term for multi-proto') def testNextTerm(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + NEXT_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn('', result, 'did not find actual term for good-term-icmpv6') def testTcpOptions(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + ESTABLISHED_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(tcp[tcpflags] & (tcp-ack) == (tcp-ack)', result, 'did not find actual term for established') def testVrrpTerm(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + VRRP_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(proto 112)', result, 'did not find actual term for vrrp') def testMultiHeader(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_LOG + GOOD_HEADER + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertIn( '((((proto \\tcp))\n))\nor\n((((proto \\icmp))\n))', result, 'did not find actual terms for multi-header') def testDirectional(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER_IN + GOOD_TERM_LOG + GOOD_HEADER_OUT + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(((dst net localhost and ((proto \\tcp)))\n))\nor\n' '(((src net localhost and ((proto \\icmp)))\n))', result, 'did not find actual terms for directional') def testUnicastIPv6(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('::/0')] acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER_IN + UNICAST_TERM, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(dst net localhost and ((proto \\tcp)))', result, 'did not find actual terms for unicast-term') self.naming.GetNetAddr.assert_called_once_with('ANY') def testHbh(self): acl = pcap.PcapFilter(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_HBH, self.naming), EXP_INFO) result = str(acl) self.assertIn( '(ip6 protochain 0)', result, 'did not find actual terms for unicast-term') def testBuildTokens(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] pol1 = pcap.PcapFilter(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_TCP, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] pol1 = pcap.PcapFilter( policy.ParsePolicy(GOOD_HEADER + GOOD_WARNING_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) if __name__ == '__main__': absltest.main() 
capirca-2.0.9/tests/lib/policy_simple_test.py000066400000000000000000000151571437377527500213520ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl.testing import absltest from absl import logging from capirca.lib import policy_simple class FieldTest(absltest.TestCase): def setUp(self): super().setUp() logging.debug('======> %s <======', self.id()) def testAppendAppends(self): f = policy_simple.Field('Testvalue') f.Append('TESTVALUE') self.assertEqual(f.value, 'TestvalueTESTVALUE') def testStr(self): f = policy_simple.Field('Testvalue') self.assertEqual('UNKNOWN::Testvalue', str(f)) def testStrIndents(self): f = policy_simple.Field('Testvalue\nTestValue') self.assertEqual('UNKNOWN::Testvalue\n TestValue', str(f)) def testIntegerField(self): self.assertRaises(ValueError, policy_simple.IntegerField, '7.01') try: _ = policy_simple.IntegerField('7') except ValueError: self.fail("IntegerField should accept '7' as value.") def testNamingFieldRejectsBad(self): bads = ( 'corp_internal', 'CORP+INTERNAL', ) for bad in bads: logging.debug('Testing bad "%s".', bad) self.assertRaises(ValueError, policy_simple.NamingField, bad) def testNamingFieldAcceptsGood(self): goods = ( 'CORP_INTERNAL', 'RFC1918', 'FOO_BAR102.BAZ101', ) for good in goods: try: logging.debug('Testing good "%s".', good) _ = policy_simple.NamingField(good) except ValueError: self.fail('Rejected good NamingField value "%s".' % good) def testNamingFieldAppendRejectsBad(self): f = policy_simple.NamingField('RFC1918') bads = ( 'corp_internal', 'CORP+INTERNAL', ) for bad in bads: logging.debug('Testing bad "%s".', bad) self.assertRaises(ValueError, f.Append, bad) def testNamingFieldAppendAcceptsGood(self): f = policy_simple.NamingField('RFC1918') goods = ( 'CORP_INTERNAL', 'RFC1918', 'FOO_BAR102.BAZ101', ) for good in goods: try: logging.debug('Testing good "%s".', good) _ = f.Append(good) except ValueError: self.fail('Rejected good NamingField value "%s".' 
% good) def testNamingFieldDedupes(self): f = policy_simple.NamingField('RFC1918 CORP_INTERNAL RFC1918') f.Append('RFC1918') f.Append('CORP_INTERNAL RFC1918') self.assertEqual(set(['RFC1918', 'CORP_INTERNAL']), f.value) def testNamingFieldStr(self): f = policy_simple.NamingField(' '.join(str(x) for x in range(25))) expected_str = ('UNKNOWN:: 0 1 10 11 12 13 14 15 16 17 18 19 2 20 21' ' 22 23 24 3 4 5 6 7\n 9') self.assertEqual(expected_str, str(f)) class BlockTest(absltest.TestCase): def setUp(self): super().setUp() logging.debug('======> %s <======', self.id()) def testRejectsNonField(self): b = policy_simple.Block() for t in ('', 3, lambda x: x, policy_simple.Header(), policy_simple.Policy('test')): self.assertRaises(TypeError, b.AddField, t) def testFieldsWithType(self): b = policy_simple.Block() c1 = policy_simple.Comment('test1') c2 = policy_simple.Comment('test2') d = policy_simple.DestinationAddress('XYZ') s = policy_simple.SourceAddress('ABC') for field in (c1, d, c2, s): b.AddField(field) self.assertEqual([c1, d, c2, s], b.fields) self.assertEqual([c1, c2], b.FieldsWithType(policy_simple.Comment)) def testIter(self): a = object() b = object() c = object() block = policy_simple.Block() block.fields = (a, b, c) self.assertEqual([a, b, c], list(block)) class PolicyTest(absltest.TestCase): def setUp(self): super().setUp() logging.debug('======> %s <======', self.id()) def testAddMember(self): p = policy_simple.Policy('test') good = [policy_simple.Header(), policy_simple.Term('test'), policy_simple.BlankLine(), policy_simple.CommentLine('test'), policy_simple.Include('other_pol')] bad = ('', 3, lambda x: x, policy_simple.Field('test')) for member in good: try: p.AddMember(member) except TypeError: self.fail('Policy should accept member "%s"' % member) self.assertEqual(good, p.members) for member in bad: self.assertRaises(TypeError, p.AddMember, member) def testIter(self): a = object() b = object() c = object() pol = policy_simple.Policy(identifier=None) pol.members = (a, b, c) self.assertEqual([a, b, c], list(pol)) class PolicyParserTest(absltest.TestCase): def setUp(self): super().setUp() logging.debug('======> %s <======', self.id()) def Parser(self, data): return policy_simple.PolicyParser(data=data, identifier='test') def testParseCommentLine(self): parser = self.Parser('# test-comment-value') expected = policy_simple.CommentLine('# test-comment-value') pol = parser.Parse() self.assertEqual([expected], pol.members) def testParseBlankLine(self): parser = self.Parser('') expected = policy_simple.BlankLine() pol = parser.Parse() self.assertEqual([expected], pol.members) def testParseInclude(self): parser = self.Parser('#include other/file #whatever') expected = policy_simple.Include('other/file') pol = parser.Parse() self.assertEqual([expected], pol.members) def testParseHeader(self): parser = self.Parser('header {\ntarget::Test\n}') expected = policy_simple.Header() expected.AddField(policy_simple.Target('Test')) pol = parser.Parse() self.assertEqual(expected, pol.members[0]) def testParseTerm(self): parser = self.Parser('term testy {\ntarget::Test\n}') expected = policy_simple.Term('testy') expected.AddField(policy_simple.Target('Test')) pol = parser.Parse() self.assertEqual(expected, pol.members[0]) def testParseTermBadField(self): parser = self.Parser('term testy {\nbad_field::Test\n}') self.assertRaises(ValueError, parser.Parse) def testUnfinishedBlock(self): parser = self.Parser('term testy {\ntarget::Test\n') self.assertRaises(ValueError, parser.Parse) if __name__ == 
'__main__': absltest.main() capirca-2.0.9/tests/lib/policy_test.py000066400000000000000000001570211437377527500177760ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for policy.py library.""" from absl.testing import absltest from unittest import mock from absl import logging from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy HEADER = """ header { comment:: "this is a test acl" comment:: "this is another comment" target:: juniper test-filter } """ HEADER_2 = """ header { comment:: "this goes in the other direction" target:: juniper test-filter-outbound } """ HEADER_3 = """ header { comment:: "test header 3" target:: cisco 50 standard } """ HEADER_4 = """ header { comment:: "test header 4" target:: iptables } """ HEADER_5 = """ header { comment:: "test header 5" target:: gce global/networks/default } """ HEADER_6 = """ header { comment:: "this is a test nftable acl" target:: nftables chain_name input 0 inet } """ HEADER_V6 = """ header { comment:: "this is a test inet6 acl" comment:: "this is another comment" target:: juniper test-filter inet6 } """ HEADER_SRX = """ header { target:: srx from-zone foo to-zone bar } """ HEADER_OBJ_GRP = """ header { target:: cisco foo object-group } """ HEADER_ADDRBOOK_MIXED = """ header { target:: srx from-zone to-zone bar target:: cisco foo } """ HEADER_HF_1 = """ header { comment:: "This is a test of HF INGRESS Policy." 
target:: gcp_hf INGRESS } """ INCLUDE_STATEMENT = """ #include "includes/y.inc" """ INCLUDED_Y_FILE = """ term included-term-1 { protocol:: tcp action:: accept } #include "includes/z.inc" """ BAD_INCLUDED_FILE = """ term included-term-1 { protocol:: tcp action:: accept } #include "/tmp/z.inc" """ BAD_INCLUDED_FILE_1 = """ term included-term-1 { protocol:: tcp action:: accept } #include "includes/../../etc/passwd.inc" """ GOOD_INCLUDED_FILE_1 = """ term good-included-term-1 { protocol:: tcp action:: accept } #include "includes/../pol/z.inc" """ GOOD_TERM_0 = """ term good-term-0 { protocol:: icmp action:: accept } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: tcp source-address:: PROD_NETWRK action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: tcp source-address:: PROD_NETWRK destination-port:: SMTP action:: accept } """ GOOD_TERM_4 = """ term good-term-4 { protocol:: 1 action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { action:: accept } """ GOOD_TERM_6 = """ term good-term-6 { protocol:: tcp destination-port:: MYSQL HIGH_PORTS action:: accept } """ GOOD_TERM_7 = """ term good-term-7 { protocol:: tcp destination-address:: PROD_NETWRK destination-exclude:: PROD_EH action:: accept } """ GOOD_TERM_8 = """ term good-term-8 { protocol:: tcp udp destination-port:: DNS action:: accept } """ GOOD_TERM_9 = """ term good-term-9 { comment:: "first comment" comment:: "second comment" action:: accept } """ GOOD_TERM_10 = """ term good-term-10 { logging:: true action:: accept } """ GOOD_TERM_11 = """ term good-term-11 { protocol:: icmp icmp-type:: echo-reply echo-request unreachable action:: accept } """ GOOD_TERM_12 = """ term qos-good-term-12 { action:: accept qos:: af4 } """ GOOD_TERM_13 = """ term good-term-13 { source-port:: GOOGLE_PUBLIC source-port:: SNMP protocol:: udp action:: accept } """ GOOD_TERM_14 = """ term good-term-14 { source-prefix:: foo_prefix_list action:: accept } """ GOOD_TERM_15 = """ term good-term-15 { destination-prefix:: bar_prefix_list baz_prefix_list action:: accept } """ GOOD_TERM_16 = """ term good-term-16 { ether-type:: arp ipv4 ether-type:: vlan action:: accept } """ GOOD_TERM_17 = """ term good-term-17 { traffic-type:: broadcast unknown-unicast traffic-type:: multicast action:: accept } """ GOOD_TERM_18 = """ term good-term-18 { comment:: "test verbatim output" verbatim:: iptables "mary had a little lamb" verbatim:: juniper "mary had another lamb" } """ GOOD_TERM_19 = """ term good-term-19 { source-port:: HTTP MYSQL destination-address:: PROD_EXTERNAL_SUPER PROD_NETWRK protocol:: tcp action:: accept } """ GOOD_TERM_20 = """ term good-term-20 { source-port:: MYSQL HTTP destination-address:: PROD_NETWRK PROD_EXTERNAL_SUPER protocol:: tcp action:: accept } """ GOOD_TERM_21 = """ term good-term-21 { source-port:: MYSQL HTTPS destination-address:: PROD_NETWRK PROD_EXTERNAL_SUPER protocol:: tcp action:: accept } """ GOOD_TERM_22 = """ term precedence-term { protocol:: icmp precedence:: 1 action:: accept } """ GOOD_TERM_23 = """ term loss-priority-term { source-port:: SSH protocol:: tcp loss-priority:: low action:: accept } """ GOOD_TERM_24 = """ term routing-instance-term { source-port:: SSH protocol:: tcp routing-instance:: foobar-router } """ GOOD_TERM_25 = """ term source-interface-term { source-port:: SSH protocol:: tcp source-interface:: foo0 action:: accept } """ GOOD_TERM_26 = """ term good-term-26 { protocol:: tcp source-address:: PROD_NETWRK source-exclude:: 
PROD_EH action:: accept } """ GOOD_TERM_27 = """ term good-term-27 { protocol:: tcp address:: PROD_NETWRK address-exclude:: PROD_EH action:: accept } """ GOOD_TERM_28 = """ term good-term-28 { protocol:: tcp source-address:: PROD_NETWRK source-exclude:: BOTTOM_HALF action:: accept } """ GOOD_TERM_29 = """ term good-term-29 { protocol:: tcp option:: tcp-established source-address:: PROD_NETWRK action:: accept } """ GOOD_TERM_30 = """ term good-term-30 { protocol:: tcp action:: accept vpn:: special-30 } """ GOOD_TERM_31 = """ term good-term-31 { protocol:: tcp action:: accept vpn:: special-31 policy-11 } """ GOOD_TERM_32 = """ term good-term-32 { forwarding-class:: fritzy action:: accept } """ GOOD_TERM_33 = """ term good-term-33 { forwarding-class:: flashy action:: accept } """ GOOD_TERM_34 = """ term good-term-34 { source-tag:: src-tag destination-tag:: dest-tag action:: accept } """ GOOD_TERM_35 = """ term good-term-35 { source-address:: PROD_NETWRK next-ip:: NEXT_IP } """ GOOD_TERM_36 = """ term good-term-36 { forwarding-class:: flashy fritzy action:: accept } """ GOOD_TERM_37 = """ term good-term-37 { protocol:: icmp action:: accept log_name:: "my special prefix" } """ GOOD_TERM_38 = """ term good-term-38 { source-prefix-except:: foo_prefix_list action:: accept } """ GOOD_TERM_39 = """ term good-term-39 { destination-prefix-except:: bar_prefix_list baz_prefix_list action:: accept } """ GOOD_TERM_40 = """ term good-term-38 { source-prefix:: foo_prefix_list source-prefix-except:: foo_prefix_list_except action:: accept } """ GOOD_TERM_41 = """ term good-term-39 { destination-prefix:: bar_prefix_list destination-prefix-except:: bar_prefix_list_except action:: accept } """ GOOD_TERM_42 = """ term good-term-42 { protocol:: icmp icmp-type:: unreachable icmp-code:: 3 4 action:: accept } """ GOOD_TERM_43 = """ term good-term-43 { ttl:: 10 action:: accept } """ GOOD_TERM_44 = """ term good-term-44 { logging:: syslog log-limit:: 999/day action:: accept } """ GOOD_TERM_45 = """ term good-term-45 { source-address:: ANY action:: accept target-service-accounts:: acct1@blah.com } """ GOOD_TERM_46 = """ term good-term-46 { protocol:: icmp tcp udp gre esp ah sctp encapsulate:: stuff_and_things } """ GOOD_TERM_47 = """ term good-term-47 { protocol:: icmp tcp udp gre esp ah sctp port-mirror:: true } """ GOOD_TERM_48 = """ term good-term-48 { protocol:: icmp source-zone:: zone1 zone2 destination-zone:: zone1 zone2 action:: accept } """ GOOD_TERM_49 = """ term good-term-46 { protocol:: udp decapsulate:: mpls-in-udp } """ GOOD_TERM_50 = """ term good-term-45 { source-address:: ANY action:: accept source-service-accounts:: acct1@blah.com } """ GOOD_TERM_V6_1 = """ term good-term-v6-1 { hop-limit:: 5 action:: accept } """ GOOD_TERM_V6_2 = """ term good-term-v6-1 { hop-limit:: 5-7 action:: accept } """ TERM_SUPER_2 = """ term term-super { address:: PROD action:: accept } """ TERM_SUPER_3 = """ term term-super { protocol-except:: tcp udp icmpv6 counter:: stuff_and_things action:: reject } """ TERM_SUB_2 = """ term term-sub { protocol:: icmp action:: accept } """ TERM_UNSORTED_ICMP_TYPE = """ term good-term-11 { protocol:: icmp icmp-type:: unreachable echo-request echo-reply action:: accept } """ TERM_UNSORTED_ICMP_CODE = """ term good-term-11 { icmp-type:: unreachable icmp-code:: 15 4 9 1 action:: accept } """ BAD_TERM_1 = """ term bad-term- 1 { protocol:: tcp action:: reject } """ BAD_TERM_2 = """ term bad-term-2 { prootocol:: tcp action:: accept } """ BAD_TERM_3 = """ term bad-term-3 { protocol:: tcp 
source-port:: SNMP action:: accept } """ BAD_TERM_4 = """ term bad-term-4 { source-port:: SMTP action:: accept } """ BAD_TERM_5 = """ term bad-term-5 { protocol:: tcp destination-address:: PROD_EH destination-exclude:: PROD_NETWRK action:: accept } """ BAD_TERM_6 = """ term bad-term-6 { logging:: unvalidloggingoption action:: accept } """ BAD_TERM_7 = """ term bad-term-7 { action:: discard } """ BAD_TERM_8 = """ term bad-term-8 { akshun:: accept } """ BAD_TERM_9 = """ term bad-term-9 { ether-type:: arp protocol:: udp action:: accept } """ BAD_TERM_10 = """ term bad-term-10 { verbatim:: cisco "mary had a little lamb" action:: accept } """ BAD_TERM_12 = """ term bad-term-12 { protocol:: icmp icmp-type:: echo-foo packet-too-beaucoups action:: accept } """ BAD_TERM_13 = """ term bad-term-13 { protocol:: icmp icmp-type:: unreachable icmp-code:: 99 action:: accept } """ BAD_TERM_14 = """ term bad-term-14 { protocol:: icmp icmp-type:: unreachable redirect icmp-code:: 3 action:: accept } """ BAD_TERM_15 = """ term bad-term-15 { ttl:: 300 action:: accept } """ BAD_TERM_16 = """ term bad-term-16 { destination-port:: FOO protocol:: tcp udp gre action:: accept } """ # pylint: disable=maybe-no-member class PolicyTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) @mock.patch.object(policy, '_ReadFile') def testIncludes(self, mock_file): """Ensure includes work, as well as nested included.""" mock_file.side_effect = [INCLUDED_Y_FILE, GOOD_TERM_5] # contents of our base policy (which has an included file) pol = HEADER + INCLUDE_STATEMENT + GOOD_TERM_1 p = policy.ParsePolicy(pol, self.naming) _, terms = p.filters[0] # ensure include worked and we now have 3 terms in this policy self.assertEqual(len(terms), 3) # ensure included_term_1 is included as first term self.assertEqual(terms[0].name, 'included-term-1') # ensure good-term-5 is included as second term self.assertEqual(terms[1].name, 'good-term-5') # ensure good-term-1 shows up as the second term self.assertEqual(terms[2].name, 'good-term-1') mock_file.assert_has_calls( [mock.call('includes/y.inc'), mock.call('includes/z.inc')] ) @mock.patch.object(policy, '_ReadFile') def testBadIncludes(self, mock_file): """Ensure nested includes error handling works.""" mock_file.side_effect = [BAD_INCLUDED_FILE, GOOD_TERM_5] # contents of our base policy (which has a bad included file) pol = HEADER + INCLUDE_STATEMENT + GOOD_TERM_1 self.assertRaises( policy.InvalidIncludeDirectoryError, policy.ParsePolicy, pol, self.naming, ) # Ensuring relative paths don't bypass invalid directory checks mock_file.side_effect = [BAD_INCLUDED_FILE_1, GOOD_TERM_5] pol = HEADER + BAD_INCLUDED_FILE_1 + GOOD_TERM_1 self.assertRaises( policy.InvalidIncludeDirectoryError, policy.ParsePolicy, pol, self.naming, ) @mock.patch.object(policy, '_ReadFile') def testGoodIncludesWithRelativePaths(self, mock_file): """Ensure nested includes error handling works for valid files.""" mock_file.side_effect = [GOOD_TERM_5] # base policy has a good included file, with relative paths pol = HEADER + GOOD_INCLUDED_FILE_1 + GOOD_TERM_1 p = policy.ParsePolicy(pol, self.naming) _, terms = p.filters[0] # ensure include worked and we now have 3 terms in this policy self.assertEqual(len(terms), 3) self.assertEqual(terms[0].name, 'good-included-term-1') self.assertEqual(terms[1].name, 'good-term-5') self.assertEqual(terms[2].name, 'good-term-1') def testGoodPol(self): pol = HEADER + GOOD_TERM_1 + GOOD_TERM_2 self.naming.GetNetAddr.return_value = 
[nacaddr.IPv4('10.0.0.0/8')] ret = policy.ParsePolicy(pol, self.naming) # we should only have one filter from that self.assertEqual(len(ret.filters), 1) header, terms = ret.filters[0] self.assertEqual(type(ret), policy.Policy) self.assertEqual(str(terms[0].protocol[0]), 'icmp') self.assertEqual(len(terms), 2) # the comment is stored as a double quoted string, complete with double # quotes. self.assertEqual(str(header.comment[0]), 'this is a test acl') self.assertEqual(str(header.comment[1]), 'this is another comment') self.assertEqual(str(header.target[0]), 'juniper') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWRK') def testBadPol(self): pol = HEADER + BAD_TERM_1 self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming) def testMissingHeader(self): pol = GOOD_TERM_1 + GOOD_TERM_2 self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming) def testService(self): pol = HEADER + GOOD_TERM_1 + GOOD_TERM_3 self.naming.GetNetAddr.return_value = [nacaddr.IPv4('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(len(terms), 2) self.assertEqual(str(terms[1].protocol[0]), 'tcp') self.assertEqual(terms[1].destination_port[0], (25, 25)) self.naming.GetNetAddr.assert_called_once_with('PROD_NETWRK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testInvalidKeyword(self): pol = HEADER + BAD_TERM_2 self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming) def testNumericProtocol(self): pol = HEADER + GOOD_TERM_4 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(str(terms[0].protocol[0]), '1') def testHopLimitSingle(self): pol = HEADER_V6 + GOOD_TERM_V6_1 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(str(terms[0].hop_limit[0]), '5') def testHopLimitRange(self): pol = HEADER_V6 + GOOD_TERM_V6_2 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(str(terms[0].hop_limit[2]), '7') def testBadPortProtocols(self): pol = HEADER + BAD_TERM_3 self.naming.GetServiceByProto('SNMP', 'tcp').AndReturn([]) self.assertRaises(policy.TermPortProtocolError, policy.ParsePolicy, pol, self.naming) def testBadPortProtocols2(self): pol = HEADER + BAD_TERM_4 self.assertRaises(policy.TermPortProtocolError, policy.ParsePolicy, pol, self.naming) def testMinimumTerm(self): pol = HEADER + GOOD_TERM_5 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(len(terms), 1) self.assertEqual(str(terms[0].action[0]), 'accept') def testPortCollapsing(self): pol = HEADER + GOOD_TERM_6 self.naming.GetServiceByProto.return_value = ['3306'] self.naming.GetServiceByProto.return_value = ['1024-65535'] ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertSequenceEqual(terms[0].destination_port, [(1024, 65535)]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('MYSQL', 'tcp'), mock.call('HIGH_PORTS', 'tcp')], any_order=True) def testPortCollapsing2(self): pol = HEADER + GOOD_TERM_8 self.naming.GetServiceByProto.side_effect = [['53'], ['53']] ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] 
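    # GetServiceByProto returns ['53'] for both the tcp and udp lookups of
    # DNS, so the parsed term should collapse them into a single (53, 53)
    # destination-port tuple.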
self.assertSequenceEqual(terms[0].destination_port, [(53, 53)]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')], any_order=True) def testMinimumTerm2(self): pol = HEADER + GOOD_TERM_9 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(str(terms[0].comment[0]), 'first comment') self.assertEqual(str(terms[0].comment[1]), 'second comment') def testLogNameTerm(self): pol = HEADER_6 + GOOD_TERM_37 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(str(terms[0].log_name), 'my special prefix') def testTermEquality(self): self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('64.233.160.0/19'), nacaddr.IPv4('66.102.0.0/20'), nacaddr.IPv4('66.249.80.0/20'), nacaddr.IPv4('72.14.192.0/18'), nacaddr.IPv4('72.14.224.0/20'), nacaddr.IPv4('216.239.32.0/19')], [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('64.233.160.0/19'), nacaddr.IPv4('66.102.0.0/20'), nacaddr.IPv4('66.249.80.0/20'), nacaddr.IPv4('72.14.192.0/18'), nacaddr.IPv4('72.14.224.0/20'), nacaddr.IPv4('216.239.32.0/19')], [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('64.233.160.0/19'), nacaddr.IPv4('66.102.0.0/20'), nacaddr.IPv4('66.249.80.0/20'), nacaddr.IPv4('72.14.192.0/18'), nacaddr.IPv4('72.14.224.0/20'), nacaddr.IPv4('216.239.32.0/19')]] self.naming.GetServiceByProto.side_effect = [ ['80'], ['3306'], ['3306'], ['80'], ['3306'], ['443']] pol_text = HEADER + GOOD_TERM_19 + GOOD_TERM_20 + GOOD_TERM_21 ret = policy.ParsePolicy(pol_text, self.naming, shade_check=False) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(len(terms), 3) self.assertEqual(terms[0], terms[1]) self.assertNotEqual(terms[0], terms[2]) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_EXTERNAL_SUPER'), mock.call('PROD_NETWRK'), mock.call('PROD_NETWRK'), mock.call('PROD_EXTERNAL_SUPER'), mock.call('PROD_NETWRK'), mock.call('PROD_EXTERNAL_SUPER')], any_order=True) self.naming.GetServiceByProto.assert_has_calls([ mock.call('HTTP', 'tcp'), mock.call('MYSQL', 'tcp'), mock.call('MYSQL', 'tcp'), mock.call('HTTP', 'tcp'), mock.call('MYSQL', 'tcp'), mock.call('HTTPS', 'tcp')], any_order=True) def testGoodDestAddrExcludes(self): pol = HEADER + GOOD_TERM_7 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.62.0.0/15')]] ret = policy.ParsePolicy(pol, self.naming) _, terms = ret.filters[0] self.assertEqual(terms[0].destination_address_exclude[0], nacaddr.IPv4('10.62.0.0/15')) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True) def testGoodSrcAddrExcludes(self): pol = HEADER + GOOD_TERM_26 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.62.0.0/15')]] ret = policy.ParsePolicy(pol, self.naming) _, terms = ret.filters[0] self.assertEqual(terms[0].source_address_exclude[0], nacaddr.IPv4('10.62.0.0/15')) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True) def testGoodAddrExcludes(self): pol = HEADER + GOOD_TERM_27 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.62.0.0/15')]] ret = policy.ParsePolicy(pol, self.naming) _, terms = ret.filters[0] self.assertEqual(terms[0].address_exclude[0], nacaddr.IPv4('10.62.0.0/15')) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_EH')], 
any_order=True) def testGoodAddrExcludesFlatten(self): expected = sorted([nacaddr.IPv4(u'10.0.0.0/11'), nacaddr.IPv4(u'10.32.0.0/12'), nacaddr.IPv4(u'10.48.0.0/13'), nacaddr.IPv4(u'10.56.0.0/14'), nacaddr.IPv4(u'10.60.0.0/15'), nacaddr.IPv4(u'10.64.0.0/10'), nacaddr.IPv4(u'10.130.0.0/15'), nacaddr.IPv4(u'10.132.0.0/14'), nacaddr.IPv4(u'10.136.0.0/13'), nacaddr.IPv4(u'10.144.0.0/12'), nacaddr.IPv4(u'10.160.0.0/11'), nacaddr.IPv4(u'10.192.0.0/10')]) pol = HEADER + GOOD_TERM_27 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.62.0.0/15'), nacaddr.IPv4('10.129.0.0/15', strict=False)]] ret = policy.ParsePolicy(pol, self.naming) _, terms = ret.filters[0] terms[0].FlattenAll() self.assertEqual(sorted(terms[0].address), expected) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True) def testGoodAddrExcludesFlattenMultiple(self): pol = HEADER + GOOD_TERM_27 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.1.0.0/16'), nacaddr.IPv4('10.2.0.0/16'), nacaddr.IPv4('10.3.0.0/16'), nacaddr.IPv4('192.168.0.0/16')], [nacaddr.IPv4('10.2.0.0/15')]] ret = policy.ParsePolicy(pol, self.naming) _, terms = ret.filters[0] terms[0].FlattenAll() self.assertEqual(terms[0].address, [nacaddr.IPv4('10.1.0.0/16'), nacaddr.IPv4('192.168.0.0/16')]) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True) def testGoodAddrExcludesFlattenAll(self): pol = HEADER + GOOD_TERM_27 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.1.0.0/16'), nacaddr.IPv4('10.2.0.0/16'), nacaddr.IPv4('10.3.0.0/16')], [nacaddr.IPv4('10.0.0.0/8')]] ret = policy.ParsePolicy(pol, self.naming) _, terms = ret.filters[0] terms[0].FlattenAll() self.assertEqual(terms[0].address, []) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True) def testLogging(self): pol = HEADER + GOOD_TERM_10 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(str(terms[0].logging[0]), 'true') def testBadLogging(self): pol = HEADER + BAD_TERM_6 self.assertRaises(policy.InvalidTermLoggingError, policy.ParsePolicy, pol, self.naming) def testBadAction(self): pol = HEADER + BAD_TERM_7 self.assertRaises(policy.InvalidTermActionError, policy.ParsePolicy, pol, self.naming) def testMultifilter(self): pol = HEADER + GOOD_TERM_1 + HEADER_2 + GOOD_TERM_1 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.headers), 2) def testBadMultifilter(self): pol = HEADER + HEADER_2 + GOOD_TERM_1 self.assertRaises(policy.NoTermsError, policy.ParsePolicy, pol, self.naming) def testICMPTypes(self): pol = HEADER + GOOD_TERM_11 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].icmp_type[0], 'echo-reply') def testBadICMPTypes(self): pol = HEADER + BAD_TERM_12 self.assertRaises(policy.TermInvalidIcmpType, policy.ParsePolicy, pol, self.naming) def testICMPTypesSorting(self): pol = HEADER + TERM_UNSORTED_ICMP_TYPE ret = policy.ParsePolicy(pol, self.naming) icmp_types = ['echo-reply', 'echo-request', 'unreachable'] expected = 'icmp_type: %s' % icmp_types self.assertIn(expected, str(ret)) def testICMPCodesSorting(self): pol = HEADER + TERM_UNSORTED_ICMP_CODE ret = policy.ParsePolicy(pol, self.naming) self.assertIn('icmp_code: [1, 4, 9, 15]', str(ret)) def testReservedWordTermName(self): pol = HEADER + GOOD_TERM_12 ret = 
policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].qos, 'af4') self.assertEqual(terms[0].name, 'qos-good-term-12') def testMultiPortLines(self): pol = HEADER + GOOD_TERM_13 self.naming.GetServiceByProto.side_effect = [['22', '160-162'], ['161']] ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertSequenceEqual(terms[0].source_port, [(22, 22), (160, 162)]) self.naming.GetServiceByProto.assert_has_calls([ mock.call('GOOGLE_PUBLIC', 'udp'), mock.call('SNMP', 'udp')], any_order=True) def testErrorLineNumber(self): pol = HEADER + GOOD_TERM_13 + BAD_TERM_8 self.assertRaisesRegex(policy.ParseError, r'ERROR on "akshun" \(type STRING, line 1', policy.ParsePolicy, pol, self.naming) def testPrefixList(self): spol = HEADER + GOOD_TERM_14 dpol = HEADER + GOOD_TERM_15 # check on the source prefix list ret = policy.ParsePolicy(spol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].source_prefix, ['foo_prefix_list']) # check on the destination prefix list ret = policy.ParsePolicy(dpol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].destination_prefix, ['bar_prefix_list', 'baz_prefix_list']) def testPrefixListExcept(self): spol = HEADER + GOOD_TERM_38 dpol = HEADER + GOOD_TERM_39 # check on the source prefix except list ret = policy.ParsePolicy(spol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].source_prefix_except, ['foo_prefix_list']) # check on the destination prefix except list ret = policy.ParsePolicy(dpol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].destination_prefix_except, ['bar_prefix_list', 'baz_prefix_list']) def testPrefixListMixed(self): spol = HEADER + GOOD_TERM_40 dpol = HEADER + GOOD_TERM_41 # check on the source prefix list with mixed values ret = policy.ParsePolicy(spol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].source_prefix, ['foo_prefix_list']) self.assertEqual(terms[0].source_prefix_except, ['foo_prefix_list_except']) # check on the destination prefix with mixed values ret = policy.ParsePolicy(dpol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].destination_prefix, ['bar_prefix_list']) self.assertEqual(terms[0].destination_prefix_except, ['bar_prefix_list_except']) def testEtherTypes(self): pol = HEADER + GOOD_TERM_16 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].ether_type[0], 'arp') self.assertEqual(terms[0].ether_type[1], 'ipv4') self.assertEqual(terms[0].ether_type[2], 'vlan') def testTrafficTypes(self): pol = HEADER + GOOD_TERM_17 ret = policy.ParsePolicy(pol, self.naming) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(terms[0].traffic_type[0], 'broadcast') self.assertEqual(terms[0].traffic_type[1], 'unknown-unicast') self.assertEqual(terms[0].traffic_type[2], 'multicast') def testBadProtocolEtherTypes(self): pol = HEADER + BAD_TERM_9 self.assertRaises(policy.TermProtocolEtherTypeError, policy.ParsePolicy, pol, self.naming) def testVerbatimTerm(self): pol = policy.ParsePolicy(HEADER + GOOD_TERM_18, self.naming) _, terms = pol.filters[0] self.assertEqual(terms[0].verbatim[0][0], 
'iptables') self.assertEqual(terms[0].verbatim[0][1], 'mary had a little lamb') self.assertEqual(terms[0].verbatim[1][0], 'juniper') self.assertEqual(terms[0].verbatim[1][1], 'mary had another lamb') def testVerbatimMixed(self): pol = HEADER + BAD_TERM_10 self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming) def testIntegerFilterName(self): pol_text = HEADER_3 + GOOD_TERM_0 pol = policy.ParsePolicy(pol_text, self.naming) self.assertEqual(pol.headers[0].target[0].options[0], '50') def testPrecedence(self): pol_text = HEADER + GOOD_TERM_22 pol = policy.ParsePolicy(pol_text, self.naming) self.assertEqual(len(pol.filters), 1) _, terms = pol.filters[0] self.assertEqual(terms[0].precedence, [1]) def testLossPriority(self): self.naming.GetServiceByProto.return_value = ['22'] pol = policy.ParsePolicy(HEADER + GOOD_TERM_23, self.naming) self.assertEqual(len(pol.filters), 1) _, terms = pol.filters[0] self.assertEqual(terms[0].loss_priority, 'low') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testRoutingInstance(self): self.naming.GetServiceByProto.return_value = ['22'] pol = policy.ParsePolicy(HEADER + GOOD_TERM_24, self.naming) self.assertEqual(len(pol.filters), 1) _, terms = pol.filters[0] self.assertEqual(terms[0].routing_instance, 'foobar-router') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testSourceInterface(self): self.naming.GetServiceByProto.return_value = ['22'] pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_25, self.naming) self.assertEqual(len(pol.filters), 1) header, terms = pol.filters[0] self.assertEqual(str(header.target[0]), 'iptables') self.assertEqual(terms[0].source_interface, 'foo0') self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp') def testShadingDetection(self): pol2 = HEADER + GOOD_TERM_2 + GOOD_TERM_3 self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.0.0.0/8')]] self.naming.GetServiceByProto.return_value = ['25'] # same protocol, same saddr, shaded term defines a port. 
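    # GOOD_TERM_2 (tcp from PROD_NETWRK, any port) precedes and is broader
    # than GOOD_TERM_3 (same source plus destination-port SMTP), so parsing
    # with shade_check=True should raise ShadingError.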
self.assertRaises(policy.ShadingError, policy.ParsePolicy, pol2, self.naming, shade_check=True) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('PROD_NETWRK')]) self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testVpnConfigWithoutPairPolicy(self): pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming) self.assertEqual(len(pol.filters), 1) self.assertEqual('special-30', pol.filters[0][1][0].vpn[0]) self.assertEqual('', pol.filters[0][1][0].vpn[1]) def testVpnConfigWithPairPolicy(self): pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_31, self.naming) self.assertEqual(len(pol.filters), 1) self.assertEqual('special-31', pol.filters[0][1][0].vpn[0]) self.assertEqual('policy-11', pol.filters[0][1][0].vpn[1]) def testForwardingClassPolicy(self): pol = policy.ParsePolicy(HEADER + GOOD_TERM_32, self.naming) self.assertEqual(['fritzy'], pol.filters[0][1][0].forwarding_class) def testMultipleForwardingClassPolicy(self): pol = policy.ParsePolicy(HEADER + GOOD_TERM_36, self.naming) self.assertEqual(['flashy', 'fritzy'], pol.filters[0][1][0].forwarding_class) def testForwardingClassEqual(self): pol_text = HEADER + GOOD_TERM_32 + GOOD_TERM_33 ret = policy.ParsePolicy(pol_text, self.naming, shade_check=False) self.assertEqual(len(ret.filters), 1) _, terms = ret.filters[0] self.assertEqual(len(terms), 2) self.assertNotEqual(terms[0], terms[1]) def testTagSupportAndNetworkHeaderParsing(self): pol = policy.ParsePolicy(HEADER_5 + GOOD_TERM_34, self.naming) self.assertEqual(len(pol.filters), 1) header, terms = pol.filters[0] self.assertEqual(str(header.target[0]), 'gce') self.assertEqual(header.FilterOptions('gce'), ['global/networks/default']) self.assertEqual(terms[0].source_tag, ['src-tag']) self.assertEqual(terms[0].destination_tag, ['dest-tag']) def testEq(self): """Sanity test to verify __eq__ works on Policy objects.""" policy1 = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming) policy2 = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming) policy3 = policy.ParsePolicy(HEADER_5 + GOOD_TERM_34, self.naming) self.assertEqual(policy1, policy2) self.assertNotEqual(policy1, policy3) self.assertNotEqual(policy2, policy3) def testNextIP(self): pol = HEADER_2 + GOOD_TERM_35 expected = nacaddr.IPv4('10.1.1.1/32') self.naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.1.1.1/32')]] result = policy.ParsePolicy(pol, self.naming) self.assertEqual(result.filters[0][1][0].next_ip[0], expected) self.naming.GetNetAddr.assert_has_calls([ mock.call('PROD_NETWRK'), mock.call('NEXT_IP')]) def testStr(self): """Sanity test to verify __eq__ works on Policy objects.""" pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming) logging.info('Ensuring string formatting doesn\'t throw errors: %s', pol) def testTermAddressByteLength(self): """Tests the AddressByteLength function.""" pol = HEADER + GOOD_TERM_2 self.naming.GetNetAddr.return_value = [ nacaddr.IPv4('10.0.0.1/32'), nacaddr.IPv4('10.0.0.2/32'), nacaddr.IPv6('2001:4860:4860::8844/128'), nacaddr.IPv6('2001:4860:4860::8888/128')] ret = policy.ParsePolicy(pol, self.naming) term = ret.filters[0][1][0] self.assertEqual(2, term.AddressesByteLength([4])) self.assertEqual(8, term.AddressesByteLength([6])) self.assertEqual(10, term.AddressesByteLength()) # pylint: enable=maybe-no-member def testICMPCodes(self): pol = HEADER + GOOD_TERM_42 result = policy.ParsePolicy(pol, self.naming) self.assertIn('icmp_code: [3, 4]', str(result)) def testBadICMPCodes(self): pol = HEADER + 
BAD_TERM_13 pol2 = HEADER + BAD_TERM_14 self.assertRaises(policy.ICMPCodeError, policy.ParsePolicy, pol, self.naming) self.assertRaises(policy.ICMPCodeError, policy.ParsePolicy, pol2, self.naming) def testOptimizedConsistency(self): pol = HEADER + GOOD_TERM_2 + GOOD_TERM_3 unoptimized_addr = [nacaddr.IPv4('10.16.128.6/32'), nacaddr.IPv4('10.16.128.7/32')] optimized_addr = nacaddr.CollapseAddrList(unoptimized_addr) self.naming.GetNetAddr.return_value = unoptimized_addr self.naming.GetServiceByProto.return_value = ['25'] ret_unoptimized = policy.ParsePolicy(pol, self.naming, optimize=False) self.assertFalse(policy._OPTIMIZE) ret_optimized = policy.ParsePolicy(pol, self.naming) self.assertTrue(policy._OPTIMIZE) for _, terms in ret_unoptimized.filters: for term in terms: self.assertEqual(unoptimized_addr, term.source_address) for _, terms in ret_optimized.filters: for term in terms: self.assertEqual(optimized_addr, term.source_address) def testShadeCheckConsistency(self): pol = HEADER + TERM_SUPER_3 + TERM_SUB_2 self.assertRaises(policy.ShadingError, policy.ParsePolicy, pol, self.naming, shade_check=True) self.assertTrue(policy._SHADE_CHECK) _ = policy.ParsePolicy(pol, self.naming) self.assertFalse(policy._SHADE_CHECK) def testEncapsulate(self): pol = HEADER + GOOD_TERM_46 result = policy.ParsePolicy(pol, self.naming) self.assertIn('encapsulate: stuff_and_things', str(result)) def testDecapsulate(self): pol = HEADER + GOOD_TERM_49 result = policy.ParsePolicy(pol, self.naming) self.assertIn('decapsulate: mpls-in-udp', str(result)) def testPortMirror(self): pol = HEADER + GOOD_TERM_47 result = policy.ParsePolicy(pol, self.naming) self.assertIn('port_mirror: true', str(result)) def testSrxGLobalZone(self): pol = HEADER + GOOD_TERM_48 result = policy.ParsePolicy(pol, self.naming) zones = ['zone1', 'zone2'] expected_source = 'source_zone: %s' % zones expected_destination = 'destination_zone: %s' % zones self.assertIn(expected_source, str(result)) self.assertIn(expected_destination, str(result)) def testTTL(self): pol = HEADER + GOOD_TERM_43 result = policy.ParsePolicy(pol, self.naming) self.assertIn('ttl: 10', str(result)) def testInvalidTTL(self): pol = HEADER + BAD_TERM_15 self.assertRaises(policy.InvalidTermTTLValue, policy.ParsePolicy, pol, self.naming) def testNeedAddressBook(self): pol1 = policy.ParsePolicy(HEADER + GOOD_TERM_1, self.naming) pol2 = policy.ParsePolicy(HEADER_SRX + GOOD_TERM_1, self.naming) pol3 = policy.ParsePolicy(HEADER_OBJ_GRP + GOOD_TERM_1, self.naming) pol4 = policy.ParsePolicy(HEADER_ADDRBOOK_MIXED + GOOD_TERM_1, self.naming) self.assertFalse(pol1._NeedsAddressBook()) self.assertTrue(pol2._NeedsAddressBook()) self.assertTrue(pol3._NeedsAddressBook()) self.assertTrue(pol4._NeedsAddressBook()) def testAddressCleanupCorrect(self): unoptimized_addr = [nacaddr.IPv4('10.16.128.6/32', token='FOO'), nacaddr.IPv4('10.16.128.7/32', token='BAR')] self.naming.GetNetAddr.return_value = unoptimized_addr pol = policy.ParsePolicy(HEADER + GOOD_TERM_2, self.naming) term = pol.filters[0][1][0] self.assertEqual(nacaddr.CollapseAddrList(unoptimized_addr), term.source_address) pol = policy.ParsePolicy(HEADER_SRX + GOOD_TERM_2, self.naming) term = pol.filters[0][1][0] self.assertEqual(nacaddr.CollapseAddrListPreserveTokens(unoptimized_addr), term.source_address) def testLogLimit(self): pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_44, self.naming) term = pol.filters[0][1][0] self.assertEqual((u'999', u'day'), term.log_limit) def testGREandTCPUDPError(self): pol = HEADER + BAD_TERM_16 
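    # BAD_TERM_16 sets a destination-port while mixing port-based protocols
    # (tcp, udp) with gre, which carries no ports; parsing should raise
    # MixedPortandNonPortProtos.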
self.naming.GetServiceByProto.return_value = ['25'] self.assertRaises(policy.MixedPortandNonPortProtos, policy.ParsePolicy, pol, self.naming) def testSourceServiceAccount(self): pol = HEADER_HF_1 + GOOD_TERM_50 result = policy.ParsePolicy(pol, self.naming) term = result.filters[0][1][0] self.assertEqual( ['acct1@blah.com'], term.source_service_accounts) def testTargetServiceAccount(self): pol = HEADER_HF_1 + GOOD_TERM_45 result = policy.ParsePolicy(pol, self.naming) term = result.filters[0][1][0] self.assertEqual( ['acct1@blah.com'], term.target_service_accounts) # Contains Tests def testVerbatimContains(self): term_one = policy.Term(policy.VarType(23, ('iptables', 'foo'))) term_two = policy.Term(policy.VarType(23, ('iptables', 'bar'))) term_three = policy.Term(policy.VarType(23, ('juniper', 'foo'))) self.assertIn(term_one, term_one) self.assertNotIn(term_two, term_one) self.assertNotIn(term_three, term_one) @mock.patch.object(policy, 'DEFINITIONS') def testIpAndPortContains(self, mock_naming): mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.1.1.1/32')]] term_one = policy.Term([policy.VarType(3, 'PROD'), policy.VarType(7, (22, 22)), policy.VarType(7, (80, 80)), policy.VarType(10, 'tcp')]) term_one.AddObject(policy.VarType(2, 'accept')) term_two = policy.Term([policy.VarType(3, 'SMALLER_PROD'), policy.VarType(7, (22, 22)), policy.VarType(10, 'tcp')]) term_two.AddObject(policy.VarType(2, 'accept')) self.assertIn(term_two, term_one) self.assertNotIn(term_one, term_two) @mock.patch.object(policy, 'DEFINITIONS') def testEmptyIpContains(self, mock_naming): # testTermContains2 differs from testTermContains in that TERM_SUPER_2 # only defines a source addres. it's meant to catch the case where # the containing term has less detail (and is hence, less restrictive) # than the contained term mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.1.1.1/32')]] term_one = policy.Term([policy.VarType(5, 'PROD')]) term_one.AddObject(policy.VarType(2, 'accept')) term_two = policy.Term([policy.VarType(3, 'SMALLER_PROD'), policy.VarType(7, (22, 22))]) term_two.AddObject(policy.VarType(2, 'accept')) self.assertIn(term_two, term_one) self.assertNotIn(term_one, term_two) @mock.patch.object(policy, 'DEFINITIONS') def testIpExcludeContains(self, mock_naming): # This 'contains' test kicks the tires on source-address and # source-address-exclude. mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.62.0.0/15')]] term_one = policy.Term([policy.VarType(3, 'FOO')]) term_two = policy.Term([policy.VarType(3, 'FOO'), policy.VarType(11, 'BAR')]) self.assertIn(term_two, term_one) self.assertNotIn(term_one, term_two) @mock.patch.object(policy, 'DEFINITIONS') def testIpDualExcludeContains(self, mock_naming): # One term has (10.0.0.0/8, except 10.10.0.0/24), it should contain a term # that has (10.0.0.0/8 except 10.0.0.0/9. mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.10.0.0/24')], [nacaddr.IPv4('10.0.0.0/8')], [nacaddr.IPv4('10.0.0.0/9')]] term_one = policy.Term([policy.VarType(3, 'FOO'), policy.VarType(11, 'BAR')]) term_two = policy.Term([policy.VarType(3, 'FOO'), policy.VarType(11, 'BAR')]) self.assertIn(term_two, term_one) self.assertNotIn(term_one, term_two) def testOptionsContains(self): # Tests 'contains' testing of the options field. A term without set options # contains one which has them set. 
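    # The assertions below exercise subset semantics: tcp_est_term, whose
    # options are a subset of tcp_udp_est_term's, is contained in it, while
    # the reverse is not, and the option-less term and tcp_est_term do not
    # contain each other.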
tcp_est_term = policy.Term([policy.VarType(9, 'tcp-established')]) term = policy.Term([]) tcp_udp_est_term = policy.Term([policy.VarType(9, 'tcp-established'), policy.VarType(9, 'established')]) self.assertNotIn(term, tcp_est_term) self.assertNotIn(tcp_est_term, term) self.assertIn(tcp_est_term, tcp_udp_est_term) self.assertNotIn(tcp_udp_est_term, tcp_est_term) def testPrecedenceContains(self): # Tests 'contains' testing of the precedence field. A term without set # precedence contains one which has them set. p_term = policy.Term([policy.VarType(26, 1)]) no_p_term = policy.Term([]) self.assertIn(p_term, p_term) self.assertIn(no_p_term, no_p_term) self.assertNotIn(no_p_term, p_term) self.assertNotIn(p_term, no_p_term) def testProtocolExceptContains(self): # Test the protocol-except keyword. pexcept_term = policy.Term([policy.VarType(8, 'tcp')]) pexpect_term_udp = policy.Term([policy.VarType(8, 'udp')]) p_term = policy.Term([policy.VarType(10, 'icmp')]) p_term_tcp = policy.Term([policy.VarType(10, 'tcp')]) self.assertIn(p_term, pexcept_term) self.assertIn(pexcept_term, pexcept_term) self.assertNotIn(p_term_tcp, pexcept_term) self.assertNotIn(pexpect_term_udp, pexcept_term) def testProtocolTermNotInAnotherTermContains(self): term_one = policy.Term([policy.VarType(10, 'tcp')]) term_two = policy.Term([policy.VarType(10, 'udp')]) self.assertNotIn(term_one, term_two) def testTargetServiceAccountContains(self): two_target_sa = ['acct1@blah.com', 'acct2@blah.com'] one_target_sa = ['acct3@blah.com'] term = policy.Term([policy.VarType(60, two_target_sa)]) self.assertIn(two_target_sa, term.target_service_accounts) term.AddObject(policy.VarType(60, one_target_sa)) self.assertIn(one_target_sa, term.target_service_accounts) def testProtoExceptNotInEmptyTerm(self): term_one = policy.Term([policy.VarType(8, 'tcp')]) term_two = policy.Term([]) self.assertNotIn(term_two, term_one) def testProtocolNotInProtoExcept(self): term_one = policy.Term([policy.VarType(8, 'tcp')]) term_two = policy.Term([policy.VarType(10, 'udp')]) self.assertNotIn(term_one, term_two) def testProtocolNotInEmptyTerm(self): term_one = policy.Term([policy.VarType(10, 'tcp')]) term_two = policy.Term([]) self.assertNotIn(term_two, term_one) @mock.patch.object(policy, 'DEFINITIONS') def testAddrNotInAddr(self, mock_naming): mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('192.168.1.1/32')], [nacaddr.IPv4('10.1.1.0/24')], [nacaddr.IPv4('10.1.1.0/24')], [nacaddr.IPv4('10.1.1.0/24')]] term = policy.Term([policy.VarType(5, 'FOO')]) addr_term = policy.Term([policy.VarType(5, 'FOO')]) saddr_term = policy.Term([policy.VarType(3, 'FOO')]) daddr_term = policy.Term([policy.VarType(4, 'FOO')]) self.assertNotIn(addr_term, term) self.assertNotIn(saddr_term, term) self.assertNotIn(daddr_term, term) @mock.patch.object(policy, 'DEFINITIONS') def testDestAddrNotInDestAddr(self, mock_naming): mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('192.168.1.1/32')], [nacaddr.IPv4('10.1.1.0/24')]] term_one = policy.Term([policy.VarType(4, 'FOO')]) term_two = policy.Term([policy.VarType(4, 'FOO')]) self.assertNotIn(term_one, term_two) def testSourcePortNotInSourcePort(self): term_one = policy.Term([policy.VarType(6, (22, 22))]) term_two = policy.Term([policy.VarType(6, (23, 23))]) self.assertNotIn(term_one, term_two) def testDestinationPortNotInDestinationPort(self): term_one = policy.Term([policy.VarType(7, (22, 22))]) term_two = policy.Term([policy.VarType(7, (23, 23))]) self.assertNotIn(term_one, term_two) def testSourcePrefixContains(self): term_one = 
policy.Term([policy.VarType(19, 'foo')]) self.assertIn(term_one, term_one) def testSourcePrefixNotInSourcePrefix(self): term_one = policy.Term([policy.VarType(19, 'foo')]) term_two = policy.Term([policy.VarType(19, 'bar')]) self.assertNotIn(term_one, term_two) def testDestinationPrefixContains(self): term_one = policy.Term([policy.VarType(20, 'foo')]) self.assertIn(term_one, term_one) def testDestinationPrefixNotInDestinationPrefix(self): term_one = policy.Term([policy.VarType(20, 'foo')]) term_two = policy.Term([policy.VarType(20, 'bar')]) self.assertNotIn(term_one, term_two) def testSourcePrefixExceptContains(self): term_one = policy.Term([policy.VarType(50, 'foo')]) self.assertIn(term_one, term_one) def testSourcePrefixExceptNotInSourcePrefixExcept(self): term_one = policy.Term([policy.VarType(50, 'foo')]) term_two = policy.Term([policy.VarType(50, 'bar')]) self.assertNotIn(term_one, term_two) def testDestinationPrefixExceptContains(self): term_one = policy.Term([policy.VarType(51, 'foo')]) self.assertIn(term_one, term_one) def testDestinationPrefixExceptNotInDestinationPrefixExcept(self): term_one = policy.Term([policy.VarType(51, 'foo')]) term_two = policy.Term([policy.VarType(51, 'bar')]) self.assertNotIn(term_one, term_two) def testSourceTagContains(self): term_one = policy.Term([policy.VarType(44, 'foo')]) self.assertIn(term_one, term_one) def testSourceTagNotInSourceTag(self): term_one = policy.Term([policy.VarType(44, 'foo')]) term_two = policy.Term([policy.VarType(44, 'bar')]) self.assertNotIn(term_one, term_two) def testForwardingClassContains(self): term_one = policy.Term([policy.VarType(43, 'foo')]) term_two = policy.Term( [policy.VarType(43, 'bar'), policy.VarType(43, 'foo')]) self.assertIn(term_one, term_one) self.assertIn(term_one, term_two) def testForwardingClassNotIn(self): term_one = policy.Term([policy.VarType(43, 'foo')]) term_two = policy.Term([policy.VarType(43, 'bar')]) term_three = policy.Term([]) self.assertNotIn(term_one, term_two) self.assertNotIn(term_three, term_one) def testForwardingClassExceptContains(self): term_one = policy.Term([policy.VarType(52, 'foo')]) self.assertIn(term_one, term_one) def testForwardingClassExceptNotIn(self): term_one = policy.Term([policy.VarType(52, 'foo')]) term_two = policy.Term([policy.VarType(52, 'bar')]) term_three = policy.Term([]) self.assertNotIn(term_one, term_two) self.assertNotIn(term_three, term_one) @mock.patch.object(policy, 'DEFINITIONS') def testNextIPContained(self, mock_naming): mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('192.168.1.1/32')]] term_one = policy.Term([policy.VarType(46, 'FOO')]) self.assertIn(term_one, term_one) @mock.patch.object(policy, 'DEFINITIONS') def testNextIPNotIn(self, mock_naming): mock_naming.GetNetAddr.side_effect = [ [nacaddr.IPv4('192.168.1.1/32')]] term_one = policy.Term([policy.VarType(46, 'FOO')]) term_two = policy.Term([]) self.assertNotIn(term_two, term_one) def testPortContains(self): # Test 'contains' against port field and that it matches # source/destination/port fields. 
port_term = policy.Term([policy.VarType(32, (25, 25))]) sport_term = policy.Term([policy.VarType(6, (25, 25))]) dport_term = policy.Term([policy.VarType(7, (25, 25))]) self.assertIn(sport_term, port_term) self.assertIn(dport_term, port_term) self.assertIn(port_term, port_term) alt_port_term = policy.Term([policy.VarType(32, (25, 30))]) sport_term = policy.Term([policy.VarType(6, (25, 30))]) dport_term = policy.Term([policy.VarType(7, (25, 30))]) self.assertNotIn(alt_port_term, port_term) self.assertNotIn(sport_term, port_term) self.assertNotIn(dport_term, port_term) def testFragmentOffset(self): fo_term = policy.Term([]) fo_term.AddObject(policy.VarType(17, '80')) fo_range_term = policy.Term([]) fo_range_term.AddObject(policy.VarType(17, '60-90')) fo_smaller_range_term = policy.Term([]) fo_smaller_range_term.AddObject(policy.VarType(17, '65-82')) term = policy.Term([]) self.assertIn(fo_term, fo_term) self.assertIn(fo_term, fo_range_term) self.assertNotIn(fo_range_term, fo_term) self.assertIn(fo_smaller_range_term, fo_range_term) self.assertNotIn(fo_range_term, fo_smaller_range_term) self.assertNotIn(term, fo_term) def testTermTargetResources(self): target_resources = [('p1', 'v1'), ('p2', 'v2')] target_resource_2 = [('p3', 'v3')] term_one = policy.Term( [policy.VarType(policy.VarType.TARGET_RESOURCES, target_resources)]) term_one.AddObject(policy.VarType(59, target_resource_2)) self.assertIn(target_resources, term_one.target_resources) self.assertIn(target_resource_2, term_one.target_resources) def testParsePolicySingleTargetResources(self): good_term_target_resources = """ term target-resource-term { action:: deny target-resources:: (proj1,vpc1) }""" pol = HEADER_HF_1 + good_term_target_resources p = policy.ParsePolicy(pol, self.naming) self.assertIsInstance(p, policy.Policy) _, terms = p.filters[0] self.assertIn('deny', terms[0].action) self.assertIn(('proj1', 'vpc1'), terms[0].target_resources) def testParsePolicyMultipleTargetResources(self): good_term_target_resources = """ term target-resource-term { action:: deny target-resources:: (proj1,vpc1) target-resources:: (proj2,vpc2) target-resources:: (proj3,vpc3) target-resources:: (proj4,vpc4) }""" pol = HEADER_HF_1 + good_term_target_resources p = policy.ParsePolicy(pol, self.naming) self.assertIsInstance(p, policy.Policy) _, terms = p.filters[0] self.assertIn('deny', terms[0].action) expected_target_resources = [('proj1', 'vpc1'), ('proj2', 'vpc2'), ('proj3', 'vpc3'), ('proj4', 'vpc4')] self.assertListEqual(expected_target_resources, terms[0].target_resources) def testParsePolicyMultipleCommaSepTargetResources(self): good_term_target_resources = """ term target-resource-term { action:: deny target-resources:: (proj1,vpc1),(proj2,vpc2),(proj3,vpc3),(proj4,vpc4) }""" pol = HEADER_HF_1 + good_term_target_resources p = policy.ParsePolicy(pol, self.naming) self.assertIsInstance(p, policy.Policy) _, terms = p.filters[0] self.assertIn('deny', terms[0].action) expected_target_resources = [('proj1', 'vpc1'), ('proj2', 'vpc2'), ('proj3', 'vpc3'), ('proj4', 'vpc4')] self.assertListEqual(expected_target_resources, terms[0].target_resources) def testParsePolicyMultipleSpaceSepTargetResources(self): good_term_target_resources = """ term target-resource-term { action:: deny target-resources:: (proj1,vpc1) (proj2,vpc2) (proj3,vpc3) (proj4,vpc4) }""" pol = HEADER_HF_1 + good_term_target_resources p = policy.ParsePolicy(pol, self.naming) self.assertIsInstance(p, policy.Policy) _, terms = p.filters[0] self.assertIn('deny', terms[0].action) 
expected_target_resources = [('proj1', 'vpc1'), ('proj2', 'vpc2'), ('proj3', 'vpc3'), ('proj4', 'vpc4')] self.assertListEqual(expected_target_resources, terms[0].target_resources) def testParsePolicyMultipleArrayCommaTargetResources(self): good_term_target_resources = """ term target-resource-term { action:: deny target-resources:: [(proj1,vpc1),(proj2,vpc2),(proj3,vpc3),(proj4,vpc4)] }""" pol = HEADER_HF_1 + good_term_target_resources p = policy.ParsePolicy(pol, self.naming) self.assertIsInstance(p, policy.Policy) _, terms = p.filters[0] self.assertIn('deny', terms[0].action) expected_target_resources = [('proj1', 'vpc1'), ('proj2', 'vpc2'), ('proj3', 'vpc3'), ('proj4', 'vpc4')] self.assertListEqual(expected_target_resources, terms[0].target_resources) def testParsePolicyMultipleArraySpaceTargetResources(self): good_term_target_resources = """ term target-resource-term { action:: deny target-resources:: [(proj1,vpc1) (proj2,vpc2) (proj3,vpc3) (proj4,vpc4)] }""" pol = HEADER_HF_1 + good_term_target_resources p = policy.ParsePolicy(pol, self.naming) self.assertIsInstance(p, policy.Policy) _, terms = p.filters[0] self.assertIn('deny', terms[0].action) expected_target_resources = [('proj1', 'vpc1'), ('proj2', 'vpc2'), ('proj3', 'vpc3'), ('proj4', 'vpc4')] self.assertListEqual(expected_target_resources, terms[0].target_resources) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/sonic_test.py000066400000000000000000000344641437377527500176170ustar00rootroot00000000000000# Copyright 2022 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for Sonic rendering module.""" import json from unittest import mock from absl.testing import absltest from absl.testing import parameterized from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy from capirca.lib import sonic GOOD_HEADER = """ header { comment:: "The general policy comment." target:: sonic MyPolicyName inet } """ class SonicTest(parameterized.TestCase): def setUp(self): super().setUp() self.addCleanup(mock.patch.stopall) self.naming = naming.Naming("./def") self.mock_naming_get_net_addr = mock.patch.object( self.naming, "GetNetAddr", autospec=True).start() self.mock_naming_get_net_addr.return_value = [ nacaddr.IP("10.2.3.4/32"), nacaddr.IP("2001:4860:8000::5/128"), ] # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
self.exp_info = 2 def testSingleSrcIPv4(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) self.mock_naming_get_net_addr.assert_called_once_with("CORP_EXTERNAL") def testSingleSrcSingleDstIPv4(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL destination-address:: CORP_EXTERNAL action:: accept } """ expected = json.loads("""{ "ACL_RULE": { "MyPolicyName|RULE_10": { "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32", "DST_IP": "10.2.3.4/32" } } }""") acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testSingleSrcSingleDstIPv6(self): header = """ header { comment:: "The general policy comment." target:: sonic MyPolicyName inet6 } """ pol = """ term good-term-1 { source-address:: CORP_EXTERNAL destination-address:: CORP_EXTERNAL action:: accept } """ expected = json.loads("""{ "ACL_RULE": { "MyPolicyName|RULE_10": { "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IPV6": "2001:4860:8000::5/128", "DST_IPV6": "2001:4860:8000::5/128" } } }""") acl = sonic.Sonic( policy.ParsePolicy(header + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testMultiSrcIPv4(self): self.mock_naming_get_net_addr.return_value = [ nacaddr.IP("10.2.3.4/32"), nacaddr.IP("4.4.4.4/32"), ] pol = """ term good-term-1 { source-address:: CORP_EXTERNAL action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IP": "4.4.4.4/32" }, "MyPolicyName|RULE_20": { "PRIORITY": "65516", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testMultiSrcMultiDstIPv4(self): self.mock_naming_get_net_addr.return_value = [ nacaddr.IP("10.2.3.4/32"), nacaddr.IP("4.4.4.4/32"), ] expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "DST_IP": "4.4.4.4/32", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IP": "4.4.4.4/32" }, "MyPolicyName|RULE_20": { "DST_IP": "10.2.3.4/32", "PACKET_ACTION": "FORWARD", "PRIORITY": "65516", "SRC_IP": "4.4.4.4/32" }, "MyPolicyName|RULE_30": { "DST_IP": "4.4.4.4/32", "PACKET_ACTION": "FORWARD", "PRIORITY": "65506", "SRC_IP": "10.2.3.4/32" }, "MyPolicyName|RULE_40": { "DST_IP": "10.2.3.4/32", "PACKET_ACTION": "FORWARD", "PRIORITY": "65496", "SRC_IP": "10.2.3.4/32" } } } """) pol = """ term good-term-1 { source-address:: CORP_EXTERNAL destination-address:: CORP_EXTERNAL action:: accept } """ acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testDrop(self): pol = """ term good-term-1 { action:: deny } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "PRIORITY": "65526", "PACKET_ACTION": "DROP" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testMultiTerm(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL action:: accept } term good-term-2 { 
destination-address:: CORP_EXTERNAL action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" }, "MyPolicyName|RULE_20": { "PRIORITY": "65516", "PACKET_ACTION": "FORWARD", "DST_IP": "10.2.3.4/32" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testProtocols(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: tcp udp action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "IP_PROTOCOL": "6", "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" }, "MyPolicyName|RULE_20": { "IP_PROTOCOL": "17", "PRIORITY": "65516", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testICMPv4(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: icmp icmp-type:: echo-request echo-reply action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "ICMP_TYPE": "0", "IP_PROTOCOL": "1", "PRIORITY": "65526", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" }, "MyPolicyName|RULE_20": { "ICMP_TYPE": "8", "IP_PROTOCOL": "1", "PRIORITY": "65516", "PACKET_ACTION": "FORWARD", "SRC_IP": "10.2.3.4/32" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testICMPv6(self): header = """ header { comment:: "The general policy comment." target:: sonic MyPolicyName inet6 } """ pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: icmpv6 icmp-type:: echo-request echo-reply action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "ICMPV6_TYPE": "128", "IP_PROTOCOL": "58", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IPV6": "2001:4860:8000::5/128" }, "MyPolicyName|RULE_20": { "ICMPV6_TYPE": "129", "IP_PROTOCOL": "58", "PACKET_ACTION": "FORWARD", "PRIORITY": "65516", "SRC_IPV6": "2001:4860:8000::5/128" } } }""") acl = sonic.Sonic( policy.ParsePolicy(header + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testICMPv4DoesNotRenderIPv6(self): mixed_af_header = """ header { comment:: "The general policy comment." target:: sonic MyPolicyName inet inet6 } """ self.mock_naming_get_net_addr.return_value = [ nacaddr.IP("2001:a:b:c::/128"), ] pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: icmp icmp-type:: echo-request action:: accept } """ acl = sonic.Sonic( policy.ParsePolicy(mixed_af_header + pol, self.naming), self.exp_info) self.assertEqual({"ACL_RULE": {}}, json.loads(str(acl))) def testICMPv6DoesNotRenderIPv4(self): mixed_af_header = """ header { comment:: "The general policy comment." 
target:: sonic MyPolicyName inet inet6 } """ self.mock_naming_get_net_addr.return_value = [ nacaddr.IP("1.2.3.4/32"), ] pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: icmpv6 icmp-type:: echo-request action:: accept } """ acl = sonic.Sonic( policy.ParsePolicy(mixed_af_header + pol, self.naming), self.exp_info) self.assertEqual({"ACL_RULE": {}}, json.loads(str(acl))) def testSrcPortSingle(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: tcp source-port:: SSH HTTPS action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "IP_PROTOCOL": "6", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IP": "10.2.3.4/32", "L4_SRC_PORT": "22" }, "MyPolicyName|RULE_20": { "IP_PROTOCOL": "6", "PACKET_ACTION": "FORWARD", "PRIORITY": "65516", "SRC_IP": "10.2.3.4/32", "L4_SRC_PORT": "443" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testSrcPortRange(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: udp source-port:: TRACEROUTE action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "IP_PROTOCOL": "17", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IP": "10.2.3.4/32", "L4_SRC_PORT_RANGE": "33434-33534" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testDstPortSingle(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: tcp destination-port:: SSH HTTPS action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "IP_PROTOCOL": "6", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IP": "10.2.3.4/32", "L4_DST_PORT": "22" }, "MyPolicyName|RULE_20": { "IP_PROTOCOL": "6", "PACKET_ACTION": "FORWARD", "PRIORITY": "65516", "SRC_IP": "10.2.3.4/32", "L4_DST_PORT": "443" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testDstPortRange(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: udp destination-port:: TRACEROUTE action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "IP_PROTOCOL": "17", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IP": "10.2.3.4/32", "L4_DST_PORT_RANGE": "33434-33534" } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testTCPEstablished(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: tcp option:: tcp-established action:: accept } """ expected = json.loads(""" { "ACL_RULE": { "MyPolicyName|RULE_10": { "IP_PROTOCOL": "6", "PACKET_ACTION": "FORWARD", "PRIORITY": "65526", "SRC_IP": "10.2.3.4/32", "TCP_FLAGS": [ "0x10/0x10", "0x4/0x4" ] } } } """) acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual(expected, json.loads(str(acl))) def testTermWithWrongPlatform(self): pol = """ term good-term-1 { source-address:: CORP_EXTERNAL protocol:: tcp source-port:: SSH action:: accept platform:: FAKEPLATFORM } """ acl = sonic.Sonic( policy.ParsePolicy(GOOD_HEADER + pol, self.naming), self.exp_info) self.assertEqual({"ACL_RULE": {}}, json.loads(str(acl))) if __name__ == "__main__": absltest.main() 
capirca-2.0.9/tests/lib/speedway_test.py000066400000000000000000000124621437377527500203170ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for Speedway rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import naming from capirca.lib import policy from capirca.lib import speedway GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: speedway INPUT ACCEPT } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmp action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: icmp policer:: batman action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_interface', 'destination_port', 'destination_prefix', 'expiration', 'fragment_offset', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'log_limit', 'name', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'protocol', 'routing_instance', 'source_address', 'source_address_exclude', 'source_interface', 'source_port', 'source_prefix', 'translated', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'initial', 'sample', 'tcp-established', 'tcp-initial', 'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 class SpeedwayTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testSpeedwayOutputFormat(self): acl = speedway.Speedway(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) result = [] result.extend(str(acl).split('\n')) self.assertEqual('*filter', result[0], '*filter designation does not appear at top of generated ' 'policy.') self.assertIn(':INPUT ACCEPT', result, 'input default policy of accept not set.') self.assertIn('-N I_good-term-1', result, 'did not find new chain for good-term-1.') self.assertIn( '-A I_good-term-1 -p icmp -m state --state NEW,ESTABLISHED,RELATED' ' -j ACCEPT', result, 'did not find append for good-term-1.') self.assertEqual('COMMIT', result[len(result)-2], 'COMMIT does not appear at end of output policy.') def testBuildTokens(self): pol1 = speedway.Speedway(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = speedway.Speedway(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/srxlo_test.py000066400000000000000000000172371437377527500176520ustar00rootroot00000000000000# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for Srxlo rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import naming from capirca.lib import policy from capirca.lib import srxlo GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: srxlo test-filter inet6 } """ GOOD_HEADER_2 = """ header { comment:: "this is a test acl" target:: srxlo test-filter inet } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmpv6 action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: icmpv6 icmp-type:: destination-unreachable action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: icmpv6 action:: accept option:: inactive } """ GOOD_TERM_4 = """ term good-term-4 { protocol:: icmp action:: accept } """ GOOD_TERM_5 = """ term good-term-5 { protocol-except:: icmp action:: accept } """ GOOD_TERM_6 = """ term good-term-6 { protocol-except:: icmpv6 action:: accept } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_prefix', 'destination_prefix_except', 'dscp_except', 'dscp_match', 'dscp_set', 'ether_type', 'expiration', 'filter_term', 'forwarding_class', 'forwarding_class_except', 'fragment_offset', 'hop_limit', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'loss_priority', 'name', 'next_ip', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'policer', 'port', 'precedence', 'protocol', 'protocol_except', 'qos', 'restrict_address_family', 'routing_instance', 'source_address', 'source_address_exclude', 'source_port', 'source_prefix', 'source_prefix_except', 'traffic_class_count', 'traffic_type', 'translated', 'ttl', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'inactive', 'is-fragment', '.*', # not actually a lex token! 'sample', 'tcp-established', 'tcp-initial'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 class SRXloTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testIcmp(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_1 + GOOD_TERM_4, self.naming), EXP_INFO)) self.assertIn('protocol icmp;', output, 'missing or incorrect ICMP specification') self.assertNotIn('icmp6;', output, 'missing or incorrect ICMP specification') self.assertNotIn('icmpv6;', output, 'missing or incorrect ICMP specification') def testIcmpv6(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1 + GOOD_TERM_4, self.naming), EXP_INFO)) self.assertIn('next-header icmp6;', output, 'missing or incorrect ICMPv6 specification') self.assertNotIn('icmp;', output, 'missing or incorrect ICMPv6 specification') def testIcmpv6Type(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO)) self.assertIn('next-header icmp6;', output, 'missing or incorrect ICMPv6 specification') self.assertIn('icmp-type 1;', output, 'missing or incorrect ICMPv6 type specification') def testBuildTokens(self): # self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol1 = srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.max_diff = None self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = srxlo.SRXlo(policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testInactiveTerm(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO)) self.assertIn('inactive: term good-term-3 {', output) def testIcmpExcept(self): output = str( srxlo.SRXlo( policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_5, self.naming), EXP_INFO)) self.assertIn('protocol-except icmp;', output, 'missing or incorrect ICMP specification in protocol-except') self.assertNotIn( 'icmp6;', output, 'missing or incorrect ICMP specification in protocol-except') self.assertNotIn( 'icmpv6;', output, 'missing or incorrect ICMP specification in protocol-except') def testIcmpv6Except(self): output = str( srxlo.SRXlo( policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_6, self.naming), EXP_INFO)) self.assertIn( 'next-header-except icmp6;', output, 'missing or incorrect ICMPv6 specification in protocol-except') self.assertNotIn( 'icmp;', output, 'missing or incorrect ICMPv6 specification in protocol-except') if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/summarizer_test.py000066400000000000000000000166401437377527500206760ustar00rootroot00000000000000# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for discontinuous subnet mask summarizer.""" import os import random import time from absl.testing import absltest from absl import logging from capirca.lib import nacaddr from capirca.lib import summarizer class SummarizerTest(absltest.TestCase): def setUp(self): super().setUp() random_seed = int(time.time()) value = os.environ.get('TEST_RANDOM_SEED', '') try: random_seed = int(value) except ValueError: pass logging.info('Seeding random generator with seed %d', random_seed) random.seed(random_seed) def testToDottedQuad(self): net = summarizer.DSMNet(1 << 32, 4294967264) self.assertRaises(ValueError) net = summarizer.DSMNet(3232235584, 1 << 16) self.assertRaises(ValueError) net = summarizer.DSMNet(3232235584, 4294967264) self.assertEqual(summarizer.ToDottedQuad(net), ('192.168.0.64', '255.255.255.224')) net = summarizer.DSMNet(3232235584, 4294901984) self.assertEqual(summarizer.ToDottedQuad(net, negate=True), ('192.168.0.64', '0.0.255.31')) test_data = [(summarizer.DSMNet(3232235584, 4294967295), True, ('192.168.0.64', '32')), (summarizer.DSMNet(3232235584, 4294901760), True, ('192.168.0.64', '16')), (summarizer.DSMNet(3232235584, 4294967294), True, ('192.168.0.64', '31')), (summarizer.DSMNet(3232235584, 4290772992), True, ('192.168.0.64', '10')), (summarizer.DSMNet(3232235584, 4294966016), True, ('192.168.0.64', '255.255.251.0')), (summarizer.DSMNet(3232235584, 4294901504), True, ('192.168.0.64', '255.254.255.0'))] for net, nondsm, expected in test_data: self.assertEqual(summarizer.ToDottedQuad(net, nondsm=nondsm), expected) def testInt32ToDottedQuad(self): self.assertEqual(summarizer._Int32ToDottedQuad(3232235584), '192.168.0.64') def testSummarizeEmptyList(self): nets = [] result = summarizer.Summarize(nets) self.assertEqual(result, []) def testSummarizeNoNetworks(self): nets = [] for octet in range(0, 256): net = nacaddr.IPv4('192.' + str(255 - octet) + '.' + str(octet) + '.64/27') nets.append(net) random.shuffle(nets) result = summarizer.Summarize(nets) self.assertEqual(len(result), 256) def testSummarizeSomeNetworks(self): nets = [ # continiously summarizable to one /25 nacaddr.IPv4('192.168.0.0/27'), nacaddr.IPv4('192.168.0.32/27'), nacaddr.IPv4('192.168.0.64/27'), nacaddr.IPv4('192.168.0.96/27'), # discontiniously summarizable with above nacaddr.IPv4('128.168.0.0/25'), # not summarizable with above nacaddr.IPv4('10.0.0.0/8'), ] for octet in range(0, 256): net = nacaddr.IPv4('172.16.' + str(octet) + '.96/30') nets.append(net) random.shuffle(nets) result = summarizer.Summarize(nets) self.assertEqual(result, [summarizer.DSMNet(167772160, 4278190080), summarizer.DSMNet(2158493696, 3221225344), summarizer.DSMNet(2886729824, 4294902012)]) def testSummarizeAllNetworks(self): nets = [] for octet in range(0, 256): net = nacaddr.IPv4('192.168.' 
+ str(octet) + '.64/27') nets.append(net) random.shuffle(nets) result = summarizer.Summarize(nets) # summarizes to 192.168.0.64 / 255.255.0.224 self.assertEqual(result, [summarizer.DSMNet(3232235584, 4294901984)]) def testSummarizeToAllSpace(self): nets = [ nacaddr.IPv4('0.0.0.0/1'), nacaddr.IPv4('128.0.0.0/1'), ] random.shuffle(nets) result = summarizer.Summarize(nets) self.assertEqual(result, [summarizer.DSMNet(0, 0)]) def testNacaddrNetToDSMNet(self): nacaddr_net = nacaddr.IPv4('192.168.0.64/27') dsm_net = summarizer.DSMNet(3232235584, 4294967264, '') self.assertEqual(summarizer._NacaddrNetToDSMNet(nacaddr_net), dsm_net) def testToPrettyBinaryFormat(self): # 192.168.0.64 self.assertEqual(summarizer._ToPrettyBinaryFormat(3232235584), '11000000 10101000 00000000 01000000') # 8.8.8.8 self.assertEqual(summarizer._ToPrettyBinaryFormat(134744072), '00001000 00001000 00001000 00001000') # 0.0.0.0 self.assertEqual(summarizer._ToPrettyBinaryFormat(0), '00000000 00000000 00000000 00000000') # fc00::1 self.assertEqual( summarizer._ToPrettyBinaryFormat( 334965454937798799971759379190646833153), '11111100 00000000 00000000 00000000 00000000 00000000 00000000 ' '00000000 00000000 00000000 00000000 00000000 00000000 00000000 ' '00000000 00000001') def testSummarizeDSMONetworks(self): fourth_octet = [2, 8, 20, 26, 28, 32, 40, 52, 58, 86, 130, 136, 148, 154, 156, 160, 168, 180, 186, 214] nets = list() for octet3 in range(56, 60): for octet4 in fourth_octet: nets.append(nacaddr.IPv4('192.168.' + str(octet3) + '.' + str(octet4) + '/31')) result = summarizer.Summarize(nets) self.assertEqual(result, [summarizer.DSMNet(3232249858, 4294966398), summarizer.DSMNet(3232249864, 4294966366), summarizer.DSMNet(3232249876, 4294966390), summarizer.DSMNet(3232249882, 4294966366), summarizer.DSMNet(3232249888, 4294966398), summarizer.DSMNet(3232249908, 4294966398), summarizer.DSMNet(3232249942, 4294966398), ]) def testMergeText(self): existing_comment = 'comment that already exists' addition = 'addition' dsm_net = summarizer.DSMNet(167772160, 4278190080) self.assertEqual(dsm_net.MergeText(addition), addition) dsm_net = summarizer.DSMNet(167772160, 4278190080, existing_comment) self.assertEqual(dsm_net.MergeText(existing_comment), existing_comment) dsm_net = summarizer.DSMNet(167772160, 4278190080, existing_comment) self.assertEqual(dsm_net.MergeText(addition), existing_comment + ', ' + addition) def testOrder(self): nets = [ # not discontinously summarizable with the other two nacaddr.IPv4('209.85.147.129/32'), # discontinuosly summarizable, but should come before the first one nacaddr.IPv4('74.125.20.129/32'), nacaddr.IPv4('74.125.21.129/32'), ] result = summarizer.Summarize(nets) self.assertEqual(result, [summarizer.DSMNet(1249711233, 4294967039), summarizer.DSMNet(3512046465, 4294967295) ]) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/windows_advfirewall_test.py000066400000000000000000000255001437377527500225450ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Unittest for windows_advfirewall rendering module.""" import datetime from absl.testing import absltest from unittest import mock from capirca.lib import aclgenerator from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy from capirca.lib import windows from capirca.lib import windows_advfirewall GOOD_HEADER_OUT = """ header { comment:: "this is an out test acl" target:: windows_advfirewall out inet } """ GOOD_HEADER_IN = """ header { comment:: "this is an in test acl" target:: windows_advfirewall in inet } """ GOOD_SIMPLE = """ term good-simple { protocol:: tcp action:: accept } """ GOOD_SIMPLE_WARNING = """ term good-simple-warning { protocol:: tcp policer:: batman action:: accept } """ GOOD_TERM_ICMP = """ term good-term-icmp { protocol:: icmp action:: accept } """ GOOD_TERM_ANYPROTO = """ term good-term-anyproto { source-address:: FOO destination-address:: FOO action:: accept } """ GOOD_TERM_MISCPROTO = """ term good-term-miscproto { protocol:: vrrp action:: accept } """ # Edge case: protocol value for hopopt is 0 GOOD_TERM_HOPOPT = """ term good-term-hopopt { protocol:: hopopt action:: accept } """ GOOD_TERM_ICMP_TYPES = """ term good-term-icmp-types { protocol:: icmp icmp-type:: echo-reply unreachable time-exceeded action:: deny } """ GOOD_TERM_ICMPV6 = """ term good-term-icmpv6 { protocol:: icmpv6 action:: accept } """ BAD_TERM_ICMP = """ term test-icmp { icmp-type:: echo-request echo-reply action:: accept } """ BAD_TERM_ACTION = """ term bad-term-action { protocol:: icmp action:: undefined } """ GOOD_TERM_TCP = """ term good-term-tcp { comment:: "Test term 1" destination-address:: PROD_NETWRK destination-port:: SMTP protocol:: tcp action:: accept } """ GOOD_TERM_LOG = """ term good-term-log { protocol:: tcp logging:: true action:: accept } """ EXPIRED_TERM = """ term expired_test { expiration:: 2000-1-1 action:: deny } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ NEXT_TERM = """ term next { action:: next } """ NEXT_LOG_TERM = """ term next-log { logging:: true action:: next } """ EXCEPTION_POLICY = """ header { comment:: "Header comment" target:: windows_advfirewall out inet } term accept-corpdns { comment:: "accept-corpdns comment1" comment:: "accept-corpdns comment2" destination-address:: CORP_ANYCAST_DNS destination-port:: DNS protocol:: udp action:: accept } term deny-to-google { comment:: "deny-to-google comment" destination-address:: INTERNAL action:: deny } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'stateless_reply', 'name', 'option', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'translated', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 
'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class WindowsAdvFirewallTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def assertTrue(self, strings, result, term): for string in strings: fullstring = 'netsh advfirewall firewall add rule %s' % (string) super().assertIn( fullstring, result, 'did not find "%s" for %s\nGot:\n%s' % (fullstring, term, result)) def testTcp(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['name=o_good-term-tcp enable=yes interfacetype=any dir=out localip=any' ' remoteip=10.0.0.0/8 remoteport=25 protocol=tcp action=allow'], result, 'did not find actual term for good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NETWRK') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testIcmp(self): acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['name=o_good-term-icmp enable=yes interfacetype=any dir=out' ' localip=any remoteip=any protocol=icmpv4 action=allow'], result, 'did not find actual term for good-term-icmp') def testIcmpTypes(self): acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + GOOD_TERM_ICMP_TYPES, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['name=o_good-term-icmp-types enable=yes interfacetype=any dir=out' ' localip=any remoteip=any protocol=icmpv4:0,any action=block', 'name=o_good-term-icmp-types enable=yes interfacetype=any dir=out' ' localip=any remoteip=any protocol=icmpv4:3,any action=block', 'name=o_good-term-icmp-types enable=yes interfacetype=any dir=out' ' localip=any remoteip=any protocol=icmpv4:11,any action=block'], result, 'did not find actual term for good-term-icmp-types') def testBadIcmp(self): acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + BAD_TERM_ICMP, self.naming), EXP_INFO) self.assertRaises(aclgenerator.UnsupportedFilterError, str, acl) @mock.patch.object(windows.logging, 'warning') def testExpiredTerm(self, mock_warn): windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired ' 'and will not be rendered.', 'expired_test', 'out') @mock.patch.object(windows.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), 
EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'out') def testMultiprotocol(self): acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + MULTIPLE_PROTOCOLS_TERM, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['name=o_multi-proto enable=yes interfacetype=any dir=out localip=any' ' remoteip=any protocol=tcp action=allow', 'name=o_multi-proto enable=yes interfacetype=any dir=out localip=any' ' remoteip=any protocol=udp action=allow', 'name=o_multi-proto enable=yes interfacetype=any dir=out localip=any' ' remoteip=any protocol=icmpv4 action=allow'], result, 'did not find actual term for multi-proto') def testAnyProtocol(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + GOOD_TERM_ANYPROTO, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['name=o_good-term-anyproto enable=yes interfacetype=any dir=out' ' localip=10.0.0.0/8 remoteip=10.0.0.0/8 protocol=any action=allow'], result, '"any" proto') def testMiscProtocol(self): acl = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_OUT + GOOD_TERM_MISCPROTO + GOOD_TERM_HOPOPT, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['name=o_good-term-miscproto enable=yes interfacetype=any dir=out' ' localip=any remoteip=any protocol=112 action=allow', 'name=o_good-term-hopopt enable=yes interfacetype=any dir=out' ' localip=any remoteip=any protocol=0 action=allow'], result, 'explicit miscellaneous proto') def testBuildTokens(self): pol1 = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_IN + GOOD_SIMPLE, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = windows_advfirewall.WindowsAdvFirewall(policy.ParsePolicy( GOOD_HEADER_IN + GOOD_SIMPLE_WARNING, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/windows_ipsec_test.py000066400000000000000000000155321437377527500213540ustar00rootroot00000000000000# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for windows_ipsec rendering module.""" import datetime from absl.testing import absltest from unittest import mock from capirca.lib import nacaddr from capirca.lib import naming from capirca.lib import policy from capirca.lib import windows_ipsec GOOD_HEADER = """ header { comment:: "this is a test acl" target:: windows_ipsec test-filter } """ GOOD_SIMPLE = """ term good-simple { protocol:: tcp action:: accept } """ GOOD_SIMPLE_WARNING = """ term good-simple-warning { protocol:: tcp policer:: batman action:: accept } """ GOOD_TERM_ICMP = """ term good-term-icmp { protocol:: icmp action:: accept } """ BAD_TERM_ICMP = """ term test-icmp { icmp-type:: echo-request echo-reply action:: accept } """ GOOD_TERM_TCP = """ term good-term-tcp { comment:: "Test term 1" destination-address:: PROD_NET destination-port:: SMTP protocol:: tcp action:: accept } """ EXPIRED_TERM = """ term expired_test { expiration:: 2000-1-1 action:: deny } """ EXPIRING_TERM = """ term is_expiring { expiration:: %s action:: accept } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'stateless_reply', 'name', 'option', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'translated', } SUPPORTED_SUB_TOKENS = {'action': {'accept', 'deny'}} # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. EXP_INFO = 2 class WindowsIPSecTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) # pylint: disable=invalid-name def assertTrue(self, strings, result, term): for string in strings: fullstring = 'netsh ipsec static add %s' % (string) super().assertIn( fullstring, result, 'did not find "%s" for %s' % (fullstring, term)) def testPolicy(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] acl = windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['policy name=test-filter-policy assign=yes'], result, 'header') self.naming.GetNetAddr.assert_called_once_with('PROD_NET') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testTcp(self): self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')] self.naming.GetServiceByProto.return_value = ['25'] acl = windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_TCP, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['filteraction name=t_good-term-tcp-action action=permit', 'filter filterlist=t_good-term-tcp-list mirrored=yes srcaddr=any ' ' dstaddr=10.0.0.0 dstmask=8 dstport=25', 'rule name=t_good-term-tcp-rule policy=test-filter' ' filterlist=t_good-term-tcp-list' ' filteraction=t_good-term-tcp-action'], result, 'good-term-tcp') self.naming.GetNetAddr.assert_called_once_with('PROD_NET') self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp') def testIcmp(self): acl = windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + GOOD_TERM_ICMP, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['filterlist name=t_good-term-icmp-list', 'filteraction name=t_good-term-icmp-action action=permit', 'filter filterlist=t_good-term-icmp-list mirrored=yes srcaddr=any ' ' dstaddr=any', 'rule name=t_good-term-icmp-rule 
policy=test-filter' ' filterlist=t_good-term-icmp-list' ' filteraction=t_good-term-icmp-action'], result, 'good-term-icmp') @mock.patch.object(windows_ipsec.logging, 'warning') def testExpiredTerm(self, mock_warn): windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + EXPIRED_TERM, self.naming), EXP_INFO) mock_warn.assert_called_once_with( 'WARNING: Term %s in policy %s is expired and ' 'will not be rendered.', 'expired_test', 'test-filter') @mock.patch.object(windows_ipsec.logging, 'info') def testExpiringTerm(self, mock_info): exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO) windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + EXPIRING_TERM % exp_date.strftime('%Y-%m-%d'), self.naming), EXP_INFO) mock_info.assert_called_once_with( 'INFO: Term %s in policy %s expires in ' 'less than two weeks.', 'is_expiring', 'test-filter') def testMultiprotocol(self): acl = windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming), EXP_INFO) result = str(acl) self.assertTrue( ['filterlist name=t_multi-proto-list', 'filteraction name=t_multi-proto-action action=permit', 'filter filterlist=t_multi-proto-list mirrored=yes srcaddr=any ' ' dstaddr=any protocol=tcp', 'filter filterlist=t_multi-proto-list mirrored=yes srcaddr=any ' ' dstaddr=any protocol=udp', 'filter filterlist=t_multi-proto-list mirrored=yes srcaddr=any ' ' dstaddr=any protocol=icmp', 'rule name=t_multi-proto-rule policy=test-filter' ' filterlist=t_multi-proto-list filteraction=t_multi-proto-action'], result, 'multi-proto') def testBuildTokens(self): pol1 = windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + GOOD_SIMPLE, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = windows_ipsec.WindowsIPSec(policy.ParsePolicy( GOOD_HEADER + GOOD_SIMPLE_WARNING, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/lib/windows_test.py000066400000000000000000000113411437377527500201630ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittest for windows acl rendering module.""" from absl.testing import absltest from unittest import mock from capirca.lib import naming from capirca.lib import policy from capirca.lib import windows GOOD_HEADER = """ header { comment:: "this is a test acl" target:: windows test-filter } """ MULTIPLE_PROTOCOLS_TERM = """ term multi-proto { protocol:: tcp udp icmp action:: accept } """ GOOD_WARNING_TERM = """ term good-warning-term { protocol:: tcp udp icmp policer:: batman action:: accept } """ GOOD_TERM = """ term good-term { source-port:: FOO destination-port:: BAR protocol:: tcp action:: accept } """ TCP_ESTABLISHED_TERM = """ term tcp-established { source-port:: FOO destination-port:: BAR protocol:: tcp option:: tcp-established action:: accept } """ UDP_ESTABLISHED_TERM = """ term udp-established-term { source-port:: FOO destination-port:: BAR protocol:: udp option:: established action:: accept } """ SUPPORTED_TOKENS = { 'action', 'comment', 'destination_address', 'destination_address_exclude', 'destination_port', 'expiration', 'icmp_type', 'stateless_reply', 'name', 'option', 'platform', 'platform_exclude', 'protocol', 'source_address', 'source_address_exclude', 'source_port', 'translated', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 class WindowsGeneratorTest(absltest.TestCase): def setUp(self): super().setUp() self.naming = mock.create_autospec(naming.Naming) def testBuildTokens(self): pol1 = windows.WindowsGenerator( policy.ParsePolicy(GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = windows.WindowsGenerator(policy.ParsePolicy( GOOD_HEADER + GOOD_WARNING_TERM, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEqual(st, SUPPORTED_TOKENS) self.assertEqual(sst, SUPPORTED_SUB_TOKENS) def testSkipEstablished(self): # self.naming.GetNetAddr.return_value = _IPSET self.naming.GetServiceByProto.return_value = ['123'] pol = windows.WindowsGenerator(policy.ParsePolicy( GOOD_HEADER + TCP_ESTABLISHED_TERM + GOOD_TERM, self.naming), EXP_INFO) self.assertEqual(len(pol.windows_policies[0][4]), 1) pol = windows.WindowsGenerator(policy.ParsePolicy( GOOD_HEADER + UDP_ESTABLISHED_TERM + GOOD_TERM, self.naming), EXP_INFO) self.assertEqual(len(pol.windows_policies[0][4]), 1) if __name__ == '__main__': absltest.main() capirca-2.0.9/tests/unit/000077500000000000000000000000001437377527500152715ustar00rootroot00000000000000capirca-2.0.9/tests/unit/wrapwords_test.py000066400000000000000000000046061437377527500207400ustar00rootroot00000000000000from capirca.lib.aclgenerator import WrapWords import pytest SINGLE_LINE_OVERFLOW_TEXT_LONG = \ "http://github.com/google/capirca/commit/c5" + \ "6ddf19e2679892ff078cf27aeb18310c2697ed This " + \ "is a long header. It's long on purpose. It's " + \ "purpose is to test that the splitting works co" + \ "rrectly. It should be well over the line limit" + \ ". If it is shorter, it would not test the limit." SINGLE_LINE_OVERFLOW_TEXT_LONG_EXPECTED = [ "http://github.com/google/capirca/commit/c56ddf19e2679892ff078cf27aeb18", "310c2697ed", "This is a long header. It's long on purpose. It's purpose is to test", "that the splitting works correctly. It should be well over the line", "limit. If it is shorter, it would not test the limit." ] MULTI_LINE_OVERFLOW_TEXT_LONG = \ "this is a veryveryveryveryveryveryveryveryver" + \ "yveryveryveryveryveryveryveryveryveryveryvery" + \ "veryveryveryveryveryveryveryveryveryveryveryv" + \ "eryveryveryveryveryveryveryveryveryveryvery long word" MULTI_LINE_OVERFLOW_TEXT_LONG_EXPECTED = [ "this is a", "veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryve", "ryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryvery", "veryveryveryveryveryveryvery", "long word" ] NO_OVERFLOW_LONG = \ "This " + \ "is a long header. It's long on purpose. It's " + \ "purpose is to test that the splitting works co" + \ "rrectly. It should be well over the line limit" + \ ". If it is shorter, it would not test the limit." NO_OVERFLOW_LONG_EXPECTED = [ "This is a long header. It's long on purpose. It's purpose is to test", "that the splitting works correctly. It should be well over the line", "limit. If it is shorter, it would not test the limit." 
] NO_OVERFLOW_SHORT = \ "This is a short line of text" NO_OVERFLOW_SHORT_EXPECTED = [ "This is a short line of text" ] @pytest.mark.parametrize("test_input,expected", [ (NO_OVERFLOW_SHORT, NO_OVERFLOW_SHORT_EXPECTED), (NO_OVERFLOW_LONG, NO_OVERFLOW_LONG_EXPECTED), (SINGLE_LINE_OVERFLOW_TEXT_LONG, SINGLE_LINE_OVERFLOW_TEXT_LONG_EXPECTED), (MULTI_LINE_OVERFLOW_TEXT_LONG, MULTI_LINE_OVERFLOW_TEXT_LONG_EXPECTED) ] ) def testWrapWords(test_input, expected): result = WrapWords([test_input], 70) assert all((res == exp for res, exp in zip(result, expected))) capirca-2.0.9/tests/utils/000077500000000000000000000000001437377527500154525ustar00rootroot00000000000000capirca-2.0.9/tests/utils/__init__.py000066400000000000000000000000001437377527500175510ustar00rootroot00000000000000capirca-2.0.9/tests/utils/address_exclude_test_cases.txt000066400000000000000000001117421437377527500235740ustar00rootroot00000000000000fddf:d8b5:d34c:c98c::/64 fddf:d8b5:d34c:c98c:9700::/72,fddf:d8b5:d34c:c98c:850::/76,fddf:d8b5:d34c:c98c:6f20::/76,fddf:d8b5:d34c:c98c:9b90::/76,fddf:d8b5:d34c:c98c:f490::/76,fddf:d8b5:d34c:c98c:8458::/77,fddf:d8b5:d34c:c98c:a810::/77,fddf:d8b5:d34c:c98c:ab00::/77,fddf:d8b5:d34c:c98c:bba8::/77,fddf:d8b5:d34c:c98c:174::/78,fddf:d8b5:d34c:c98c:1c80::/78,fddf:d8b5:d34c:c98c:5c84::/78,fddf:d8b5:d34c:c98c:6688::/78,fddf:d8b5:d34c:c98c:6eac::/78,fddf:d8b5:d34c:c98c:8258::/78,fddf:d8b5:d34c:c98c:c948::/78,fddf:d8b5:d34c:c98c:df68::/78,fddf:d8b5:d34c:c98c:e79c::/78,fddf:d8b5:d34c:c98c:fbf8::/78,fddf:d8b5:d34c:c98c:17b2::/79,fddf:d8b5:d34c:c98c:1a76::/79,fddf:d8b5:d34c:c98c:1b40::/79,fddf:d8b5:d34c:c98c:294a::/79,fddf:d8b5:d34c:c98c:4548::/79,fddf:d8b5:d34c:c98c:6d72::/79,fddf:d8b5:d34c:c98c:9ff8::/79,fddf:d8b5:d34c:c98c:c1f0::/79,fddf:d8b5:d34c:c98c:c70e::/79,fddf:d8b5:d34c:c98c:dd6e::/79,fddf:d8b5:d34c:c98c:dfae::/79 
fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/68,fddf:d8b5:d34c:c98c:9000::/70,fddf:d8b5:d34c:c98c:9400::/71,fddf:d8b5:d34c:c98c:9600::/72,fddf:d8b5:d34c:c98c:9800::/69,fddf:d8b5:d34c:c98c:a000::/67,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/69,fddf:d8b5:d34c:c98c:800::/74,fddf:d8b5:d34c:c98c:840::/76,fddf:d8b5:d34c:c98c:860::/75,fddf:d8b5:d34c:c98c:880::/73,fddf:d8b5:d34c:c98c:900::/72,fddf:d8b5:d34c:c98c:a00::/71,fddf:d8b5:d34c:c98c:c00::/70,fddf:d8b5:d34c:c98c:1000::/68,fddf:d8b5:d34c:c98c:2000::/67,fddf:d8b5:d34c:c98c:4000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/66,fddf:d8b5:d34c:c98c:4000::/67,fddf:d8b5:d34c:c98c:6000::/69,fddf:d8b5:d34c:c98c:6800::/70,fddf:d8b5:d34c:c98c:6c00::/71,fddf:d8b5:d34c:c98c:6e00::/72,fddf:d8b5:d34c:c98c:6f00::/75,fddf:d8b5:d34c:c98c:6f30::/76,fddf:d8b5:d34c:c98c:6f40::/74,fddf:d8b5:d34c:c98c:6f80::/73,fddf:d8b5:d34c:c98c:7000::/68,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/68,fddf:d8b5:d34c:c98c:9000::/69,fddf:d8b5:d34c:c98c:9800::/71,fddf:d8b5:d34c:c98c:9a00::/72,fddf:d8b5:d34c:c98c:9b00::/73,fddf:d8b5:d34c:c98c:9b80::/76,fddf:d8b5:d34c:c98c:9ba0::/75,fddf:d8b5:d34c:c98c:9bc0::/74,fddf:d8b5:d34c:c98c:9c00::/70,fddf:d8b5:d34c:c98c:a000::/67,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/67,fddf:d8b5:d34c:c98c:e000::/68,fddf:d8b5:d34c:c98c:f000::/70,fddf:d8b5:d34c:c98c:f400::/73,fddf:d8b5:d34c:c98c:f480::/76,fddf:d8b5:d34c:c98c:f4a0::/75,fddf:d8b5:d34c:c98c:f4c0::/74,fddf:d8b5:d34c:c98c:f500::/72,fddf:d8b5:d34c:c98c:f600::/71,fddf:d8b5:d34c:c98c:f800::/69;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/70,fddf:d8b5:d34c:c98c:8400::/74,fddf:d8b5:d34c:c98c:8440::/76,fddf:d8b5:d34c:c98c:8450::/77,fddf:d8b5:d34c:c98c:8460::/75,fddf:d8b5:d34c:c98c:8480::/73,fddf:d8b5:d34c:c98c:8500::/72,fddf:d8b5:d34c:c98c:8600::/71,fddf:d8b5:d34c:c98c:8800::/69,fddf:d8b5:d34c:c98c:9000::/68,fddf:d8b5:d34c:c98c:a000::/67,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/67,fddf:d8b5:d34c:c98c:a000::/69,fddf:d8b5:d34c:c98c:a800::/76,fddf:d8b5:d34c:c98c:a818::/77,fddf:d8b5:d34c:c98c:a820::/75,fddf:d8b5:d34c:c98c:a840::/74,fddf:d8b5:d34c:c98c:a880::/73,fddf:d8b5:d34c:c98c:a900::/72,fddf:d8b5:d34c:c98c:aa00::/71,fddf:d8b5:d34c:c98c:ac00::/70,fddf:d8b5:d34c:c98c:b000::/68,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/67,fddf:d8b5:d34c:c98c:a000::/69,fddf:d8b5:d34c:c98c:a800::/71,fddf:d8b5:d34c:c98c:aa00::/72,fddf:d8b5:d34c:c98c:ab08::/77,fddf:d8b5:d34c:c98c:ab10::/76,fddf:d8b5:d34c:c98c:ab20::/75,fddf:d8b5:d34c:c98c:ab40::/74,fddf:d8b5:d34c:c98c:ab80::/73,fddf:d8b5:d34c:c98c:ac00::/70,fddf:d8b5:d34c:c98c:b000::/68,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/67,fddf:d8b5:d34c:c98c:a000::/68,fddf:d8b5:d34c:c98c:b000::/69,fddf:d8b5:d34c:c98c:b800::/71,fddf:d8b5:d34c:c98c:ba00::/72,fddf:d8b5:d34c:c98c:bb00::/73,fddf:d8b5:d34c:c98c:bb80::/75,fddf:d8b5:d34c:c98c:bba0::/77,fddf:d8b5:d34c:c98c:bbb0::/76,fddf:d8b5:d34c:c98c:bbc0::/74,fddf:d8b5:d34c:c98c:bc00::/70,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/72,fddf:d8b5:d34c:c98c:100::/74,fddf:d8b5:d34c:c98c:140::/75,fddf:d8b5:d34c:c98c:160::/76,fddf:d8b5:d34c:c98c:170::/78,fddf:d8b5:d34c:c98c:178::/77,fddf:d8b5:d34c:c98c:180::/73,fddf:d8b5:d34c:c98c:200::/71,fddf:d8b5:d34c:c98c:400::/70,fddf:d8b5:d34c:c98c:800::/69,fddf:d8b5:d34c:c98c:1000::/68,fddf:d8b5:d34c:c98c:2000::/67,fddf:d8b5:d34c:c98c:4
000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/68,fddf:d8b5:d34c:c98c:1000::/69,fddf:d8b5:d34c:c98c:1800::/70,fddf:d8b5:d34c:c98c:1c00::/73,fddf:d8b5:d34c:c98c:1c84::/78,fddf:d8b5:d34c:c98c:1c88::/77,fddf:d8b5:d34c:c98c:1c90::/76,fddf:d8b5:d34c:c98c:1ca0::/75,fddf:d8b5:d34c:c98c:1cc0::/74,fddf:d8b5:d34c:c98c:1d00::/72,fddf:d8b5:d34c:c98c:1e00::/71,fddf:d8b5:d34c:c98c:2000::/67,fddf:d8b5:d34c:c98c:4000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/66,fddf:d8b5:d34c:c98c:4000::/68,fddf:d8b5:d34c:c98c:5000::/69,fddf:d8b5:d34c:c98c:5800::/70,fddf:d8b5:d34c:c98c:5c00::/73,fddf:d8b5:d34c:c98c:5c80::/78,fddf:d8b5:d34c:c98c:5c88::/77,fddf:d8b5:d34c:c98c:5c90::/76,fddf:d8b5:d34c:c98c:5ca0::/75,fddf:d8b5:d34c:c98c:5cc0::/74,fddf:d8b5:d34c:c98c:5d00::/72,fddf:d8b5:d34c:c98c:5e00::/71,fddf:d8b5:d34c:c98c:6000::/67,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/66,fddf:d8b5:d34c:c98c:4000::/67,fddf:d8b5:d34c:c98c:6000::/70,fddf:d8b5:d34c:c98c:6400::/71,fddf:d8b5:d34c:c98c:6600::/73,fddf:d8b5:d34c:c98c:6680::/77,fddf:d8b5:d34c:c98c:668c::/78,fddf:d8b5:d34c:c98c:6690::/76,fddf:d8b5:d34c:c98c:66a0::/75,fddf:d8b5:d34c:c98c:66c0::/74,fddf:d8b5:d34c:c98c:6700::/72,fddf:d8b5:d34c:c98c:6800::/69,fddf:d8b5:d34c:c98c:7000::/68,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/66,fddf:d8b5:d34c:c98c:4000::/67,fddf:d8b5:d34c:c98c:6000::/69,fddf:d8b5:d34c:c98c:6800::/70,fddf:d8b5:d34c:c98c:6c00::/71,fddf:d8b5:d34c:c98c:6e00::/73,fddf:d8b5:d34c:c98c:6e80::/75,fddf:d8b5:d34c:c98c:6ea0::/77,fddf:d8b5:d34c:c98c:6ea8::/78,fddf:d8b5:d34c:c98c:6eb0::/76,fddf:d8b5:d34c:c98c:6ec0::/74,fddf:d8b5:d34c:c98c:6f00::/72,fddf:d8b5:d34c:c98c:7000::/68,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/71,fddf:d8b5:d34c:c98c:8200::/74,fddf:d8b5:d34c:c98c:8240::/76,fddf:d8b5:d34c:c98c:8250::/77,fddf:d8b5:d34c:c98c:825c::/78,fddf:d8b5:d34c:c98c:8260::/75,fddf:d8b5:d34c:c98c:8280::/73,fddf:d8b5:d34c:c98c:8300::/72,fddf:d8b5:d34c:c98c:8400::/70,fddf:d8b5:d34c:c98c:8800::/69,fddf:d8b5:d34c:c98c:9000::/68,fddf:d8b5:d34c:c98c:a000::/67,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/69,fddf:d8b5:d34c:c98c:c800::/72,fddf:d8b5:d34c:c98c:c900::/74,fddf:d8b5:d34c:c98c:c940::/77,fddf:d8b5:d34c:c98c:c94c::/78,fddf:d8b5:d34c:c98c:c950::/76,fddf:d8b5:d34c:c98c:c960::/75,fddf:d8b5:d34c:c98c:c980::/73,fddf:d8b5:d34c:c98c:ca00::/71,fddf:d8b5:d34c:c98c:cc00::/70,fddf:d8b5:d34c:c98c:d000::/68,fddf:d8b5:d34c:c98c:e000::/67;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/68,fddf:d8b5:d34c:c98c:d000::/69,fddf:d8b5:d34c:c98c:d800::/70,fddf:d8b5:d34c:c98c:dc00::/71,fddf:d8b5:d34c:c98c:de00::/72,fddf:d8b5:d34c:c98c:df00::/74,fddf:d8b5:d34c:c98c:df40::/75,fddf:d8b5:d34c:c98c:df60::/77,fddf:d8b5:d34c:c98c:df6c::/78,fddf:d8b5:d34c:c98c:df70::/76,fddf:d8b5:d34c:c98c:df80::/73,fddf:d8b5:d34c:c98c:e000::/67;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/67,fddf:d8b5:d34c:c98c:e000::/70,fddf:d8b5:d34c:c98c:e400::/71,fddf:d8b5:d34c:c98c:e600::/72,fddf:d8b5:d34c:c98c:e700::/73,fddf:d8b5:d34c:c98c:e780::/76,fddf:d8b5:d34c:c98c:e790::/77,fddf:d8b5:d34c:c98c:e798::/78,fddf:d8b5:d34c:c98c:e7a0::/75,fddf:d8b5:d34c:c98c:e7c0::/74,fddf:d8b5:d34c:c98c:e800::/69,fddf:d8b5:d34c:c98c:f000::/68;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/67,fddf:d8b5:d34c:c98c:e000::/68,fddf:d8b5:d34c:c98c:f000::/69,fddf:d8b5:d34c:c98c:f800::/71,fddf:d8b5:d34c:c98c:f
a00::/72,fddf:d8b5:d34c:c98c:fb00::/73,fddf:d8b5:d34c:c98c:fb80::/74,fddf:d8b5:d34c:c98c:fbc0::/75,fddf:d8b5:d34c:c98c:fbe0::/76,fddf:d8b5:d34c:c98c:fbf0::/77,fddf:d8b5:d34c:c98c:fbfc::/78,fddf:d8b5:d34c:c98c:fc00::/70;fddf:d8b5:d34c:c98c::/68,fddf:d8b5:d34c:c98c:1000::/70,fddf:d8b5:d34c:c98c:1400::/71,fddf:d8b5:d34c:c98c:1600::/72,fddf:d8b5:d34c:c98c:1700::/73,fddf:d8b5:d34c:c98c:1780::/75,fddf:d8b5:d34c:c98c:17a0::/76,fddf:d8b5:d34c:c98c:17b0::/79,fddf:d8b5:d34c:c98c:17b4::/78,fddf:d8b5:d34c:c98c:17b8::/77,fddf:d8b5:d34c:c98c:17c0::/74,fddf:d8b5:d34c:c98c:1800::/69,fddf:d8b5:d34c:c98c:2000::/67,fddf:d8b5:d34c:c98c:4000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/68,fddf:d8b5:d34c:c98c:1000::/69,fddf:d8b5:d34c:c98c:1800::/71,fddf:d8b5:d34c:c98c:1a00::/74,fddf:d8b5:d34c:c98c:1a40::/75,fddf:d8b5:d34c:c98c:1a60::/76,fddf:d8b5:d34c:c98c:1a70::/78,fddf:d8b5:d34c:c98c:1a74::/79,fddf:d8b5:d34c:c98c:1a78::/77,fddf:d8b5:d34c:c98c:1a80::/73,fddf:d8b5:d34c:c98c:1b00::/72,fddf:d8b5:d34c:c98c:1c00::/70,fddf:d8b5:d34c:c98c:2000::/67,fddf:d8b5:d34c:c98c:4000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/68,fddf:d8b5:d34c:c98c:1000::/69,fddf:d8b5:d34c:c98c:1800::/71,fddf:d8b5:d34c:c98c:1a00::/72,fddf:d8b5:d34c:c98c:1b00::/74,fddf:d8b5:d34c:c98c:1b42::/79,fddf:d8b5:d34c:c98c:1b44::/78,fddf:d8b5:d34c:c98c:1b48::/77,fddf:d8b5:d34c:c98c:1b50::/76,fddf:d8b5:d34c:c98c:1b60::/75,fddf:d8b5:d34c:c98c:1b80::/73,fddf:d8b5:d34c:c98c:1c00::/70,fddf:d8b5:d34c:c98c:2000::/67,fddf:d8b5:d34c:c98c:4000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/67,fddf:d8b5:d34c:c98c:2000::/69,fddf:d8b5:d34c:c98c:2800::/72,fddf:d8b5:d34c:c98c:2900::/74,fddf:d8b5:d34c:c98c:2940::/77,fddf:d8b5:d34c:c98c:2948::/79,fddf:d8b5:d34c:c98c:294c::/78,fddf:d8b5:d34c:c98c:2950::/76,fddf:d8b5:d34c:c98c:2960::/75,fddf:d8b5:d34c:c98c:2980::/73,fddf:d8b5:d34c:c98c:2a00::/71,fddf:d8b5:d34c:c98c:2c00::/70,fddf:d8b5:d34c:c98c:3000::/68,fddf:d8b5:d34c:c98c:4000::/66,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/66,fddf:d8b5:d34c:c98c:4000::/70,fddf:d8b5:d34c:c98c:4400::/72,fddf:d8b5:d34c:c98c:4500::/74,fddf:d8b5:d34c:c98c:4540::/77,fddf:d8b5:d34c:c98c:454a::/79,fddf:d8b5:d34c:c98c:454c::/78,fddf:d8b5:d34c:c98c:4550::/76,fddf:d8b5:d34c:c98c:4560::/75,fddf:d8b5:d34c:c98c:4580::/73,fddf:d8b5:d34c:c98c:4600::/71,fddf:d8b5:d34c:c98c:4800::/69,fddf:d8b5:d34c:c98c:5000::/68,fddf:d8b5:d34c:c98c:6000::/67,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/66,fddf:d8b5:d34c:c98c:4000::/67,fddf:d8b5:d34c:c98c:6000::/69,fddf:d8b5:d34c:c98c:6800::/70,fddf:d8b5:d34c:c98c:6c00::/72,fddf:d8b5:d34c:c98c:6d00::/74,fddf:d8b5:d34c:c98c:6d40::/75,fddf:d8b5:d34c:c98c:6d60::/76,fddf:d8b5:d34c:c98c:6d70::/79,fddf:d8b5:d34c:c98c:6d74::/78,fddf:d8b5:d34c:c98c:6d78::/77,fddf:d8b5:d34c:c98c:6d80::/73,fddf:d8b5:d34c:c98c:6e00::/71,fddf:d8b5:d34c:c98c:7000::/68,fddf:d8b5:d34c:c98c:8000::/65;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/68,fddf:d8b5:d34c:c98c:9000::/69,fddf:d8b5:d34c:c98c:9800::/70,fddf:d8b5:d34c:c98c:9c00::/71,fddf:d8b5:d34c:c98c:9e00::/72,fddf:d8b5:d34c:c98c:9f00::/73,fddf:d8b5:d34c:c98c:9f80::/74,fddf:d8b5:d34c:c98c:9fc0::/75,fddf:d8b5:d34c:c98c:9fe0::/76,fddf:d8b5:d34c:c98c:9ff0::/77,fddf:d8b5:d34c:c98c:9ffa::/79,fddf:d8b5:d34c:c98c:9ffc::/78,fddf:d8b5:d34c:c98c:a000::/67,fddf:d8b5:d34c:c98c:c000::/66;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/72,fddf:d8b5:d34c:c98c:c100::/73,fddf:d8b5:d34c:c98c:c180::/74,fddf:d8b5:d34c:c98c:c1c0::/75,fddf:d8b5:d34c:c98c:c1e0::/76,fddf:d8b5:d34c:c
98c:c1f2::/79,fddf:d8b5:d34c:c98c:c1f4::/78,fddf:d8b5:d34c:c98c:c1f8::/77,fddf:d8b5:d34c:c98c:c200::/71,fddf:d8b5:d34c:c98c:c400::/70,fddf:d8b5:d34c:c98c:c800::/69,fddf:d8b5:d34c:c98c:d000::/68,fddf:d8b5:d34c:c98c:e000::/67;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/70,fddf:d8b5:d34c:c98c:c400::/71,fddf:d8b5:d34c:c98c:c600::/72,fddf:d8b5:d34c:c98c:c700::/77,fddf:d8b5:d34c:c98c:c708::/78,fddf:d8b5:d34c:c98c:c70c::/79,fddf:d8b5:d34c:c98c:c710::/76,fddf:d8b5:d34c:c98c:c720::/75,fddf:d8b5:d34c:c98c:c740::/74,fddf:d8b5:d34c:c98c:c780::/73,fddf:d8b5:d34c:c98c:c800::/69,fddf:d8b5:d34c:c98c:d000::/68,fddf:d8b5:d34c:c98c:e000::/67;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/68,fddf:d8b5:d34c:c98c:d000::/69,fddf:d8b5:d34c:c98c:d800::/70,fddf:d8b5:d34c:c98c:dc00::/72,fddf:d8b5:d34c:c98c:dd00::/74,fddf:d8b5:d34c:c98c:dd40::/75,fddf:d8b5:d34c:c98c:dd60::/77,fddf:d8b5:d34c:c98c:dd68::/78,fddf:d8b5:d34c:c98c:dd6c::/79,fddf:d8b5:d34c:c98c:dd70::/76,fddf:d8b5:d34c:c98c:dd80::/73,fddf:d8b5:d34c:c98c:de00::/71,fddf:d8b5:d34c:c98c:e000::/67;fddf:d8b5:d34c:c98c::/65,fddf:d8b5:d34c:c98c:8000::/66,fddf:d8b5:d34c:c98c:c000::/68,fddf:d8b5:d34c:c98c:d000::/69,fddf:d8b5:d34c:c98c:d800::/70,fddf:d8b5:d34c:c98c:dc00::/71,fddf:d8b5:d34c:c98c:de00::/72,fddf:d8b5:d34c:c98c:df00::/73,fddf:d8b5:d34c:c98c:df80::/75,fddf:d8b5:d34c:c98c:dfa0::/77,fddf:d8b5:d34c:c98c:dfa8::/78,fddf:d8b5:d34c:c98c:dfac::/79,fddf:d8b5:d34c:c98c:dfb0::/76,fddf:d8b5:d34c:c98c:dfc0::/74,fddf:d8b5:d34c:c98c:e000::/67 fddf:d8b5:f3ba:c98c::/64 fddf:d8b5:f3ba:c98c:7b40::/74,fddf:d8b5:f3ba:c98c:7dc0::/75,fddf:d8b5:f3ba:c98c:5520::/76,fddf:d8b5:f3ba:c98c:3428::/77,fddf:d8b5:f3ba:c98c:69b0::/77,fddf:d8b5:f3ba:c98c:91b0::/77,fddf:d8b5:f3ba:c98c:ae10::/77,fddf:d8b5:f3ba:c98c:f4c8::/77,fddf:d8b5:f3ba:c98c:78::/78,fddf:d8b5:f3ba:c98c:3c0::/78,fddf:d8b5:f3ba:c98c:4098::/78,fddf:d8b5:f3ba:c98c:a318::/78,fddf:d8b5:f3ba:c98c:feb8::/78,fddf:d8b5:f3ba:c98c:2246::/79,fddf:d8b5:f3ba:c98c:2dd8::/79,fddf:d8b5:f3ba:c98c:3966::/79,fddf:d8b5:f3ba:c98c:3f62::/79,fddf:d8b5:f3ba:c98c:425e::/79,fddf:d8b5:f3ba:c98c:73c2::/79,fddf:d8b5:f3ba:c98c:803c::/79,fddf:d8b5:f3ba:c98c:87e4::/79,fddf:d8b5:f3ba:c98c:88e4::/79,fddf:d8b5:f3ba:c98c:931a::/79,fddf:d8b5:f3ba:c98c:ab08::/79,fddf:d8b5:f3ba:c98c:ad52::/79,fddf:d8b5:f3ba:c98c:c9c0::/79,fddf:d8b5:f3ba:c98c:cab8::/79,fddf:d8b5:f3ba:c98c:cf88::/79,fddf:d8b5:f3ba:c98c:de70::/79,fddf:d8b5:f3ba:c98c:f8b6::/79 
fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/68,fddf:d8b5:f3ba:c98c:7000::/69,fddf:d8b5:f3ba:c98c:7800::/71,fddf:d8b5:f3ba:c98c:7a00::/72,fddf:d8b5:f3ba:c98c:7b00::/74,fddf:d8b5:f3ba:c98c:7b80::/73,fddf:d8b5:f3ba:c98c:7c00::/70,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/68,fddf:d8b5:f3ba:c98c:7000::/69,fddf:d8b5:f3ba:c98c:7800::/70,fddf:d8b5:f3ba:c98c:7c00::/72,fddf:d8b5:f3ba:c98c:7d00::/73,fddf:d8b5:f3ba:c98c:7d80::/74,fddf:d8b5:f3ba:c98c:7de0::/75,fddf:d8b5:f3ba:c98c:7e00::/71,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/68,fddf:d8b5:f3ba:c98c:5000::/70,fddf:d8b5:f3ba:c98c:5400::/72,fddf:d8b5:f3ba:c98c:5500::/75,fddf:d8b5:f3ba:c98c:5530::/76,fddf:d8b5:f3ba:c98c:5540::/74,fddf:d8b5:f3ba:c98c:5580::/73,fddf:d8b5:f3ba:c98c:5600::/71,fddf:d8b5:f3ba:c98c:5800::/69,fddf:d8b5:f3ba:c98c:6000::/67,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/67,fddf:d8b5:f3ba:c98c:2000::/68,fddf:d8b5:f3ba:c98c:3000::/70,fddf:d8b5:f3ba:c98c:3400::/75,fddf:d8b5:f3ba:c98c:3420::/77,fddf:d8b5:f3ba:c98c:3430::/76,fddf:d8b5:f3ba:c98c:3440::/74,fddf:d8b5:f3ba:c98c:3480::/73,fddf:d8b5:f3ba:c98c:3500::/72,fddf:d8b5:f3ba:c98c:3600::/71,fddf:d8b5:f3ba:c98c:3800::/69,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/69,fddf:d8b5:f3ba:c98c:6800::/72,fddf:d8b5:f3ba:c98c:6900::/73,fddf:d8b5:f3ba:c98c:6980::/75,fddf:d8b5:f3ba:c98c:69a0::/76,fddf:d8b5:f3ba:c98c:69b8::/77,fddf:d8b5:f3ba:c98c:69c0::/74,fddf:d8b5:f3ba:c98c:6a00::/71,fddf:d8b5:f3ba:c98c:6c00::/70,fddf:d8b5:f3ba:c98c:7000::/68,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/68,fddf:d8b5:f3ba:c98c:9000::/72,fddf:d8b5:f3ba:c98c:9100::/73,fddf:d8b5:f3ba:c98c:9180::/75,fddf:d8b5:f3ba:c98c:91a0::/76,fddf:d8b5:f3ba:c98c:91b8::/77,fddf:d8b5:f3ba:c98c:91c0::/74,fddf:d8b5:f3ba:c98c:9200::/71,fddf:d8b5:f3ba:c98c:9400::/70,fddf:d8b5:f3ba:c98c:9800::/69,fddf:d8b5:f3ba:c98c:a000::/67,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/69,fddf:d8b5:f3ba:c98c:a800::/70,fddf:d8b5:f3ba:c98c:ac00::/71,fddf:d8b5:f3ba:c98c:ae00::/76,fddf:d8b5:f3ba:c98c:ae18::/77,fddf:d8b5:f3ba:c98c:ae20::/75,fddf:d8b5:f3ba:c98c:ae40::/74,fddf:d8b5:f3ba:c98c:ae80::/73,fddf:d8b5:f3ba:c98c:af00::/72,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e000::/68,fddf:d8b5:f3ba:c98c:f000::/70,fddf:d8b5:f3ba:c98c:f400::/73,fddf:d8b5:f3ba:c98c:f480::/74,fddf:d8b5:f3ba:c98c:f4c0::/77,fddf:d8b5:f3ba:c98c:f4d0::/76,fddf:d8b5:f3ba:c98c:f4e0::/75,fddf:d8b5:f3ba:c98c:f500::/72,fddf:d8b5:f3ba:c98c:f600::/71,fddf:d8b5:f3ba:c98c:f800::/69;fddf:d8b5:f3ba:c98c::/74,fddf:d8b5:f3ba:c98c:40::/75,fddf:d8b5:f3ba:c98c:60::/76,fddf:d8b5:f3ba:c98c:70::/77,fddf:d8b5:f3ba:c98c:7c::/78,fddf:d8b5:f3ba:c98c:80::/73,fddf:d8b5:f3ba:c98c:100::/72,fddf:d8b5:f3ba:c98c:200::/71,fddf:d8b5:f3ba:c98c:400::/70,fddf:d8b5:f3ba:c98c:800::/69,fddf:d8b5:f3ba:c98c:1000::/68,fddf:d8b5:f3ba:c98c:2000::/67,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/71,fddf:d8b5:f3ba:c98c:200::/72,fddf:d8b5:f3ba:c98c:300::/73,fddf:d8b5:f3ba:c98c:380::/74,fddf:d8b5:f3ba:c98c:3c4::/78,fddf:d8b5:f3ba:c98c:3c8::/77,fddf:d8b5:f3ba:c98c:3d0::/76,fddf:d8b5:f3ba:c98c:3e0::/75,fddf:d8b5:f3ba:c98c:400::/
70,fddf:d8b5:f3ba:c98c:800::/69,fddf:d8b5:f3ba:c98c:1000::/68,fddf:d8b5:f3ba:c98c:2000::/67,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/73,fddf:d8b5:f3ba:c98c:4080::/76,fddf:d8b5:f3ba:c98c:4090::/77,fddf:d8b5:f3ba:c98c:409c::/78,fddf:d8b5:f3ba:c98c:40a0::/75,fddf:d8b5:f3ba:c98c:40c0::/74,fddf:d8b5:f3ba:c98c:4100::/72,fddf:d8b5:f3ba:c98c:4200::/71,fddf:d8b5:f3ba:c98c:4400::/70,fddf:d8b5:f3ba:c98c:4800::/69,fddf:d8b5:f3ba:c98c:5000::/68,fddf:d8b5:f3ba:c98c:6000::/67,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/71,fddf:d8b5:f3ba:c98c:a200::/72,fddf:d8b5:f3ba:c98c:a300::/76,fddf:d8b5:f3ba:c98c:a310::/77,fddf:d8b5:f3ba:c98c:a31c::/78,fddf:d8b5:f3ba:c98c:a320::/75,fddf:d8b5:f3ba:c98c:a340::/74,fddf:d8b5:f3ba:c98c:a380::/73,fddf:d8b5:f3ba:c98c:a400::/70,fddf:d8b5:f3ba:c98c:a800::/69,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e000::/68,fddf:d8b5:f3ba:c98c:f000::/69,fddf:d8b5:f3ba:c98c:f800::/70,fddf:d8b5:f3ba:c98c:fc00::/71,fddf:d8b5:f3ba:c98c:fe00::/73,fddf:d8b5:f3ba:c98c:fe80::/75,fddf:d8b5:f3ba:c98c:fea0::/76,fddf:d8b5:f3ba:c98c:feb0::/77,fddf:d8b5:f3ba:c98c:febc::/78,fddf:d8b5:f3ba:c98c:fec0::/74,fddf:d8b5:f3ba:c98c:ff00::/72;fddf:d8b5:f3ba:c98c::/67,fddf:d8b5:f3ba:c98c:2000::/71,fddf:d8b5:f3ba:c98c:2200::/74,fddf:d8b5:f3ba:c98c:2240::/78,fddf:d8b5:f3ba:c98c:2244::/79,fddf:d8b5:f3ba:c98c:2248::/77,fddf:d8b5:f3ba:c98c:2250::/76,fddf:d8b5:f3ba:c98c:2260::/75,fddf:d8b5:f3ba:c98c:2280::/73,fddf:d8b5:f3ba:c98c:2300::/72,fddf:d8b5:f3ba:c98c:2400::/70,fddf:d8b5:f3ba:c98c:2800::/69,fddf:d8b5:f3ba:c98c:3000::/68,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/67,fddf:d8b5:f3ba:c98c:2000::/69,fddf:d8b5:f3ba:c98c:2800::/70,fddf:d8b5:f3ba:c98c:2c00::/72,fddf:d8b5:f3ba:c98c:2d00::/73,fddf:d8b5:f3ba:c98c:2d80::/74,fddf:d8b5:f3ba:c98c:2dc0::/76,fddf:d8b5:f3ba:c98c:2dd0::/77,fddf:d8b5:f3ba:c98c:2dda::/79,fddf:d8b5:f3ba:c98c:2ddc::/78,fddf:d8b5:f3ba:c98c:2de0::/75,fddf:d8b5:f3ba:c98c:2e00::/71,fddf:d8b5:f3ba:c98c:3000::/68,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/67,fddf:d8b5:f3ba:c98c:2000::/68,fddf:d8b5:f3ba:c98c:3000::/69,fddf:d8b5:f3ba:c98c:3800::/72,fddf:d8b5:f3ba:c98c:3900::/74,fddf:d8b5:f3ba:c98c:3940::/75,fddf:d8b5:f3ba:c98c:3960::/78,fddf:d8b5:f3ba:c98c:3964::/79,fddf:d8b5:f3ba:c98c:3968::/77,fddf:d8b5:f3ba:c98c:3970::/76,fddf:d8b5:f3ba:c98c:3980::/73,fddf:d8b5:f3ba:c98c:3a00::/71,fddf:d8b5:f3ba:c98c:3c00::/70,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/67,fddf:d8b5:f3ba:c98c:2000::/68,fddf:d8b5:f3ba:c98c:3000::/69,fddf:d8b5:f3ba:c98c:3800::/70,fddf:d8b5:f3ba:c98c:3c00::/71,fddf:d8b5:f3ba:c98c:3e00::/72,fddf:d8b5:f3ba:c98c:3f00::/74,fddf:d8b5:f3ba:c98c:3f40::/75,fddf:d8b5:f3ba:c98c:3f60::/79,fddf:d8b5:f3ba:c98c:3f64::/78,fddf:d8b5:f3ba:c98c:3f68::/77,fddf:d8b5:f3ba:c98c:3f70::/76,fddf:d8b5:f3ba:c98c:3f80::/73,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/71,fddf:d8b5:f3ba:c98c:4200::/74,fddf:d8b5:f3ba:c98c:4240::/76,fddf:d8b5:f3ba:c98c:4250::/77,fddf:d8b5:f3ba:c98c:4258::/78,fddf:d8b5:f3ba:c98c:425c::/79,fddf:d8b5:f3ba:c98c:4260::/75,fddf:d8b5:f3ba:c98c:4280::/73,fddf:d8b5:f3ba:c98c:4300::/72,fddf:d8b5:f3ba:c98c:4400::/70,fddf:d8b5:f3ba:c98c:4800::/69,fddf:d8b5:f3ba:c98c:500
0::/68,fddf:d8b5:f3ba:c98c:6000::/67,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/68,fddf:d8b5:f3ba:c98c:7000::/71,fddf:d8b5:f3ba:c98c:7200::/72,fddf:d8b5:f3ba:c98c:7300::/73,fddf:d8b5:f3ba:c98c:7380::/74,fddf:d8b5:f3ba:c98c:73c0::/79,fddf:d8b5:f3ba:c98c:73c4::/78,fddf:d8b5:f3ba:c98c:73c8::/77,fddf:d8b5:f3ba:c98c:73d0::/76,fddf:d8b5:f3ba:c98c:73e0::/75,fddf:d8b5:f3ba:c98c:7400::/70,fddf:d8b5:f3ba:c98c:7800::/69,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/75,fddf:d8b5:f3ba:c98c:8020::/76,fddf:d8b5:f3ba:c98c:8030::/77,fddf:d8b5:f3ba:c98c:8038::/78,fddf:d8b5:f3ba:c98c:803e::/79,fddf:d8b5:f3ba:c98c:8040::/74,fddf:d8b5:f3ba:c98c:8080::/73,fddf:d8b5:f3ba:c98c:8100::/72,fddf:d8b5:f3ba:c98c:8200::/71,fddf:d8b5:f3ba:c98c:8400::/70,fddf:d8b5:f3ba:c98c:8800::/69,fddf:d8b5:f3ba:c98c:9000::/68,fddf:d8b5:f3ba:c98c:a000::/67,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/70,fddf:d8b5:f3ba:c98c:8400::/71,fddf:d8b5:f3ba:c98c:8600::/72,fddf:d8b5:f3ba:c98c:8700::/73,fddf:d8b5:f3ba:c98c:8780::/74,fddf:d8b5:f3ba:c98c:87c0::/75,fddf:d8b5:f3ba:c98c:87e0::/78,fddf:d8b5:f3ba:c98c:87e6::/79,fddf:d8b5:f3ba:c98c:87e8::/77,fddf:d8b5:f3ba:c98c:87f0::/76,fddf:d8b5:f3ba:c98c:8800::/69,fddf:d8b5:f3ba:c98c:9000::/68,fddf:d8b5:f3ba:c98c:a000::/67,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/69,fddf:d8b5:f3ba:c98c:8800::/73,fddf:d8b5:f3ba:c98c:8880::/74,fddf:d8b5:f3ba:c98c:88c0::/75,fddf:d8b5:f3ba:c98c:88e0::/78,fddf:d8b5:f3ba:c98c:88e6::/79,fddf:d8b5:f3ba:c98c:88e8::/77,fddf:d8b5:f3ba:c98c:88f0::/76,fddf:d8b5:f3ba:c98c:8900::/72,fddf:d8b5:f3ba:c98c:8a00::/71,fddf:d8b5:f3ba:c98c:8c00::/70,fddf:d8b5:f3ba:c98c:9000::/68,fddf:d8b5:f3ba:c98c:a000::/67,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/68,fddf:d8b5:f3ba:c98c:9000::/71,fddf:d8b5:f3ba:c98c:9200::/72,fddf:d8b5:f3ba:c98c:9300::/76,fddf:d8b5:f3ba:c98c:9310::/77,fddf:d8b5:f3ba:c98c:9318::/79,fddf:d8b5:f3ba:c98c:931c::/78,fddf:d8b5:f3ba:c98c:9320::/75,fddf:d8b5:f3ba:c98c:9340::/74,fddf:d8b5:f3ba:c98c:9380::/73,fddf:d8b5:f3ba:c98c:9400::/70,fddf:d8b5:f3ba:c98c:9800::/69,fddf:d8b5:f3ba:c98c:a000::/67,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/69,fddf:d8b5:f3ba:c98c:a800::/71,fddf:d8b5:f3ba:c98c:aa00::/72,fddf:d8b5:f3ba:c98c:ab00::/77,fddf:d8b5:f3ba:c98c:ab0a::/79,fddf:d8b5:f3ba:c98c:ab0c::/78,fddf:d8b5:f3ba:c98c:ab10::/76,fddf:d8b5:f3ba:c98c:ab20::/75,fddf:d8b5:f3ba:c98c:ab40::/74,fddf:d8b5:f3ba:c98c:ab80::/73,fddf:d8b5:f3ba:c98c:ac00::/70,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/69,fddf:d8b5:f3ba:c98c:a800::/70,fddf:d8b5:f3ba:c98c:ac00::/72,fddf:d8b5:f3ba:c98c:ad00::/74,fddf:d8b5:f3ba:c98c:ad40::/76,fddf:d8b5:f3ba:c98c:ad50::/79,fddf:d8b5:f3ba:c98c:ad54::/78,fddf:d8b5:f3ba:c98c:ad58::/77,fddf:d8b5:f3ba:c98c:ad60::/75,fddf:d8b5:f3ba:c98c:ad80::/73,fddf:d8b5:f3ba:c98c:ae00::/71,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/69,fddf:d8b5:f3ba:c98c:c800::/72,fddf:d8b5:f3ba:c98c:c900::/73,fddf:d8b5:f3ba:c98c:c980::/74,fddf:d8b5:f3ba:c98c:c9c2::/79,fddf:d8b5:f3ba:c98c:c9c4::/78,fddf:d8b5:f3ba:c98c:c9c8::/77,fddf:d8b5:f3ba:c98c:c9d0::/76,fddf:d8b5:f3ba:c98c:c9e0::/75,fddf:d8b5:f3ba:c98c:ca00::/71,fddf:d8b5:f3ba:c98
c:cc00::/70,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/69,fddf:d8b5:f3ba:c98c:c800::/71,fddf:d8b5:f3ba:c98c:ca00::/73,fddf:d8b5:f3ba:c98c:ca80::/75,fddf:d8b5:f3ba:c98c:caa0::/76,fddf:d8b5:f3ba:c98c:cab0::/77,fddf:d8b5:f3ba:c98c:caba::/79,fddf:d8b5:f3ba:c98c:cabc::/78,fddf:d8b5:f3ba:c98c:cac0::/74,fddf:d8b5:f3ba:c98c:cb00::/72,fddf:d8b5:f3ba:c98c:cc00::/70,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/69,fddf:d8b5:f3ba:c98c:c800::/70,fddf:d8b5:f3ba:c98c:cc00::/71,fddf:d8b5:f3ba:c98c:ce00::/72,fddf:d8b5:f3ba:c98c:cf00::/73,fddf:d8b5:f3ba:c98c:cf80::/77,fddf:d8b5:f3ba:c98c:cf8a::/79,fddf:d8b5:f3ba:c98c:cf8c::/78,fddf:d8b5:f3ba:c98c:cf90::/76,fddf:d8b5:f3ba:c98c:cfa0::/75,fddf:d8b5:f3ba:c98c:cfc0::/74,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/68,fddf:d8b5:f3ba:c98c:d000::/69,fddf:d8b5:f3ba:c98c:d800::/70,fddf:d8b5:f3ba:c98c:dc00::/71,fddf:d8b5:f3ba:c98c:de00::/74,fddf:d8b5:f3ba:c98c:de40::/75,fddf:d8b5:f3ba:c98c:de60::/76,fddf:d8b5:f3ba:c98c:de72::/79,fddf:d8b5:f3ba:c98c:de74::/78,fddf:d8b5:f3ba:c98c:de78::/77,fddf:d8b5:f3ba:c98c:de80::/73,fddf:d8b5:f3ba:c98c:df00::/72,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e000::/68,fddf:d8b5:f3ba:c98c:f000::/69,fddf:d8b5:f3ba:c98c:f800::/73,fddf:d8b5:f3ba:c98c:f880::/75,fddf:d8b5:f3ba:c98c:f8a0::/76,fddf:d8b5:f3ba:c98c:f8b0::/78,fddf:d8b5:f3ba:c98c:f8b4::/79,fddf:d8b5:f3ba:c98c:f8b8::/77,fddf:d8b5:f3ba:c98c:f8c0::/74,fddf:d8b5:f3ba:c98c:f900::/72,fddf:d8b5:f3ba:c98c:fa00::/71,fddf:d8b5:f3ba:c98c:fc00::/70 fddf:d8b5:f3ba:c98c::/64 fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:c000::/68,fddf:d8b5:f3ba:c98c:5800::/69,fddf:d8b5:f3ba:c98c:6800::/69,fddf:d8b5:f3ba:c98c:a800::/69,fddf:d8b5:f3ba:c98c:4400::/70,fddf:d8b5:f3ba:c98c:7400::/70,fddf:d8b5:f3ba:c98c:ac00::/70,fddf:d8b5:f3ba:c98c:d800::/70,fddf:d8b5:f3ba:c98c:dc00::/70,fddf:d8b5:f3ba:c98c:e800::/70,fddf:d8b5:f3ba:c98c:1e00::/71,fddf:d8b5:f3ba:c98c:2c00::/71,fddf:d8b5:f3ba:c98c:6200::/71,fddf:d8b5:f3ba:c98c:7a00::/71,fddf:d8b5:f3ba:c98c:8c00::/71,fddf:d8b5:f3ba:c98c:a600::/71,fddf:d8b5:f3ba:c98c:b800::/71,fddf:d8b5:f3ba:c98c:c000::/71,fddf:d8b5:f3ba:c98c:c600::/71,fddf:d8b5:f3ba:c98c:ca00::/71,fddf:d8b5:f3ba:c98c:ce00::/71,fddf:d8b5:f3ba:c98c:d600::/71,fddf:d8b5:f3ba:c98c:e000::/71,fddf:d8b5:f3ba:c98c:e800::/71,fddf:d8b5:f3ba:c98c:ea00::/71 
fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/68,fddf:d8b5:f3ba:c98c:5000::/69,fddf:d8b5:f3ba:c98c:6000::/67,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/69,fddf:d8b5:f3ba:c98c:7000::/68,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/69,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/70,fddf:d8b5:f3ba:c98c:4800::/69,fddf:d8b5:f3ba:c98c:5000::/68,fddf:d8b5:f3ba:c98c:6000::/67,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/68,fddf:d8b5:f3ba:c98c:7000::/70,fddf:d8b5:f3ba:c98c:7800::/69,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/69,fddf:d8b5:f3ba:c98c:a800::/70,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/68,fddf:d8b5:f3ba:c98c:d000::/69,fddf:d8b5:f3ba:c98c:dc00::/70,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/68,fddf:d8b5:f3ba:c98c:d000::/69,fddf:d8b5:f3ba:c98c:d800::/70,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e000::/69,fddf:d8b5:f3ba:c98c:ec00::/70,fddf:d8b5:f3ba:c98c:f000::/68;fddf:d8b5:f3ba:c98c::/68,fddf:d8b5:f3ba:c98c:1000::/69,fddf:d8b5:f3ba:c98c:1800::/70,fddf:d8b5:f3ba:c98c:1c00::/71,fddf:d8b5:f3ba:c98c:2000::/67,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/67,fddf:d8b5:f3ba:c98c:2000::/69,fddf:d8b5:f3ba:c98c:2800::/70,fddf:d8b5:f3ba:c98c:2e00::/71,fddf:d8b5:f3ba:c98c:3000::/68,fddf:d8b5:f3ba:c98c:4000::/66,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/71,fddf:d8b5:f3ba:c98c:6400::/70,fddf:d8b5:f3ba:c98c:6800::/69,fddf:d8b5:f3ba:c98c:7000::/68,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/66,fddf:d8b5:f3ba:c98c:4000::/67,fddf:d8b5:f3ba:c98c:6000::/68,fddf:d8b5:f3ba:c98c:7000::/69,fddf:d8b5:f3ba:c98c:7800::/71,fddf:d8b5:f3ba:c98c:7c00::/70,fddf:d8b5:f3ba:c98c:8000::/65;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/69,fddf:d8b5:f3ba:c98c:8800::/70,fddf:d8b5:f3ba:c98c:8e00::/71,fddf:d8b5:f3ba:c98c:9000::/68,fddf:d8b5:f3ba:c98c:a000::/67,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/70,fddf:d8b5:f3ba:c98c:a400::/71,fddf:d8b5:f3ba:c98c:a800::/69,fddf:d8b5:f3ba:c98c:b000::/68,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/67,fddf:d8b5:f3ba:c98c:a000::/68,fddf:d8b5:f3ba:c98c:b000::/69,fddf:d8b5:f3ba:c98c:ba00::/71,fddf:d8b5:f3ba:c98c:bc00::/70,fddf:d8b5:f3ba:c98c:c000::/66;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c200::/71,fddf:d8b5:f3ba:c98c:c400::/70,fddf:d8b5:f3ba:c98c:c800::/69,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/70,fddf:d8b5:f3ba:c98c:c400::/71,fddf:d8b5:f3ba:c98c:c800::/69,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,
fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/69,fddf:d8b5:f3ba:c98c:c800::/71,fddf:d8b5:f3ba:c98c:cc00::/70,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/69,fddf:d8b5:f3ba:c98c:c800::/70,fddf:d8b5:f3ba:c98c:cc00::/71,fddf:d8b5:f3ba:c98c:d000::/68,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/68,fddf:d8b5:f3ba:c98c:d000::/70,fddf:d8b5:f3ba:c98c:d400::/71,fddf:d8b5:f3ba:c98c:d800::/69,fddf:d8b5:f3ba:c98c:e000::/67;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e200::/71,fddf:d8b5:f3ba:c98c:e400::/70,fddf:d8b5:f3ba:c98c:e800::/69,fddf:d8b5:f3ba:c98c:f000::/68;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e000::/69,fddf:d8b5:f3ba:c98c:ea00::/71,fddf:d8b5:f3ba:c98c:ec00::/70,fddf:d8b5:f3ba:c98c:f000::/68;fddf:d8b5:f3ba:c98c::/65,fddf:d8b5:f3ba:c98c:8000::/66,fddf:d8b5:f3ba:c98c:c000::/67,fddf:d8b5:f3ba:c98c:e000::/69,fddf:d8b5:f3ba:c98c:e800::/71,fddf:d8b5:f3ba:c98c:ec00::/70,fddf:d8b5:f3ba:c98c:f000::/68 10.10.2.0/24 10.10.2.128/26,10.10.2.192/26,10.10.2.64/28,10.10.2.224/28,10.10.2.0/29,10.10.2.8/29,10.10.2.24/29,10.10.2.64/29,10.10.2.152/29,10.10.2.200/29,10.10.2.208/29,10.10.2.92/30,10.10.2.228/30,10.10.2.244/30,10.10.2.12/31,10.10.2.38/31,10.10.2.48/31,10.10.2.68/31,10.10.2.90/31,10.10.2.114/31,10.10.2.134/31,10.10.2.140/31,10.10.2.148/31,10.10.2.170/31,10.10.2.172/31,10.10.2.182/31,10.10.2.188/31,10.10.2.196/31,10.10.2.234/31,10.10.2.250/31 10.10.2.0/25,10.10.2.192/26;10.10.2.0/25,10.10.2.128/26;10.10.2.0/26,10.10.2.80/28,10.10.2.96/27,10.10.2.128/25;10.10.2.0/25,10.10.2.128/26,10.10.2.192/27,10.10.2.240/28;10.10.2.8/29,10.10.2.16/28,10.10.2.32/27,10.10.2.64/26,10.10.2.128/25;10.10.2.0/29,10.10.2.16/28,10.10.2.32/27,10.10.2.64/26,10.10.2.128/25;10.10.2.0/28,10.10.2.16/29,10.10.2.32/27,10.10.2.64/26,10.10.2.128/25;10.10.2.0/26,10.10.2.72/29,10.10.2.80/28,10.10.2.96/27,10.10.2.128/25;10.10.2.0/25,10.10.2.128/28,10.10.2.144/29,10.10.2.160/27,10.10.2.192/26;10.10.2.0/25,10.10.2.128/26,10.10.2.192/29,10.10.2.208/28,10.10.2.224/27;10.10.2.0/25,10.10.2.128/26,10.10.2.192/28,10.10.2.216/29,10.10.2.224/27;10.10.2.0/26,10.10.2.64/28,10.10.2.80/29,10.10.2.88/30,10.10.2.96/27,10.10.2.128/25;10.10.2.0/25,10.10.2.128/26,10.10.2.192/27,10.10.2.224/30,10.10.2.232/29,10.10.2.240/28;10.10.2.0/25,10.10.2.128/26,10.10.2.192/27,10.10.2.224/28,10.10.2.240/30,10.10.2.248/29;10.10.2.0/29,10.10.2.8/30,10.10.2.14/31,10.10.2.16/28,10.10.2.32/27,10.10.2.64/26,10.10.2.128/25;10.10.2.0/27,10.10.2.32/30,10.10.2.36/31,10.10.2.40/29,10.10.2.48/28,10.10.2.64/26,10.10.2.128/25;10.10.2.0/27,10.10.2.32/28,10.10.2.50/31,10.10.2.52/30,10.10.2.56/29,10.10.2.64/26,10.10.2.128/25;10.10.2.0/26,10.10.2.64/30,10.10.2.70/31,10.10.2.72/29,10.10.2.80/28,10.10.2.96/27,10.10.2.128/25;10.10.2.0/26,10.10.2.64/28,10.10.2.80/29,10.10.2.88/31,10.10.2.92/30,10.10.2.96/27,10.10.2.128/25;10.10.2.0/26,10.10.2.64/27,10.10.2.96/28,10.10.2.112/31,10.10.2.116/30,10.10.2.120/29,10.10.2.128/25;10.10.2.0/25,10.10.2.128/30,10.10.2.132/31,10.10.2.136/29,10.10.2.144/28,10.10.2.160/27,10.10.2.192/26;10.10.2.0/25,10.10.2.128/29,10.10.2.136/30,10.10.2.142/31,10.10.2.144/28,10.10.2.160/27,10.10.2.192/26;10.10.2.0/25,10.10.2.128/28,10.10.2.144/30,10.10.2.150/31,10.10.2.152/29,10.10.2.160/27,10.10.2.192/26;10.10.2.0/25,10.10.2.128/27,10.10.2.160/29,10.10.2.168/31,10.10.2.172/30
,10.10.2.176/28,10.10.2.192/26;10.10.2.0/25,10.10.2.128/27,10.10.2.160/29,10.10.2.168/30,10.10.2.174/31,10.10.2.176/28,10.10.2.192/26;10.10.2.0/25,10.10.2.128/27,10.10.2.160/28,10.10.2.176/30,10.10.2.180/31,10.10.2.184/29,10.10.2.192/26;10.10.2.0/25,10.10.2.128/27,10.10.2.160/28,10.10.2.176/29,10.10.2.184/30,10.10.2.190/31,10.10.2.192/26;10.10.2.0/25,10.10.2.128/26,10.10.2.192/30,10.10.2.198/31,10.10.2.200/29,10.10.2.208/28,10.10.2.224/27;10.10.2.0/25,10.10.2.128/26,10.10.2.192/27,10.10.2.224/29,10.10.2.232/31,10.10.2.236/30,10.10.2.240/28;10.10.2.0/25,10.10.2.128/26,10.10.2.192/27,10.10.2.224/28,10.10.2.240/29,10.10.2.248/31,10.10.2.252/30 10.0.0.0/8 10.64.0.0/12,10.176.0.0/12,10.40.0.0/13,10.128.0.0/13,10.32.0.0/14,10.36.0.0/14,10.44.0.0/14,10.52.0.0/14,10.112.0.0/14,10.120.0.0/14,10.136.0.0/14,10.152.0.0/14,10.160.0.0/14,10.164.0.0/14,10.208.0.0/14,10.224.0.0/14,10.30.0.0/15,10.32.0.0/15,10.100.0.0/15,10.104.0.0/15,10.128.0.0/15,10.138.0.0/15,10.160.0.0/15,10.164.0.0/15,10.190.0.0/15,10.230.0.0/15,10.232.0.0/15,10.236.0.0/15 10.0.0.0/10,10.80.0.0/12,10.96.0.0/11,10.128.0.0/9;10.0.0.0/9,10.128.0.0/11,10.160.0.0/12,10.192.0.0/10;10.0.0.0/11,10.32.0.0/13,10.48.0.0/12,10.64.0.0/10,10.128.0.0/9;10.0.0.0/9,10.136.0.0/13,10.144.0.0/12,10.160.0.0/11,10.192.0.0/10;10.0.0.0/11,10.36.0.0/14,10.40.0.0/13,10.48.0.0/12,10.64.0.0/10,10.128.0.0/9;10.0.0.0/11,10.32.0.0/14,10.40.0.0/13,10.48.0.0/12,10.64.0.0/10,10.128.0.0/9;10.0.0.0/11,10.32.0.0/13,10.40.0.0/14,10.48.0.0/12,10.64.0.0/10,10.128.0.0/9;10.0.0.0/11,10.32.0.0/12,10.48.0.0/14,10.56.0.0/13,10.64.0.0/10,10.128.0.0/9;10.0.0.0/10,10.64.0.0/11,10.96.0.0/12,10.116.0.0/14,10.120.0.0/13,10.128.0.0/9;10.0.0.0/10,10.64.0.0/11,10.96.0.0/12,10.112.0.0/13,10.124.0.0/14,10.128.0.0/9;10.0.0.0/9,10.128.0.0/13,10.140.0.0/14,10.144.0.0/12,10.160.0.0/11,10.192.0.0/10;10.0.0.0/9,10.128.0.0/12,10.144.0.0/13,10.156.0.0/14,10.160.0.0/11,10.192.0.0/10;10.0.0.0/9,10.128.0.0/11,10.164.0.0/14,10.168.0.0/13,10.176.0.0/12,10.192.0.0/10;10.0.0.0/9,10.128.0.0/11,10.160.0.0/14,10.168.0.0/13,10.176.0.0/12,10.192.0.0/10;10.0.0.0/9,10.128.0.0/10,10.192.0.0/12,10.212.0.0/14,10.216.0.0/13,10.224.0.0/11;10.0.0.0/9,10.128.0.0/10,10.192.0.0/11,10.228.0.0/14,10.232.0.0/13,10.240.0.0/12;10.0.0.0/12,10.16.0.0/13,10.24.0.0/14,10.28.0.0/15,10.32.0.0/11,10.64.0.0/10,10.128.0.0/9;10.0.0.0/11,10.34.0.0/15,10.36.0.0/14,10.40.0.0/13,10.48.0.0/12,10.64.0.0/10,10.128.0.0/9;10.0.0.0/10,10.64.0.0/11,10.96.0.0/14,10.102.0.0/15,10.104.0.0/13,10.112.0.0/12,10.128.0.0/9;10.0.0.0/10,10.64.0.0/11,10.96.0.0/13,10.106.0.0/15,10.108.0.0/14,10.112.0.0/12,10.128.0.0/9;10.0.0.0/9,10.130.0.0/15,10.132.0.0/14,10.136.0.0/13,10.144.0.0/12,10.160.0.0/11,10.192.0.0/10;10.0.0.0/9,10.128.0.0/13,10.136.0.0/15,10.140.0.0/14,10.144.0.0/12,10.160.0.0/11,10.192.0.0/10;10.0.0.0/9,10.128.0.0/11,10.162.0.0/15,10.164.0.0/14,10.168.0.0/13,10.176.0.0/12,10.192.0.0/10;10.0.0.0/9,10.128.0.0/11,10.160.0.0/14,10.166.0.0/15,10.168.0.0/13,10.176.0.0/12,10.192.0.0/10;10.0.0.0/9,10.128.0.0/11,10.160.0.0/12,10.176.0.0/13,10.184.0.0/14,10.188.0.0/15,10.192.0.0/10;10.0.0.0/9,10.128.0.0/10,10.192.0.0/11,10.224.0.0/14,10.228.0.0/15,10.232.0.0/13,10.240.0.0/12;10.0.0.0/9,10.128.0.0/10,10.192.0.0/11,10.224.0.0/13,10.234.0.0/15,10.236.0.0/14,10.240.0.0/12;10.0.0.0/9,10.128.0.0/10,10.192.0.0/11,10.224.0.0/13,10.232.0.0/14,10.238.0.0/15,10.240.0.0/12 capirca-2.0.9/tests/utils/iputils_test.py000066400000000000000000000017461437377527500205640ustar00rootroot00000000000000import pytest import pathlib from capirca.utils import iputils from 
capirca.lib import nacaddr file_directory = pathlib.Path(__file__).parent.absolute() exclude_address_testcases = [] with open(str(file_directory)+"/address_exclude_test_cases.txt", 'r') as f: for line in f: ipstr, exstrs, restrs = line.strip().split(' ') ip = nacaddr.IP(ipstr) exclude_ips = list(map(nacaddr.IP, exstrs.split(','))) expected_results = [] for i in restrs.split(';'): result_strings = i.split(',') ip_map = map(nacaddr.IP, result_strings) ip_list = list(ip_map) expected_results.append(ip_list) for ex, res in zip(exclude_ips, expected_results): exclude_address_testcases.append((ip, ex, res)) class TestIPUtils: @pytest.mark.unit @pytest.mark.parametrize("ip,exclude,expected", exclude_address_testcases) def test_exclude_address(self, ip, exclude, expected): result = iputils.exclude_address(ip, exclude) assert list(result) == expected capirca-2.0.9/tools/000077500000000000000000000000001437377527500143105ustar00rootroot00000000000000capirca-2.0.9/tools/__init__.py000066400000000000000000000000001437377527500164070ustar00rootroot00000000000000capirca-2.0.9/tools/cgrep.py000066400000000000000000000357511437377527500157750ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Simple util to grep through network and service definitions. Examples: To find out which tokens contain "10.4.3.1" use $ cgrep.py -i 10.4.3.1 To find out if token 'FOO' includes ip "1.2.3.4" use $ cgrep.py -t FOO -i 1.2.3.4 To find the difference and union of tokens 'FOO' and 'BAR' use $ cgrep.py -c FOO BAR To find the difference of network tokens to which 2 IPs belong use $ cgrep.py -g 1.1.1.1 2.2.2.2 To find which IPs are in the 'FOO' network token use $ cgrep.py -o FOO To find which port & protocol pairs are in a service token 'FOO' use $ cgrep.py -s FOO To find which service tokens contain port '22' and protocol 'tcp' use $ cgrep.py -p 22 tcp """ import argparse import pprint import sys from absl import app from absl import logging from capirca.lib import nacaddr from capirca.lib import naming def is_valid_ip(arg): """Validates a value to be an IP or not. Args: arg: potential IP address as a string. Returns: arg as IP object (if arg is an IP) Raises: Error (if arg is not an IP) """ try: nacaddr.IP(arg) except: raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg) return arg def cli_options(): """Builds the argparse options for cgrep. TODO(robankeny): Move this to flags. Returns: parser: the arguments, ready to be parsed. """ parser = argparse.ArgumentParser( description='c[apirca]grep', formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument('-d', '--def', dest='defs', help='Network Definitions directory location. \n', default='./def') # -i and -t can be used together, but not with any other option. 
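  # Added note (not in the original source): the accepted combinations are
  # ultimately policed in main() below; the command lines mirror the
  # examples in the module docstring:
  #   cgrep.py -i 10.4.3.1            # fine: -i on its own
  #   cgrep.py -t FOO -i 1.2.3.4      # fine: -t must be paired with -i
  #   cgrep.py -t FOO                 # main() asks for an IP via -i [addr]
  #   cgrep.py -i 1.2.3.4 -o FOO      # main(): -i only combines with -t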
ip_group = parser.add_argument_group() # take 1 or more IPs ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip, help='Return list of definitions containing the ' 'IP(s).\nMultiple IPs permitted.') ip_group.add_argument('-t', '--token', dest='token', help=('See if an IP is contained within the given ' 'token.\nMust be used in conjunction with ' '-i/--ip [addr].')) exclusive_group = parser.add_mutually_exclusive_group() # the rest of the arguments are mutually exclusive with each other, # and -i / -t exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2, metavar=('OBJ', 'OBJ'), help=('Compare the two given network ' 'definition tokens')) exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2, type=is_valid_ip, metavar=('IP', 'IP'), help=('Diff the network objects to' ' which the given IP(s) belong')) exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+', help=('Return list of IP(s) contained within ' 'the given token(s)')) exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+', help=('Return list of port(s) contained ' 'within given token(s)')) exclusive_group.add_argument('-p', '--port', dest='port', nargs=2, metavar=('PORT', 'PROTO'), help=('Returns a list of tokens containing ' 'the given port and protocol')) return parser def main(argv): """Determines the code path based on the arguments passed.""" del argv # Unused. parser = cli_options() options = parser.parse_args() db = naming.Naming(options.defs) p = pprint.PrettyPrinter(indent=1, depth=4, width=1).pprint # if -i and any other option: if options.ip and any([options.gmp, options.cmp, options.obj, options.svc, options.port]): logging.info('You can only use -i with -t or by itself') # if -i and -t elif options.token and options.ip: try: get_nets([options.token], db) except naming.UndefinedAddressError: logging.info("Network group '%s' is not defined!", options.token) else: results = compare_ip_token(options, db) logging.info(results) # if -t, but not -i; invalid! elif options.token and not options.ip: logging.info('You must specify an IP Address with -i [addr]') # if -i elif options.ip: for ip in options.ip: groups = get_ip_parents(ip, db) logging.info('Results for IP: %s', ip) # iterate and print the tokens we found. for name, networks in groups: # print the group name [0], and the networks it was in [1] logging.info('%s %s', name, networks) elif options.gmp: common, diff1, diff2 = group_diff(options, db) print_diff(options.gmp[0], common, diff1, diff2) logging.info('') print_diff(options.gmp[1], common, diff2, diff1) # if -c elif options.cmp: meta, results = compare_tokens(options, db) first_name = meta[0] second_name = meta[1] union = meta[2] logging.info('Union of %s and %s:\n %s\n', first_name, second_name, union) logging.info('Diff of %s and %s:', first_name, second_name) for i in results: logging.info(' %s', i) logging.info('') first_obj, sec_obj = options.cmp if check_encapsulated('network', first_obj, sec_obj, db): logging.info('%s fully encapsulates %s', sec_obj, first_obj) else: logging.info('%s does _not_ fully encapsulate %s', sec_obj, first_obj) # check the other way around. 
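    # (Added note, not in the original source: check_encapsulated('network',
    # A, B, db) returns True only when every network in token A is a subnet
    # of some same-family network in token B; e.g. a token holding just
    # 10.1.1.0/24 is fully encapsulated by one holding 10.0.0.0/8, but not
    # the other way around.)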
if check_encapsulated('network', sec_obj, first_obj, db): logging.info('%s fully encapsulates %s', first_obj, sec_obj) else: logging.info('%s does _not_ fully encapsulate %s', first_obj, sec_obj) # if -o elif options.obj: for obj in options.obj: try: token, ips = get_nets([obj], db)[0] except naming.UndefinedAddressError: logging.info('%s is an invalid object', obj) else: logging.info('%s:', token) # convert list of ip objects to strings and sort them ips.sort(key=lambda x: int(x.ip)) p([str(x) for x in ips]) # if -s elif options.svc: try: results = get_ports(options.svc, db) except naming.UndefinedServiceError: logging.info('%s contains an invalid service object', str(options.svc)) else: for result in get_ports(options.svc, db): svc, port = result logging.info('%s:', svc) p(port) # if -p elif options.port: port, protocol, result = get_services(options, db) logging.info('%s/%s:', port, protocol) p(result) # if nothing is passed elif not any((options.cmp, options.ip, options.token, options.obj, options.svc, options.port)): parser.print_help() logging.info('') def check_encapsulated(obj_type, first_obj, second_obj, db): """Checks if a network/service object is entirely contained within another. Args: obj_type: "network" or "service" first_obj: The name of the first network/service object second_obj: The name of the secondnetwork/service object db: The network and service definitions Returns: Error or bool: ValueError if an invalid object type is passed True if the first_obj is entirely within second_obj, otherwise False Raises: ValueError: When value is not a network or service. """ if obj_type == 'network': # the indexing is to get the list of networks out of the tuple[1] and # list[0] returned by get_nets first = get_nets([first_obj], db)[0][1] second = get_nets([second_obj], db)[0][1] elif obj_type == 'service': first = get_ports([first_obj], db)[0][1] second = get_ports([second_obj], db)[0][1] else: raise ValueError("check_encapsulated() currently only supports " "'network' and 'service' for the obj_type parameter") # iterates over each object in the first group, and then each obj in the # second group, making sure each one in the first is contained # somewhere in the second. for obj in first: for sec_obj in second: if obj.version == sec_obj.version: if obj.subnet_of(sec_obj): break # if we got through every object in the second group, and didn't have # a match, then the first group is not entirely contained. else: return False # if we got here, then the group was fully contained. return True def print_diff(ip, common, diff1, diff2): """Print out the common, added, and removed network objects between 2 IPs. Args: ip: the IP being compared against common: the network objects shared between the two IPs ('ip' and the other passed into options.cmp) diff1: the network objects present in 'ip' but not in the other IP passed into options.cmp diff2: the network objects not present in 'ip' but are present in the other IP passed into options.cmp """ logging.info('IP: %s', ip) if common: common = [' {0}'.format(elem) for elem in common] logging.info('\n'.join(common)) if diff1: diff = ['+ {0}'.format(elem) for elem in diff1] logging.info('\n'.join(diff)) if diff2: diff = ['- {0}'.format(elem) for elem in diff2] logging.info('\n'.join(diff)) def group_diff(options, db): """Diffs two different group objects. 
Args: options: the options sent to the script db : network and service definitions Returns: tuple: the common lines, the differences from 1 to 2, and the differences from 2 to 1 """ nested_rvals = [] for ip in options.gmp: nested_rvals.append(get_ip_parents(ip, db)) # get just the list of groups, stripping out the networks. group1 = [x[0] for x in nested_rvals[0]] group2 = [x[0] for x in nested_rvals[1]] common = sorted(list(set(group1) & set(group2))) diff1 = sorted(list(set(group1) - set(group2))) diff2 = sorted(list(set(group2) - set(group1))) return common, diff1, diff2 def get_ip_parents(ip, db): """Gets a list of all network objects that include an IP. Args: ip: the IP we're looking for the parents of db: network and service definitions Returns: results: a list of all groups that include the IP, in the format: [("Group", ["networks", "matched"]), (etc)] """ results = [] rval = db.GetIpParents(ip) for v in rval: nested = db.GetNetParents(v) prefix_and_nets = get_nets_and_highest_prefix(ip, v, db) if nested: for n in nested: results.append(('%s -> %s' % (n, v), prefix_and_nets)) else: results.append((v, prefix_and_nets)) # sort the results by prefix length descending results = sorted(results, key=lambda x: x[1][0], reverse=True) # strip out the no longer needed prefix lengths before handing off for index, group in enumerate(results): results[index] = (group[0], group[1][1]) return results def get_nets_and_highest_prefix(ip, net_group, db): """Find the highest prefix length in all networks given it contains the IP. Args: ip: the IP address contained in net_group net_group: the name of the network object we'll be looking through db: network and service definitions Returns: highest_prefix_length, networks as tuple highest_prefix_length : the longest prefix length found, networks : network objects """ highest_prefix_length = 0 networks = [] ip = nacaddr.IP(ip) # loop through all the networks in the net_group for net in get_nets([net_group], db)[0][1]: # find the highest prefix length for the networks that contain the IP if ip.version == net.version: if ip.subnet_of(net): networks.append(str(net)) if net.prefixlen > highest_prefix_length: highest_prefix_length = net.prefixlen return highest_prefix_length, networks def get_nets(objects, db): """Gets a list of all networks that are inside of a network object. Args: objects: network objects db: network and service definitions Returns: results : all networks inside a network object """ results = [] for obj in objects: net = db.GetNet(obj) results.append((obj, net)) return results def compare_tokens(options, db): """Compares to network objects against each other. Args: options: the options sent to the script db: network and service definitions Returns: meta, results : ((first object, second object, union of those two), diff of those two network objects) """ t1, t2 = options.cmp d1 = db.GetNet(t1) d2 = db.GetNet(t2) union = list(set(d1 + d2)) meta = (t1, t2, union) results = [] for el in set(d1 + d2): el = nacaddr.IP(el) if el in d1 and el in d2: results.append(str(el)) elif el in d1: results.append(str(el)) elif el in d2: results.append(str(el)) return meta, results def compare_ip_token(options, db): """Looks to see if a network IP is contained in a network object. 
def compare_ip_token(options, db):
  """Looks to see if a network IP is contained in a network object.

  Args:
    options: the options sent to the script
    db: network and service definitions

  Returns:
    results: end-user string stating the results
  """
  token = options.token
  results = []
  for ip in options.ip:
    rval = db.GetIpParents(ip)
    if token in rval:
      results = '%s is in %s' % (ip, token)
    else:
      results = '%s is _not_ in %s' % (ip, token)
  return results


def get_ports(svc_group, db):
  """Gets the ports and protocols defined in a service group.

  Args:
    svc_group: a list of strings for each service group
    db: network and service definitions

  Returns:
    results: a list of tuples for each service defined, in the format:
      (service name, "port/protocol")
  """
  results = []
  for svc in svc_group:
    port = db.GetService(svc)
    results.append((svc, port))
  return results


def get_services(options, db):
  """Finds any services that include a specific port/protocol pair.

  Args:
    options: the options sent to the script
    db: network and service definitions

  Returns:
    port, protocol, results as tuple in the format:
      (port, protocol, list of the services containing this pair)
  """
  results = []
  port, protocol = options.port
  # swap values if they were passed in the wrong order
  if port.isalpha() and protocol.isdigit():
    port, protocol = protocol, port
  results = db.GetPortParents(port, protocol)
  return port, protocol, results


if __name__ == '__main__':
  app.run(main, argv=sys.argv[:1])
capirca-2.0.9/tools/current_lint_errors.txt000066400000000000000000002605461437377527500211700ustar00rootroot00000000000000************* Module aclcheck_cmdline C0103: 30 main: Invalid variable name "_parser"[invalid-name] C0103: 45 main: Invalid variable name "FLAGS"[invalid-name] ************* Module aclgen W0212: 124 : Access to a protected member _deepcopy_dispatch of a client class[protected-access] R0204: 246 RenderFile: Redefinition of acl_obj type from lib.juniper.Juniper to lib.junipersrx.JuniperSRX[redefined-variable-type] C0103: 122 _deepcopy_method: Invalid function name "_deepcopy_method"[invalid-name] C0411: 54 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module cgrep C0325: 135 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 142 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 145 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 149 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 155 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 159 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 164 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 170 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 171 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 173 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 174 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 177 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 179 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 182 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 184 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 192 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 194 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 204 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 208 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 214 : Unnecessary parens after 'print'
keyword[superfluous-parens] C0325: 221 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 277 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 280 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 283 : Unnecessary parens after 'print' keyword[superfluous-parens] C0325: 286 : Unnecessary parens after 'print' keyword[superfluous-parens] C0111: 1 : Missing module docstring[missing-docstring] ************* Module definate C0111: 1 : Missing module docstring[missing-docstring] ************* Module definate.yaml_validator C0123: 47 YamlValidator.CheckConfigurationItem: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 71 YamlValidator.CheckConfiguration: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 74 YamlValidator.CheckConfiguration: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 78 YamlValidator.CheckConfiguration: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 80 YamlValidator.CheckConfiguration: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] ************* Module lib.aclcheck C0123: 112 AclCheck.__init__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] W0622: 181 AclCheck.ActionMatch: Redefining built-in 'next'[redefined-builtin] W0622: 195 AclCheck.DescribeMatches: Redefining built-in 'next'[redefined-builtin] W0622: 203 AclCheck.__str__: Redefining built-in 'next'[redefined-builtin] W0622: 251 AclCheck._AddrInside: Redefining built-in 'next'[redefined-builtin] ************* Module lib.aclgenerator C0200: 428 WrapWords: Consider using enumerate instead of iterating with range and len[consider-using-enumerate] ************* Module lib.ciscoasa C0305: 459 : Trailing newlines[trailing-newlines] W1401: 392 : Anomalous backslash in string: '\s'. 
String constant might be missing an r prefix.[anomalous-backslash-in-string] W0231: 62 Term.__init__: __init__ method from base class 'Term' is not called[super-init-not-called] W0622: 100 Term.__str__: Redefining built-in 'next'[redefined-builtin] C0123: 178 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 179 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] R0101: 169 Term.__str__: Too many nested blocks (8/5)[too-many-nested-blocks] C0123: 182 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 183 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] R0101: 169 Term.__str__: Too many nested blocks (8/5)[too-many-nested-blocks] C0103: 199 Term._TermPortToProtocol: Invalid argument name "portNumber"[invalid-name] C0111: 199 Term._TermPortToProtocol: Missing method docstring[missing-docstring] C0103: 200 Term._TermPortToProtocol: Invalid variable name "_ASA_PORTS_TCP"[invalid-name] C0103: 248 Term._TermPortToProtocol: Invalid variable name "_ASA_PORTS_UDP"[invalid-name] C0103: 281 Term._TermPortToProtocol: Invalid variable name "_ASA_TYPES_ICMP"[invalid-name] C0123: 332 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 332 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 337 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 337 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 343 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 343 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 348 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 348 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] W0612: 417 CiscoASA._TranslatePolicy: Unused variable 'filter_options'[unused-variable] W0612: 437 CiscoASA.__str__: Unused variable 'target_header'[unused-variable] ************* Module lib.cisco C0123: 122 TermStandard.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] R0101: 228 ObjectGroup.__str__: Too many nested blocks (6/5)[too-many-nested-blocks] E0602: 513 Term.__str__: Undefined variable 'ExtendedAclTermError'[undefined-variable] C0123: 563 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 563 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 568 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 568 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 574 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 574 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 579 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 579 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 585 Term._TermletToStr: Using type() instead of isinstance() for a 
typecheck.[unidiomatic-typecheck] C0123: 587 Term._TermletToStr: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0411: 29 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.demo W0231: 42 Term.__init__: __init__ method from base class 'Term' is not called[super-init-not-called] C0111: 137 Term._Group: Missing method docstring[missing-docstring] ************* Module lib.iptables R0204: 103 Term.__init__: Redefinition of self._all_ips type from lib.nacaddr.IPv6 to lib.nacaddr.IPv4[redefined-variable-type] R0101: 672 Iptables._TranslatePolicy: Too many nested blocks (6/5)[too-many-nested-blocks] C0411: 26 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.juniper C0111: 639 Term._Comment: Missing method docstring[missing-docstring] C0411: 27 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.junipersrx R0204: 136 Term.__str__: Redefinition of daddr_check type from list to set[redefined-variable-type] R0101: 626 JuniperSRX._GenerateAddressBook: Too many nested blocks (6/5)[too-many-nested-blocks] R0204: 651 JuniperSRX._GenerateAddressBook: Redefinition of address_book_groups_dict type from dict to collections.OrderedDict[redefined-variable-type] C0111: 696 JuniperSRX._GenerateApplications: Missing method docstring[missing-docstring] R0101: 701 JuniperSRX._GenerateApplications: Too many nested blocks (8/5)[too-many-nested-blocks] R0101: 701 JuniperSRX._GenerateApplications: Too many nested blocks (7/5)[too-many-nested-blocks] C0411: 28 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.nacaddr W0212: 212 CollapseAddrList: Access to a protected member _get_networks_key of a client class[protected-access] W0212: 212 CollapseAddrList: Access to a protected member _BaseNet of a client class[protected-access] W0212: 217 SortAddrList: Access to a protected member _get_networks_key of a client class[protected-access] W0212: 217 SortAddrList: Access to a protected member _BaseNet of a client class[protected-access] C0103: 268 : Invalid constant name "ExcludeAddrs"[invalid-name] ************* Module lib.naming C0123: 159 Naming.GetIpParents: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 159 Naming.GetIpParents: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 163 Naming.GetIpParents: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 163 Naming.GetIpParents: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] W0622: 413 Naming.GetNet: Redefining built-in 'next'[redefined-builtin] R0101: 560 Naming._ParseLine: Too many nested blocks (6/5)[too-many-nested-blocks] ************* Module lib.nftables R0204: 70 Term.__init__: Redefinition of self.all_ips type from lib.nacaddr.IPv6 to lib.nacaddr.IPv4[redefined-variable-type] C0111: 186 Term._FormatMatch: Missing method docstring[missing-docstring] C0411: 32 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.nsxv W0231: 105 Term.__init__: __init__ method from base class 'Term' is not called[super-init-not-called] C0123: 258 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 269 Term.__str__: Using 
type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 285 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 296 Term.__str__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0411: 23 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.packetfilter R0204: 177 Term.__str__: Redefinition of source_port type from list to str[redefined-variable-type] R0204: 179 Term.__str__: Redefinition of destination_port type from list to str[redefined-variable-type] C0111: 326 Term._GenerateProtoStatement: Missing method docstring[missing-docstring] C0111: 340 Term._GenerateAddrStatement: Missing method docstring[missing-docstring] C0411: 25 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.pcap C0111: 239 Term._GenerateAddrStatement: Missing method docstring[missing-docstring] C0111: 265 Term._GeneratePortStatement: Missing method docstring[missing-docstring] C0111: 278 Term._GenerateTcpOptions: Missing method docstring[missing-docstring] C0411: 34 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.policy W0110: 860 Term.GetAddressOfVersion: map/filter on lambda could be replaced by comprehension[deprecated-lambda] C0123: 876 Term.AddObject: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:1333 Header.AddObject: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] W0110:1349 Header.platforms: map/filter on lambda could be replaced by comprehension[deprecated-lambda] C0123:1630 p_target: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:1649 p_header_spec: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:1669 p_terms: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:1714 p_term_spec: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:1786 p_one_or_more_dscps: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:2018 p_one_or_more_strings: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:2030 p_one_or_more_ints: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123:2044 p_strings_or_ints: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] R0204:2196 : Redefinition of ret type from int to bool[redefined-variable-type] C0103: 106 TranslatePorts: Invalid function name "TranslatePorts"[invalid-name] C0103:2072 _ReadFile: Invalid function name "_ReadFile"[invalid-name] C0103:2096 _Preprocess: Invalid function name "_Preprocess"[invalid-name] C0103:2131 ParseFile: Invalid function name "ParseFile"[invalid-name] C0103:2153 ParsePolicy: Invalid function name "ParsePolicy"[invalid-name] C0411: 31 : standard import "import logging" comes before "from lib import nacaddr"[wrong-import-order] ************* Module lib.policyreader W0622: 202 Policy.Matches: Redefining built-in 'next'[redefined-builtin] ************* Module lib.policy_simple C0123: 44 Field.__eq__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 391 Block.__eq__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 523 BlankLine.__eq__: Using type() instead of 
isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 539 CommentLine.__eq__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] C0123: 557 Include.__eq__: Using type() instead of isinstance() for a typecheck.[unidiomatic-typecheck] ************* Module lib.port C0325: 100 : Unnecessary parens after 'return' keyword[superfluous-parens] W0702: 108 PPP.__lt__: No exception type(s) specified[bare-except] W0702: 117 PPP.__gt__: No exception type(s) specified[bare-except] W0702: 126 PPP.__le__: No exception type(s) specified[bare-except] W0702: 135 PPP.__ge__: No exception type(s) specified[bare-except] W0702: 147 PPP.__eq__: No exception type(s) specified[bare-except] ************* Module lib.setup C0111: 1 : Missing module docstring[missing-docstring] E0602: 25 : Undefined variable 'ipaddr'[undefined-variable] W0611: 20 : Unused import capirca[unused-import] ************* Module lib.windows_advfirewall C0200: 73 Term._HandleIcmpTypes: Consider using enumerate instead of iterating with range and len[consider-using-enumerate] C0122: 102 Term._ComposeRule: Comparison should be self.filter.lower() == 'in'[misplaced-comparison-constant] C0411: 22 : standard import "import logging" comes before "from lib import windows"[wrong-import-order] ************* Module lib.windows_ipsec C0411: 21 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module lib.windows R0204: 60 Term.__init__: Redefinition of self._all_ips type from lib.nacaddr.IPv6 to lib.nacaddr.IPv4[redefined-variable-type] R0101: 254 WindowsGenerator._TranslatePolicy: Too many nested blocks (6/5)[too-many-nested-blocks] C0411: 22 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module setup C0330: 49 : Wrong hanging indentation (add 2 spaces). 'python-gflags', 'ply', 'ipaddr', 'mock'] ^ |[bad-continuation] C0111: 1 : Missing module docstring[missing-docstring] ************* Module tests.cgrep_test C0330: 266 : Wrong continued indentation (add 1 space). 
('GOOGLE_DNS -> GOOGLE_PUBLIC_DNS_ANYCAST', ^|[bad-continuation] C0111: 214 CgrepTest: Missing class docstring[missing-docstring] C0111: 332 CgrepTest.test_compare_same_token: Missing method docstring[missing-docstring] C0111: 384 CgrepTest.test_group_diff: Missing method docstring[missing-docstring] C0111: 397 CgrepTest.test_group_diff_identical: Missing method docstring[missing-docstring] C0111: 414 CgrepTest.test_token_to_ips: Missing method docstring[missing-docstring] C0111: 434 CgrepTest.test_token_to_ip_fail: Missing method docstring[missing-docstring] C0111: 456 CgrepTest.test_svc_to_port: Missing method docstring[missing-docstring] C0111: 472 CgrepTest.test_svc_to_port_fail: Missing method docstring[missing-docstring] ************* Module tests.integration.aclgen_test C0111: 1 : Missing module docstring[missing-docstring] C0111: 52 TestAclGenDemo.test_smoke_test_generates_successfully: Missing method docstring[missing-docstring] C0111: 87 TestAclGenDemo.test_generate_single_policy: Missing method docstring[missing-docstring] C0111: 140 AclGenArgumentsTests.test_missing_defs_folder_raises_error: Missing method docstring[missing-docstring] C0111: 160 AclGenCharacterizationTests.test_characterization: Missing method docstring[missing-docstring] ************* Module tests.lib.aclcheck_test C0111: 63 AclCheckTest: Missing class docstring[missing-docstring] C0103: 77 AclCheckTest.testExactMatches: Invalid method name "testExactMatches"[invalid-name] C0103: 83 AclCheckTest.testAclCheck: Invalid method name "testAclCheck"[invalid-name] C0111: 83 AclCheckTest.testAclCheck: Missing method docstring[missing-docstring] C0103: 115 AclCheckTest.testExceptions: Invalid method name "testExceptions"[invalid-name] C0111: 115 AclCheckTest.testExceptions: Missing method docstring[missing-docstring] ************* Module tests.lib.aclgenerator_test C0111: 90 ACLGeneratorTest: Missing class docstring[missing-docstring] C0103: 95 ACLGeneratorTest.testEstablishedNostate: Invalid method name "testEstablishedNostate"[invalid-name] C0103: 105 ACLGeneratorTest.testSupportedAF: Invalid method name "testSupportedAF"[invalid-name] C0103: 114 ACLGeneratorTest.testTermNameBelowLimit: Invalid method name "testTermNameBelowLimit"[invalid-name] C0103: 130 ACLGeneratorTest.testLongTermAbbreviation: Invalid method name "testLongTermAbbreviation"[invalid-name] C0103: 141 ACLGeneratorTest.testTermNameTruncation: Invalid method name "testTermNameTruncation"[invalid-name] C0103: 151 ACLGeneratorTest.testLongTermName: Invalid method name "testLongTermName"[invalid-name] C0103: 161 ACLGeneratorTest.testAddRepositoryTags: Invalid method name "testAddRepositoryTags"[invalid-name] C0111: 161 ACLGeneratorTest.testAddRepositoryTags: Missing method docstring[missing-docstring] ************* Module tests.lib.arista_test C0103: 51 AristaTest.testExtendedEosSyntax: Invalid method name "testExtendedEosSyntax"[invalid-name] ************* Module tests.lib.aruba_test C0111: 72 ArubaTest: Missing class docstring[missing-docstring] C0103: 77 ArubaTest.testNetdestination: Invalid method name "testNetdestination"[invalid-name] C0103: 88 ArubaTest.testNetdestination6: Invalid method name "testNetdestination6"[invalid-name] C0103: 99 ArubaTest.testActionUnsupported: Invalid method name "testActionUnsupported"[invalid-name] C0111: 99 ArubaTest.testActionUnsupported: Missing method docstring[missing-docstring] ************* Module tests.lib.brocade_test C0111: 43 BrocadeTest: Missing class docstring[missing-docstring] C0103: 48 
BrocadeTest.testTcpEstablished: Invalid method name "testTcpEstablished"[invalid-name] C0103: 54 BrocadeTest.testNoTermRemark: Invalid method name "testNoTermRemark"[invalid-name] ************* Module tests.lib.cisco_test C0330: 313 : Wrong hanging indentation (remove 4 spaces). 'CONSECUTIVE_PORTS', 'tcp') | ^[bad-continuation] C0111: 270 CiscoTest: Missing class docstring[missing-docstring] C0103: 275 CiscoTest.testIPVersion: Invalid method name "testIPVersion"[invalid-name] C0103: 286 CiscoTest.testOptions: Invalid method name "testOptions"[invalid-name] C0111: 286 CiscoTest.testOptions: Missing method docstring[missing-docstring] C0103: 300 CiscoTest.testExpandingConsequtivePorts: Invalid method name "testExpandingConsequtivePorts"[invalid-name] C0111: 300 CiscoTest.testExpandingConsequtivePorts: Missing method docstring[missing-docstring] C0103: 315 CiscoTest.testDSCP: Invalid method name "testDSCP"[invalid-name] C0103: 321 CiscoTest.testTermAndFilterName: Invalid method name "testTermAndFilterName"[invalid-name] C0103: 327 CiscoTest.testRemark: Invalid method name "testRemark"[invalid-name] C0111: 327 CiscoTest.testRemark: Missing method docstring[missing-docstring] C0103: 344 CiscoTest.testTcpEstablished: Invalid method name "testTcpEstablished"[invalid-name] C0103: 350 CiscoTest.testLogging: Invalid method name "testLogging"[invalid-name] C0103: 356 CiscoTest.testVerbatimTerm: Invalid method name "testVerbatimTerm"[invalid-name] C0103: 364 CiscoTest.testBadStandardTerm: Invalid method name "testBadStandardTerm"[invalid-name] C0103: 373 CiscoTest.testStandardTermHost: Invalid method name "testStandardTermHost"[invalid-name] C0103: 384 CiscoTest.testStandardTermNet: Invalid method name "testStandardTermNet"[invalid-name] C0103: 395 CiscoTest.testNamedStandard: Invalid method name "testNamedStandard"[invalid-name] C0103: 406 CiscoTest.testNoIPv6InOutput: Invalid method name "testNoIPv6InOutput"[invalid-name] C0103: 416 CiscoTest.testStandardFilterName: Invalid method name "testStandardFilterName"[invalid-name] C0103: 426 CiscoTest.testStandardFilterRange: Invalid method name "testStandardFilterRange"[invalid-name] C0103: 436 CiscoTest.testActionsSupport: Invalid method name "testActionsSupport"[invalid-name] W0212: 437 CiscoTest.testActionsSupport: Access to a protected member _ACTION_TABLE of a client class[protected-access] C0103: 440 CiscoTest.testObjectGroup: Invalid method name "testObjectGroup"[invalid-name] C0111: 440 CiscoTest.testObjectGroup: Missing method docstring[missing-docstring] C0103: 477 CiscoTest.testInet6: Invalid method name "testInet6"[invalid-name] C0111: 477 CiscoTest.testInet6: Missing method docstring[missing-docstring] C0103: 493 CiscoTest.testMixed: Invalid method name "testMixed"[invalid-name] C0111: 493 CiscoTest.testMixed: Missing method docstring[missing-docstring] C0103: 515 CiscoTest.testDsmo: Invalid method name "testDsmo"[invalid-name] C0111: 515 CiscoTest.testDsmo: Missing method docstring[missing-docstring] C0103: 528 CiscoTest.testUdpEstablished: Invalid method name "testUdpEstablished"[invalid-name] C0103: 534 CiscoTest.testIcmpTypes: Invalid method name "testIcmpTypes"[invalid-name] C0111: 534 CiscoTest.testIcmpTypes: Missing method docstring[missing-docstring] C0103: 547 CiscoTest.testIpv6IcmpTypes: Invalid method name "testIpv6IcmpTypes"[invalid-name] C0111: 547 CiscoTest.testIpv6IcmpTypes: Missing method docstring[missing-docstring] C0103: 561 CiscoTest.testIcmpv6InetMismatch: Invalid method name "testIcmpv6InetMismatch"[invalid-name] C0103: 
573 CiscoTest.testIcmpInet6Mismatch: Invalid method name "testIcmpInet6Mismatch"[invalid-name] C0103: 584 CiscoTest.testUnsupportedKeywordsError: Invalid method name "testUnsupportedKeywordsError"[invalid-name] C0103: 594 CiscoTest.testDefaultInet6Protocol: Invalid method name "testDefaultInet6Protocol"[invalid-name] C0103: 600 CiscoTest.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 609 CiscoTest.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 619 CiscoTest.testTermHopByHop: Invalid method name "testTermHopByHop"[invalid-name] C0103: 624 CiscoTest.testOwnerTerm: Invalid method name "testOwnerTerm"[invalid-name] C0103: 630 CiscoTest.testRemoveTrailingCommentWhitespace: Invalid method name "testRemoveTrailingCommentWhitespace"[invalid-name] W0612: 632 CiscoTest.testRemoveTrailingCommentWhitespace: Unused variable 'acl'[unused-variable] ************* Module tests.lib.ciscoxr_test C0111: 66 CiscoXRTest: Missing class docstring[missing-docstring] C0103: 71 CiscoXRTest.testStandardTermHost: Invalid method name "testStandardTermHost"[invalid-name] C0103: 82 CiscoXRTest.testStandardTermHostIPv6: Invalid method name "testStandardTermHostIPv6"[invalid-name] C0111: 82 CiscoXRTest.testStandardTermHostIPv6: Missing method docstring[missing-docstring] ************* Module tests.lib.gce_test C0111: 297 GCETest: Missing class docstring[missing-docstring] C0103: 306 GCETest.testGenericTerm: Invalid method name "testGenericTerm"[invalid-name] C0111: 306 GCETest.testGenericTerm: Missing method docstring[missing-docstring] C0103: 320 GCETest.testGenericTermWithoutNetwork: Invalid method name "testGenericTermWithoutNetwork"[invalid-name] C0111: 320 GCETest.testGenericTermWithoutNetwork: Missing method docstring[missing-docstring] C0103: 334 GCETest.testGenericTermWithExclude: Invalid method name "testGenericTermWithExclude"[invalid-name] C0111: 334 GCETest.testGenericTermWithExclude: Missing method docstring[missing-docstring] C0103: 350 GCETest.testGenericTermWithExcludeRange: Invalid method name "testGenericTermWithExcludeRange"[invalid-name] C0111: 350 GCETest.testGenericTermWithExcludeRange: Missing method docstring[missing-docstring] C0103: 367 GCETest.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 378 GCETest.testSourceNetworkSplit: Invalid method name "testSourceNetworkSplit"[invalid-name] C0111: 378 GCETest.testSourceNetworkSplit: Missing method docstring[missing-docstring] C0103: 398 GCETest.testRaisesWithUnsupportedAction: Invalid method name "testRaisesWithUnsupportedAction"[invalid-name] C0111: 398 GCETest.testRaisesWithUnsupportedAction: Missing method docstring[missing-docstring] C0103: 413 GCETest.testRaisesWithoutSource: Invalid method name "testRaisesWithoutSource"[invalid-name] C0111: 413 GCETest.testRaisesWithoutSource: Missing method docstring[missing-docstring] C0103: 426 GCETest.testRaisesWithOnlySourceExclusion: Invalid method name "testRaisesWithOnlySourceExclusion"[invalid-name] C0111: 426 GCETest.testRaisesWithOnlySourceExclusion: Missing method docstring[missing-docstring] C0103: 442 GCETest.testRaisesNoSourceAfterExclude: Invalid method name "testRaisesNoSourceAfterExclude"[invalid-name] C0111: 442 GCETest.testRaisesNoSourceAfterExclude: Missing method docstring[missing-docstring] C0103: 462 GCETest.testRaisesWithSourcePort: Invalid method name "testRaisesWithSourcePort"[invalid-name] C0111: 462 GCETest.testRaisesWithSourcePort: Missing method docstring[missing-docstring] C0103: 477 
GCETest.testRaisesWithLongTermName: Invalid method name "testRaisesWithLongTermName"[invalid-name] C0111: 477 GCETest.testRaisesWithLongTermName: Missing method docstring[missing-docstring] C0103: 491 GCETest.testRaisesWithIcmpAndDestinationPort: Invalid method name "testRaisesWithIcmpAndDestinationPort"[invalid-name] C0111: 491 GCETest.testRaisesWithIcmpAndDestinationPort: Missing method docstring[missing-docstring] ************* Module tests.lib.ipset_test C0305: 193 : Trailing newlines[trailing-newlines] C0111: 60 IpsetTest: Missing class docstring[missing-docstring] C0103: 65 IpsetTest.testMarkers: Invalid method name "testMarkers"[invalid-name] C0103: 76 IpsetTest.testGenerateSetName: Invalid method name "testGenerateSetName"[invalid-name] C0111: 76 IpsetTest.testGenerateSetName: Missing method docstring[missing-docstring] W0212: 83 IpsetTest.testGenerateSetName: Access to a protected member _GenerateSetName of a client class[protected-access] W0212: 85 IpsetTest.testGenerateSetName: Access to a protected member _GenerateSetName of a client class[protected-access] W0212: 89 IpsetTest.testGenerateSetName: Access to a protected member _GenerateSetName of a client class[protected-access] W0212: 91 IpsetTest.testGenerateSetName: Access to a protected member _GenerateSetName of a client class[protected-access] C0103: 95 IpsetTest.testOneSourceAddress: Invalid method name "testOneSourceAddress"[invalid-name] C0103: 106 IpsetTest.testOneDestinationAddress: Invalid method name "testOneDestinationAddress"[invalid-name] C0103: 117 IpsetTest.testOneSourceAndDestinationAddress: Invalid method name "testOneSourceAndDestinationAddress"[invalid-name] C0111: 117 IpsetTest.testOneSourceAndDestinationAddress: Missing method docstring[missing-docstring] C0103: 134 IpsetTest.testManySourceAddresses: Invalid method name "testManySourceAddresses"[invalid-name] C0111: 134 IpsetTest.testManySourceAddresses: Missing method docstring[missing-docstring] C0103: 150 IpsetTest.testManyDestinationAddresses: Invalid method name "testManyDestinationAddresses"[invalid-name] C0111: 150 IpsetTest.testManyDestinationAddresses: Missing method docstring[missing-docstring] C0103: 166 IpsetTest.testManySourceAndDestinationAddresses: Invalid method name "testManySourceAndDestinationAddresses"[invalid-name] C0111: 166 IpsetTest.testManySourceAndDestinationAddresses: Missing method docstring[missing-docstring] ************* Module tests.lib.iptables_test C0330: 831 : Wrong hanging indentation (remove 4 spaces). 'FOURTEEN_PORTS', 'tcp') | ^[bad-continuation] C0330: 845 : Wrong hanging indentation (remove 4 spaces). 
'FIFTEEN_PORTS_WITH_RANGES', 'tcp') | ^[bad-continuation] C0111: 412 AclCheckTest: Missing class docstring[missing-docstring] C0103: 418 AclCheckTest.testChainFilter: Invalid method name "testChainFilter"[invalid-name] C0111: 418 AclCheckTest.testChainFilter: Missing method docstring[missing-docstring] C0103: 434 AclCheckTest.testUnsupportedTargetOption: Invalid method name "testUnsupportedTargetOption"[invalid-name] C0103: 439 AclCheckTest.testGoodPolicy: Invalid method name "testGoodPolicy"[invalid-name] C0103: 451 AclCheckTest.testCustomChain: Invalid method name "testCustomChain"[invalid-name] C0103: 458 AclCheckTest.testChainNoTarget: Invalid method name "testChainNoTarget"[invalid-name] C0103: 470 AclCheckTest.testCustomChainNoTarget: Invalid method name "testCustomChainNoTarget"[invalid-name] C0103: 481 AclCheckTest.testExcludeReturnsPolicy: Invalid method name "testExcludeReturnsPolicy"[invalid-name] C0111: 481 AclCheckTest.testExcludeReturnsPolicy: Missing method docstring[missing-docstring] C0103: 507 AclCheckTest.testExcludeAddressesPolicy: Invalid method name "testExcludeAddressesPolicy"[invalid-name] C0111: 507 AclCheckTest.testExcludeAddressesPolicy: Missing method docstring[missing-docstring] C0103: 528 AclCheckTest.testAddExcludeSourceForLengthPolicy: Invalid method name "testAddExcludeSourceForLengthPolicy"[invalid-name] C0111: 528 AclCheckTest.testAddExcludeSourceForLengthPolicy: Missing method docstring[missing-docstring] C0103: 570 AclCheckTest.testAddExcludeDestForLengthPolicy: Invalid method name "testAddExcludeDestForLengthPolicy"[invalid-name] C0111: 570 AclCheckTest.testAddExcludeDestForLengthPolicy: Missing method docstring[missing-docstring] C0103: 612 AclCheckTest.testOptions: Invalid method name "testOptions"[invalid-name] C0111: 612 AclCheckTest.testOptions: Missing method docstring[missing-docstring] C0103: 628 AclCheckTest.testRejectReset: Invalid method name "testRejectReset"[invalid-name] C0103: 635 AclCheckTest.testReject: Invalid method name "testReject"[invalid-name] C0103: 642 AclCheckTest.testRejectIpv6: Invalid method name "testRejectIpv6"[invalid-name] C0103: 650 AclCheckTest.testIPv6Headers: Invalid method name "testIPv6Headers"[invalid-name] C0103: 659 AclCheckTest.testNextTerm: Invalid method name "testNextTerm"[invalid-name] C0103: 666 AclCheckTest.testProtocols: Invalid method name "testProtocols"[invalid-name] C0111: 666 AclCheckTest.testProtocols: Missing method docstring[missing-docstring] C0103: 678 AclCheckTest.testVerbatimTerm: Invalid method name "testVerbatimTerm"[invalid-name] C0103: 690 AclCheckTest.testCommentReflowing: Invalid method name "testCommentReflowing"[invalid-name] C0103: 701 AclCheckTest.testLongTermName: Invalid method name "testLongTermName"[invalid-name] C0103: 706 AclCheckTest.testLongTermAbbreviation: Invalid method name "testLongTermAbbreviation"[invalid-name] C0103: 713 AclCheckTest.testLongTermTruncation: Invalid method name "testLongTermTruncation"[invalid-name] C0103: 722 AclCheckTest.testFragmentOptions: Invalid method name "testFragmentOptions"[invalid-name] C0103: 733 AclCheckTest.testIcmpMatching: Invalid method name "testIcmpMatching"[invalid-name] C0111: 733 AclCheckTest.testIcmpMatching: Missing method docstring[missing-docstring] C0103: 746 AclCheckTest.testConntrackUDP: Invalid method name "testConntrackUDP"[invalid-name] C0103: 757 AclCheckTest.testConntrackAll: Invalid method name "testConntrackAll"[invalid-name] C0103: 766 AclCheckTest.testTcpEstablishedNostate: Invalid method name 
"testTcpEstablishedNostate"[invalid-name] C0111: 766 AclCheckTest.testTcpEstablishedNostate: Missing method docstring[missing-docstring] C0103: 779 AclCheckTest.testUdpEstablishedNostate: Invalid method name "testUdpEstablishedNostate"[invalid-name] C0103: 788 AclCheckTest.testEstablishedNostate: Invalid method name "testEstablishedNostate"[invalid-name] C0103: 795 AclCheckTest.testUnsupportedFilter: Invalid method name "testUnsupportedFilter"[invalid-name] C0103: 800 AclCheckTest.testUnknownTermKeyword: Invalid method name "testUnknownTermKeyword"[invalid-name] C0103: 807 AclCheckTest.testProtocolExceptUnsupported: Invalid method name "testProtocolExceptUnsupported"[invalid-name] C0103: 812 AclCheckTest.testTermNameConflict: Invalid method name "testTermNameConflict"[invalid-name] C0103: 818 AclCheckTest.testMultiPort: Invalid method name "testMultiPort"[invalid-name] C0111: 818 AclCheckTest.testMultiPort: Missing method docstring[missing-docstring] C0103: 833 AclCheckTest.testMultiPortWithRanges: Invalid method name "testMultiPortWithRanges"[invalid-name] C0111: 833 AclCheckTest.testMultiPortWithRanges: Missing method docstring[missing-docstring] C0103: 847 AclCheckTest.testMultiportSwap: Invalid method name "testMultiportSwap"[invalid-name] C0103: 861 AclCheckTest.testMultiportLargePortCount: Invalid method name "testMultiportLargePortCount"[invalid-name] C0111: 861 AclCheckTest.testMultiportLargePortCount: Missing method docstring[missing-docstring] C0103: 874 AclCheckTest.testMultiportDualLargePortCount: Invalid method name "testMultiportDualLargePortCount"[invalid-name] C0111: 874 AclCheckTest.testMultiportDualLargePortCount: Missing method docstring[missing-docstring] C0103: 897 AclCheckTest.testGeneratePortBadArguments: Invalid method name "testGeneratePortBadArguments"[invalid-name] W0212: 901 AclCheckTest.testGeneratePortBadArguments: Access to a protected member _GeneratePortStatement of a client class[protected-access] C0103: 904 AclCheckTest.testGeneratePortNotImplemented: Invalid method name "testGeneratePortNotImplemented"[invalid-name] W0212: 908 AclCheckTest.testGeneratePortNotImplemented: Access to a protected member _GeneratePortStatement of a client class[protected-access] C0103: 911 AclCheckTest.testLogging: Invalid method name "testLogging"[invalid-name] C0103: 920 AclCheckTest.testSourceInterface: Invalid method name "testSourceInterface"[invalid-name] C0103: 927 AclCheckTest.testDestinationInterface: Invalid method name "testDestinationInterface"[invalid-name] C0103: 936 AclCheckTest.testExpired: Invalid method name "testExpired"[invalid-name] C0103: 945 AclCheckTest.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 955 AclCheckTest.testIPv6Icmp: Invalid method name "testIPv6Icmp"[invalid-name] C0103: 966 AclCheckTest.testIPv6IcmpOrder: Invalid method name "testIPv6IcmpOrder"[invalid-name] C0111: 966 AclCheckTest.testIPv6IcmpOrder: Missing method docstring[missing-docstring] C0103: 980 AclCheckTest.testIcmpv6InetMismatch: Invalid method name "testIcmpv6InetMismatch"[invalid-name] C0103: 992 AclCheckTest.testIcmpInet6Mismatch: Invalid method name "testIcmpInet6Mismatch"[invalid-name] C0103:1004 AclCheckTest.testOwner: Invalid method name "testOwner"[invalid-name] C0103:1012 AclCheckTest.testSetTarget: Invalid method name "testSetTarget"[invalid-name] C0103:1020 AclCheckTest.testSetCustomTarget: Invalid method name "testSetCustomTarget"[invalid-name] ************* Module tests.lib.junipersrx_test C0330: 739 : Wrong hanging indentation 
(remove 4 spaces). [mock.call('SOME_HOST')] * 2) | ^[bad-continuation] C0330: 741 : Wrong hanging indentation (remove 4 spaces). [mock.call('SMTP', 'tcp')] * 2) | ^[bad-continuation] C0330: 757 : Wrong hanging indentation (remove 4 spaces). [mock.call('SOME_HOST')] * 2) | ^[bad-continuation] C0330: 759 : Wrong hanging indentation (remove 4 spaces). [mock.call('SMTP', 'tcp')] * 2) | ^[bad-continuation] C0111: 313 JuniperSRXTest: Missing class docstring[missing-docstring] C0103: 318 JuniperSRXTest.testHeaderComment: Invalid method name "testHeaderComment"[invalid-name] C0103: 323 JuniperSRXTest.testHeaderApplyGroups: Invalid method name "testHeaderApplyGroups"[invalid-name] C0103: 329 JuniperSRXTest.testHeaderApplyGroupsExcept: Invalid method name "testHeaderApplyGroupsExcept"[invalid-name] C0103: 335 JuniperSRXTest.testLongComment: Invalid method name "testLongComment"[invalid-name] C0111: 335 JuniperSRXTest.testLongComment: Missing method docstring[missing-docstring] C0103: 352 JuniperSRXTest.testTermAndFilterName: Invalid method name "testTermAndFilterName"[invalid-name] C0111: 352 JuniperSRXTest.testTermAndFilterName: Missing method docstring[missing-docstring] C0103: 364 JuniperSRXTest.testVpnWithoutPolicy: Invalid method name "testVpnWithoutPolicy"[invalid-name] C0103: 374 JuniperSRXTest.testVpnWithPolicy: Invalid method name "testVpnWithPolicy"[invalid-name] C0103: 385 JuniperSRXTest.testVpnWithDrop: Invalid method name "testVpnWithDrop"[invalid-name] C0103: 396 JuniperSRXTest.testDefaultDeny: Invalid method name "testDefaultDeny"[invalid-name] C0103: 402 JuniperSRXTest.testIcmpTypes: Invalid method name "testIcmpTypes"[invalid-name] C0103: 412 JuniperSRXTest.testLoggingBoth: Invalid method name "testLoggingBoth"[invalid-name] C0103: 419 JuniperSRXTest.testOwnerTerm: Invalid method name "testOwnerTerm"[invalid-name] C0103: 426 JuniperSRXTest.testBadICMP: Invalid method name "testBadICMP"[invalid-name] C0103: 431 JuniperSRXTest.testICMPProtocolOnly: Invalid method name "testICMPProtocolOnly"[invalid-name] C0103: 436 JuniperSRXTest.testMultipleProtocolGrouping: Invalid method name "testMultipleProtocolGrouping"[invalid-name] C0111: 436 JuniperSRXTest.testMultipleProtocolGrouping: Missing method docstring[missing-docstring] C0103: 450 JuniperSRXTest.testGlobalPolicyHeader: Invalid method name "testGlobalPolicyHeader"[invalid-name] C0103: 457 JuniperSRXTest.testBadGlobalPolicyHeaderZoneBook: Invalid method name "testBadGlobalPolicyHeaderZoneBook"[invalid-name] C0103: 463 JuniperSRXTest.testBadGlobalPolicyHeaderNameAll: Invalid method name "testBadGlobalPolicyHeaderNameAll"[invalid-name] C0103: 469 JuniperSRXTest.testBadHeaderType: Invalid method name "testBadHeaderType"[invalid-name] C0103: 480 JuniperSRXTest.testBadHeaderMultiAF: Invalid method name "testBadHeaderMultiAF"[invalid-name] C0111: 480 JuniperSRXTest.testBadHeaderMultiAF: Missing method docstring[missing-docstring] C0103: 493 JuniperSRXTest.testBadHeaderMultiAB: Invalid method name "testBadHeaderMultiAB"[invalid-name] C0111: 493 JuniperSRXTest.testBadHeaderMultiAB: Missing method docstring[missing-docstring] C0103: 507 JuniperSRXTest.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 516 JuniperSRXTest.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 527 JuniperSRXTest.testTimeout: Invalid method name "testTimeout"[invalid-name] C0103: 532 JuniperSRXTest.testIcmpV6: Invalid method name "testIcmpV6"[invalid-name] C0103: 537 JuniperSRXTest.testReplaceStatement: Invalid method 
name "testReplaceStatement"[invalid-name] C0111: 537 JuniperSRXTest.testReplaceStatement: Missing method docstring[missing-docstring] C0103: 550 JuniperSRXTest.testAdressBookBothAFs: Invalid method name "testAdressBookBothAFs"[invalid-name] C0111: 550 JuniperSRXTest.testAdressBookBothAFs: Missing method docstring[missing-docstring] C0103: 564 JuniperSRXTest.testAdressBookIPv4: Invalid method name "testAdressBookIPv4"[invalid-name] C0111: 564 JuniperSRXTest.testAdressBookIPv4: Missing method docstring[missing-docstring] C0103: 578 JuniperSRXTest.testAdressBookIPv6: Invalid method name "testAdressBookIPv6"[invalid-name] C0111: 578 JuniperSRXTest.testAdressBookIPv6: Missing method docstring[missing-docstring] C0103: 592 JuniperSRXTest.testAddressBookContainsSmallerPrefix: Invalid method name "testAddressBookContainsSmallerPrefix"[invalid-name] C0111: 592 JuniperSRXTest.testAddressBookContainsSmallerPrefix: Missing method docstring[missing-docstring] C0103: 610 JuniperSRXTest.testAddressBookContainsLargerPrefix: Invalid method name "testAddressBookContainsLargerPrefix"[invalid-name] C0111: 610 JuniperSRXTest.testAddressBookContainsLargerPrefix: Missing method docstring[missing-docstring] C0103: 628 JuniperSRXTest.testZoneAdressBookBothAFs: Invalid method name "testZoneAdressBookBothAFs"[invalid-name] C0111: 628 JuniperSRXTest.testZoneAdressBookBothAFs: Missing method docstring[missing-docstring] C0103: 642 JuniperSRXTest.testZoneAdressBookIPv4: Invalid method name "testZoneAdressBookIPv4"[invalid-name] C0111: 642 JuniperSRXTest.testZoneAdressBookIPv4: Missing method docstring[missing-docstring] C0103: 656 JuniperSRXTest.testZoneAdressBookIPv6: Invalid method name "testZoneAdressBookIPv6"[invalid-name] C0111: 656 JuniperSRXTest.testZoneAdressBookIPv6: Missing method docstring[missing-docstring] C0103: 688 JuniperSRXTest.testAddressBookOrderingSuccess: Invalid method name "testAddressBookOrderingSuccess"[invalid-name] C0111: 688 JuniperSRXTest.testAddressBookOrderingSuccess: Missing method docstring[missing-docstring] W0212: 695 JuniperSRXTest.testAddressBookOrderingSuccess: Access to a protected member _GenerateAddressBook of a client class[protected-access] C0103: 700 JuniperSRXTest.testAddressBookOrderingAlreadyOrdered: Invalid method name "testAddressBookOrderingAlreadyOrdered"[invalid-name] C0111: 700 JuniperSRXTest.testAddressBookOrderingAlreadyOrdered: Missing method docstring[missing-docstring] W0212: 708 JuniperSRXTest.testAddressBookOrderingAlreadyOrdered: Access to a protected member _GenerateAddressBook of a client class[protected-access] C0103: 725 JuniperSRXTest.testApplicationsOrderingSuccess: Invalid method name "testApplicationsOrderingSuccess"[invalid-name] C0111: 725 JuniperSRXTest.testApplicationsOrderingSuccess: Missing method docstring[missing-docstring] W0212: 732 JuniperSRXTest.testApplicationsOrderingSuccess: Access to a protected member _GenerateApplications of a client class[protected-access] C0103: 743 JuniperSRXTest.testApplicationsOrderingAlreadyOrdered: Invalid method name "testApplicationsOrderingAlreadyOrdered"[invalid-name] C0111: 743 JuniperSRXTest.testApplicationsOrderingAlreadyOrdered: Missing method docstring[missing-docstring] W0212: 750 JuniperSRXTest.testApplicationsOrderingAlreadyOrdered: Access to a protected member _GenerateApplications of a client class[protected-access] C0103: 761 JuniperSRXTest.testDscpWithByte: Invalid method name "testDscpWithByte"[invalid-name] C0103: 770 JuniperSRXTest.testDscpWithClass: Invalid method name 
"testDscpWithClass"[invalid-name] C0111: 770 JuniperSRXTest.testDscpWithClass: Missing method docstring[missing-docstring] C0103: 782 JuniperSRXTest.testLargeTermSplitting: Invalid method name "testLargeTermSplitting"[invalid-name] C0111: 782 JuniperSRXTest.testLargeTermSplitting: Missing method docstring[missing-docstring] C0103: 811 JuniperSRXTest.testLargeTermSplittingV6: Invalid method name "testLargeTermSplittingV6"[invalid-name] C0111: 811 JuniperSRXTest.testLargeTermSplittingV6: Missing method docstring[missing-docstring] C0103: 842 JuniperSRXTest.testDuplicateTermsInDifferentZones: Invalid method name "testDuplicateTermsInDifferentZones"[invalid-name] C0111: 842 JuniperSRXTest.testDuplicateTermsInDifferentZones: Missing method docstring[missing-docstring] ************* Module tests.lib.juniper_test C0330: 437 : Wrong hanging indentation (remove 4 spaces). [nacaddr.IPv4('10.0.0.0/8', comment=long_comment)]) | ^[bad-continuation] C0330: 681 : Wrong hanging indentation (remove 4 spaces). nacaddr.IPv4('127.0.0.1'), nacaddr.IPv6('::1/128')] | ^[bad-continuation] C0330: 803 : Wrong hanging indentation (remove 4 spaces). 'Term icmptype-mismatch will not be rendered,' | ^[bad-continuation] W1401: 533 : Anomalous backslash in string: '\W'. String constant might be missing an r prefix.[anomalous-backslash-in-string] W1401: 533 : Anomalous backslash in string: '\W'. String constant might be missing an r prefix.[anomalous-backslash-in-string] W1401: 535 : Anomalous backslash in string: '\W'. String constant might be missing an r prefix.[anomalous-backslash-in-string] W1401: 535 : Anomalous backslash in string: '\W'. String constant might be missing an r prefix.[anomalous-backslash-in-string] W1401: 535 : Anomalous backslash in string: '\W'. String constant might be missing an r prefix.[anomalous-backslash-in-string] C0111: 372 JuniperTest: Missing class docstring[missing-docstring] C0103: 377 JuniperTest.testOptions: Invalid method name "testOptions"[invalid-name] C0111: 377 JuniperTest.testOptions: Missing method docstring[missing-docstring] C0103: 392 JuniperTest.testTermAndFilterName: Invalid method name "testTermAndFilterName"[invalid-name] C0111: 392 JuniperTest.testTermAndFilterName: Missing method docstring[missing-docstring] C0103: 405 JuniperTest.testBadFilterType: Invalid method name "testBadFilterType"[invalid-name] C0103: 416 JuniperTest.testBridgeFilterType: Invalid method name "testBridgeFilterType"[invalid-name] C0111: 416 JuniperTest.testBridgeFilterType: Missing method docstring[missing-docstring] C0103: 429 JuniperTest.testCommentShrinking: Invalid method name "testCommentShrinking"[invalid-name] C0111: 429 JuniperTest.testCommentShrinking: Missing method docstring[missing-docstring] C0103: 448 JuniperTest.testDefaultDeny: Invalid method name "testDefaultDeny"[invalid-name] C0103: 454 JuniperTest.testIcmpType: Invalid method name "testIcmpType"[invalid-name] C0111: 454 JuniperTest.testIcmpType: Missing method docstring[missing-docstring] C0103: 467 JuniperTest.testInet6: Invalid method name "testInet6"[invalid-name] C0111: 467 JuniperTest.testInet6: Missing method docstring[missing-docstring] C0103: 480 JuniperTest.testNotInterfaceSpecificHeader: Invalid method name "testNotInterfaceSpecificHeader"[invalid-name] C0111: 480 JuniperTest.testNotInterfaceSpecificHeader: Missing method docstring[missing-docstring] C0103: 493 JuniperTest.testInterfaceSpecificHeader: Invalid method name "testInterfaceSpecificHeader"[invalid-name] C0111: 493 JuniperTest.testInterfaceSpecificHeader: 
Missing method docstring[missing-docstring] C0103: 505 JuniperTest.testHopLimit: Invalid method name "testHopLimit"[invalid-name] C0103: 512 JuniperTest.testProtocolExcept: Invalid method name "testProtocolExcept"[invalid-name] C0103: 518 JuniperTest.testIcmpv6Except: Invalid method name "testIcmpv6Except"[invalid-name] C0103: 524 JuniperTest.testProtocolCase: Invalid method name "testProtocolCase"[invalid-name] C0103: 530 JuniperTest.testPrefixList: Invalid method name "testPrefixList"[invalid-name] C0103: 540 JuniperTest.testEtherType: Invalid method name "testEtherType"[invalid-name] C0103: 546 JuniperTest.testTrafficType: Invalid method name "testTrafficType"[invalid-name] C0103: 552 JuniperTest.testVerbatimTerm: Invalid method name "testVerbatimTerm"[invalid-name] C0103: 561 JuniperTest.testDscpByte: Invalid method name "testDscpByte"[invalid-name] C0103: 572 JuniperTest.testDscpClass: Invalid method name "testDscpClass"[invalid-name] C0111: 572 JuniperTest.testDscpClass: Missing method docstring[missing-docstring] C0103: 585 JuniperTest.testDscpIPv6: Invalid method name "testDscpIPv6"[invalid-name] C0111: 585 JuniperTest.testDscpIPv6: Missing method docstring[missing-docstring] C0103: 599 JuniperTest.testSimplifiedThenStatement: Invalid method name "testSimplifiedThenStatement"[invalid-name] C0111: 599 JuniperTest.testSimplifiedThenStatement: Missing method docstring[missing-docstring] C0103: 611 JuniperTest.testSimplifiedThenStatementWithSingleAction: Invalid method name "testSimplifiedThenStatementWithSingleAction"[invalid-name] C0103: 622 JuniperTest.testSimplifiedThenStatementWithSingleActionDiscardIPv4: Invalid method name "testSimplifiedThenStatementWithSingleActionDiscardIPv4"[invalid-name] C0111: 622 JuniperTest.testSimplifiedThenStatementWithSingleActionDiscardIPv4: Missing method docstring[missing-docstring] C0103: 634 JuniperTest.testSimplifiedThenStatementWithSingleActionDiscardIPv6: Invalid method name "testSimplifiedThenStatementWithSingleActionDiscardIPv6"[invalid-name] C0103: 645 JuniperTest.testSimplifiedThenStatementWithSingleActionRejectIPv6: Invalid method name "testSimplifiedThenStatementWithSingleActionRejectIPv6"[invalid-name] C0111: 645 JuniperTest.testSimplifiedThenStatementWithSingleActionRejectIPv6: Missing method docstring[missing-docstring] C0103: 657 JuniperTest.testTcpEstablished: Invalid method name "testTcpEstablished"[invalid-name] C0103: 668 JuniperTest.testNonTcpWithTcpEstablished: Invalid method name "testNonTcpWithTcpEstablished"[invalid-name] C0103: 679 JuniperTest.testBridgeFilterInetType: Invalid method name "testBridgeFilterInetType"[invalid-name] C0103: 690 JuniperTest.testDsmo: Invalid method name "testDsmo"[invalid-name] C0111: 690 JuniperTest.testDsmo: Missing method docstring[missing-docstring] C0103: 705 JuniperTest.testDsmoJuniperFriendly: Invalid method name "testDsmoJuniperFriendly"[invalid-name] C0111: 705 JuniperTest.testDsmoJuniperFriendly: Missing method docstring[missing-docstring] C0103: 717 JuniperTest.testDsmoExclude: Invalid method name "testDsmoExclude"[invalid-name] C0111: 717 JuniperTest.testDsmoExclude: Missing method docstring[missing-docstring] C0103: 735 JuniperTest.testTermTypeIndexKeys: Invalid method name "testTermTypeIndexKeys"[invalid-name] W0212: 737 JuniperTest.testTermTypeIndexKeys: Access to a protected member _TERM_TYPE of a client class[protected-access] C0103: 740 JuniperTest.testRoutingInstance: Invalid method name "testRoutingInstance"[invalid-name] C0103: 746 JuniperTest.testLossPriority: Invalid method 
name "testLossPriority"[invalid-name] C0103: 752 JuniperTest.testPrecedence: Invalid method name "testPrecedence"[invalid-name] C0103: 762 JuniperTest.testMultiplePrecedence: Invalid method name "testMultiplePrecedence"[invalid-name] C0103: 772 JuniperTest.testArbitraryOptions: Invalid method name "testArbitraryOptions"[invalid-name] C0103: 783 JuniperTest.testIcmpv6InetMismatch: Invalid method name "testIcmpv6InetMismatch"[invalid-name] C0103: 795 JuniperTest.testIcmpInet6Mismatch: Invalid method name "testIcmpInet6Mismatch"[invalid-name] C0103: 808 JuniperTest.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 817 JuniperTest.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 827 JuniperTest.testOwnerTerm: Invalid method name "testOwnerTerm"[invalid-name] C0103: 835 JuniperTest.testAddressExclude: Invalid method name "testAddressExclude"[invalid-name] C0111: 835 JuniperTest.testAddressExclude: Missing method docstring[missing-docstring] C0103: 857 JuniperTest.testMinimizePrefixes: Invalid method name "testMinimizePrefixes"[invalid-name] C0111: 857 JuniperTest.testMinimizePrefixes: Missing method docstring[missing-docstring] C0103: 885 JuniperTest.testConfigHelper: Invalid method name "testConfigHelper"[invalid-name] C0111: 885 JuniperTest.testConfigHelper: Missing method docstring[missing-docstring] C0103: 911 JuniperTest.testForwardingClass: Invalid method name "testForwardingClass"[invalid-name] C0103: 918 JuniperTest.testLongPolicer: Invalid method name "testLongPolicer"[invalid-name] C0103: 932 JuniperTest.testNextIp: Invalid method name "testNextIp"[invalid-name] C0103: 943 JuniperTest.testNextIpFormat: Invalid method name "testNextIpFormat"[invalid-name] C0111: 943 JuniperTest.testNextIpFormat: Missing method docstring[missing-docstring] C0103: 956 JuniperTest.testNextIpv6: Invalid method name "testNextIpv6"[invalid-name] C0103: 967 JuniperTest.testFailNextIpMultipleIP: Invalid method name "testFailNextIpMultipleIP"[invalid-name] C0103: 976 JuniperTest.testFailNextIpNetworkIP: Invalid method name "testFailNextIpNetworkIP"[invalid-name] C0411: 29 : standard import "import logging" comes before "from lib import aclgenerator"[wrong-import-order] ************* Module tests.lib.nacaddr_test C0103: 36 NacaddrUnitTest.testCollapsing: Invalid method name "testCollapsing"[invalid-name] C0111: 36 NacaddrUnitTest.testCollapsing: Missing method docstring[missing-docstring] R0204: 67 NacaddrUnitTest.testCollapsing: Redefinition of ip2 type from lib.nacaddr.IPv4 to lib.nacaddr.IPv6[redefined-variable-type] R0204: 68 NacaddrUnitTest.testCollapsing: Redefinition of ip3 type from lib.nacaddr.IPv4 to lib.nacaddr.IPv6[redefined-variable-type] R0204: 66 NacaddrUnitTest.testCollapsing: Redefinition of ip1 type from lib.nacaddr.IPv4 to lib.nacaddr.IPv6[redefined-variable-type] C0103: 73 NacaddrUnitTest.testNacaddrV4Comment: Invalid method name "testNacaddrV4Comment"[invalid-name] C0103: 76 NacaddrUnitTest.testNacaddrV6Comment: Invalid method name "testNacaddrV6Comment"[invalid-name] C0103: 79 NacaddrUnitTest.testSupernetting: Invalid method name "testSupernetting"[invalid-name] C0111: 79 NacaddrUnitTest.testSupernetting: Missing method docstring[missing-docstring] C0103: 96 NacaddrUnitTest.testAddressListExclusion: Invalid method name "testAddressListExclusion"[invalid-name] C0111: 96 NacaddrUnitTest.testAddressListExclusion: Missing method docstring[missing-docstring] C0103: 119 NacaddrUnitTest.testComplexAddressListExcludesion: Invalid method name 
"testComplexAddressListExcludesion"[invalid-name] C0111: 119 NacaddrUnitTest.testComplexAddressListExcludesion: Missing method docstring[missing-docstring] C0103: 155 NacaddrUnitTest.testAddressListExcludeCaseOne: Invalid method name "testAddressListExcludeCaseOne"[invalid-name] C0103: 165 NacaddrUnitTest.testAddressListExcludeCaseTwo: Invalid method name "testAddressListExcludeCaseTwo"[invalid-name] C0103: 178 NacaddrUnitTest.testAddressListExcludeCaseThree: Invalid method name "testAddressListExcludeCaseThree"[invalid-name] C0103: 191 NacaddrUnitTest.testAddressListExcludeCaseFour: Invalid method name "testAddressListExcludeCaseFour"[invalid-name] C0103: 199 NacaddrUnitTest.testAddressListExcludeCaseFive: Invalid method name "testAddressListExcludeCaseFive"[invalid-name] C0103: 207 NacaddrUnitTest.testAddressListExcludeCaseSix: Invalid method name "testAddressListExcludeCaseSix"[invalid-name] ************* Module tests.lib.naming_test C0103: 61 NamingUnitTest.testCommentedServices: Invalid method name "testCommentedServices"[invalid-name] C0103: 66 NamingUnitTest.testBadGetRequest: Invalid method name "testBadGetRequest"[invalid-name] C0103: 72 NamingUnitTest.testGetServiceRecursion: Invalid method name "testGetServiceRecursion"[invalid-name] C0103: 77 NamingUnitTest.testGetService: Invalid method name "testGetService"[invalid-name] C0103: 82 NamingUnitTest.testBadProtocol: Invalid method name "testBadProtocol"[invalid-name] C0103: 86 NamingUnitTest.testGetServiceByProto: Invalid method name "testGetServiceByProto"[invalid-name] C0103: 90 NamingUnitTest.testGetServiceByProtoWithoutProtocols: Invalid method name "testGetServiceByProtoWithoutProtocols"[invalid-name] C0103: 94 NamingUnitTest.testNetworkComment: Invalid method name "testNetworkComment"[invalid-name] C0103: 97 NamingUnitTest.testNestedNetworkComment: Invalid method name "testNestedNetworkComment"[invalid-name] C0103: 100 NamingUnitTest.testUndefinedAddress: Invalid method name "testUndefinedAddress"[invalid-name] C0103: 103 NamingUnitTest.testNamespaceCollisionError: Invalid method name "testNamespaceCollisionError"[invalid-name] C0103: 111 NamingUnitTest.testNetworkAddress: Invalid method name "testNetworkAddress"[invalid-name] C0103: 115 NamingUnitTest.testInet6Address: Invalid method name "testInet6Address"[invalid-name] C0103: 120 NamingUnitTest.testMixedAddresses: Invalid method name "testMixedAddresses"[invalid-name] C0103: 129 NamingUnitTest.testNestedServices: Invalid method name "testNestedServices"[invalid-name] C0103: 133 NamingUnitTest.testServiceParents: Invalid method name "testServiceParents"[invalid-name] C0103: 138 NamingUnitTest.testNetParents: Invalid method name "testNetParents"[invalid-name] C0103: 144 NamingUnitTest.testGetIpParents: Invalid method name "testGetIpParents"[invalid-name] C0103: 149 NamingUnitTest.testUndefinedTokenNesting: Invalid method name "testUndefinedTokenNesting"[invalid-name] W0212: 156 NamingUnitTest.testUndefinedTokenNesting: Access to a protected member _CheckUnseen of a client class[protected-access] W0212: 158 NamingUnitTest.testUndefinedTokenNesting: Access to a protected member _CheckUnseen of a client class[protected-access] C0103: 160 NamingUnitTest.testParseNetFile: Invalid method name "testParseNetFile"[invalid-name] W0212: 163 NamingUnitTest.testParseNetFile: Access to a protected member _ParseFile of a client class[protected-access] C0103: 166 NamingUnitTest.testParseServiceFile: Invalid method name "testParseServiceFile"[invalid-name] W0212: 169 
NamingUnitTest.testParseServiceFile: Access to a protected member _ParseFile of a client class[protected-access] ************* Module tests.lib.nftables_test C0111: 174 NftablesTest: Missing class docstring[missing-docstring] C0103: 179 NftablesTest.testBadHeader: Invalid method name "testBadHeader"[invalid-name] C0103: 198 NftablesTest.testGoodHeader: Invalid method name "testGoodHeader"[invalid-name] C0111: 198 NftablesTest.testGoodHeader: Missing method docstring[missing-docstring] C0103: 216 NftablesTest.testExpired: Invalid method name "testExpired"[invalid-name] C0103: 224 NftablesTest.testExpiring: Invalid method name "testExpiring"[invalid-name] C0103: 235 NftablesTest.testIcmpv6InetMismatch: Invalid method name "testIcmpv6InetMismatch"[invalid-name] C0103: 244 NftablesTest.testSingleSourceDestIp: Invalid method name "testSingleSourceDestIp"[invalid-name] C0103: 253 NftablesTest.testMultipleSourceDestIp: Invalid method name "testMultipleSourceDestIp"[invalid-name] C0103: 265 NftablesTest.testSingleProtocol: Invalid method name "testSingleProtocol"[invalid-name] C0103: 270 NftablesTest.testMultiProtocol: Invalid method name "testMultiProtocol"[invalid-name] C0103: 275 NftablesTest.testSingleDport: Invalid method name "testSingleDport"[invalid-name] C0103: 282 NftablesTest.testMultiDport: Invalid method name "testMultiDport"[invalid-name] C0103: 289 NftablesTest.testSingleSport: Invalid method name "testSingleSport"[invalid-name] C0103: 296 NftablesTest.testMultiSport: Invalid method name "testMultiSport"[invalid-name] C0103: 303 NftablesTest.testIcmpType: Invalid method name "testIcmpType"[invalid-name] C0103: 308 NftablesTest.testAction: Invalid method name "testAction"[invalid-name] C0103: 322 NftablesTest.testCommentOwner: Invalid method name "testCommentOwner"[invalid-name] C0103: 328 NftablesTest.testVerbatimTerm: Invalid method name "testVerbatimTerm"[invalid-name] C0103: 336 NftablesTest.testSourceDestExclude: Invalid method name "testSourceDestExclude"[invalid-name] C0111: 336 NftablesTest.testSourceDestExclude: Missing method docstring[missing-docstring] C0103: 352 NftablesTest.testSourceDestExcludeFromAllIps: Invalid method name "testSourceDestExcludeFromAllIps"[invalid-name] C0111: 352 NftablesTest.testSourceDestExcludeFromAllIps: Missing method docstring[missing-docstring] C0411: 26 : standard import "import logging" comes before "from lib import nacaddr"[wrong-import-order] ************* Module tests.lib.nsxv_functtest C0301: 33 : Line too long (87/80)[line-too-long] C0330: 41 : Wrong continued indentation (add 1 space). 
help='definitions directory', default='../def') ^|[bad-continuation] C0326: 77 : Exactly one space required after assignment protocol = int(root.find('./rule/services/service/protocol').text) ^[bad-whitespace] C0326: 100 : Exactly one space required after assignment protocol = int(root.find('./rule/services/service/protocol').text) ^[bad-whitespace] C0301: 113 : Line too long (84/80)[line-too-long] C0301: 117 : Line too long (84/80)[line-too-long] C0103: 39 NsxvFunctionalTest.setUp: Invalid variable name "_parser"[invalid-name] C0103: 42 NsxvFunctionalTest.setUp: Invalid variable name "FLAGS"[invalid-name] W0612: 42 NsxvFunctionalTest.setUp: Unused variable 'args'[unused-variable] C0103: 48 NsxvFunctionalTest.runTest: Invalid method name "runTest"[invalid-name] C0111: 51 NsxvFunctionalTest.test_nsxv_policy: Missing method docstring[missing-docstring] C0111: 84 NsxvFunctionalTest.test_nsxv_nosectiondid: Missing method docstring[missing-docstring] W0612: 108 NsxvFunctionalTest.test_nsxv_nofiltertype.test_nofiltertype: Unused variable 'fw'[unused-variable] W0612: 116 NsxvFunctionalTest.test_nsxv_incorrectfiltertype.test_incorrectfiltertype: Unused variable 'fw'[unused-variable] W0612: 125 NsxvFunctionalTest.test_nsxv_optionkywd.test_optionkywd: Unused variable 'output'[unused-variable] ************* Module tests.lib.nsxv_mocktest C0326: 18 : Exactly one space required around assignment INET_TERM="""\ ^[bad-whitespace] C0326: 27 : Exactly one space required around assignment INET6_TERM="""\ ^[bad-whitespace] C0326: 35 : Exactly one space required before assignment INET_FILTER= """\ ^[bad-whitespace] C0326: 52 : Exactly one space required before assignment INET6_FILTER= """\ ^[bad-whitespace] C0326: 66 : Exactly one space required before assignment MIXED_FILTER= """\ ^[bad-whitespace] C0326: 81 : Exactly one space required before assignment POLICY= """\ ^[bad-whitespace] C0326: 95 : Exactly one space required before assignment POLICY_NO_SECTION_ID= """\ ^[bad-whitespace] C0326: 106 : Exactly one space required before assignment POLICY_NO_FILTERTYPE= """\ ^[bad-whitespace] C0326: 117 : Exactly one space required before assignment POLICY_INCORRECT_FILTERTYPE= """\ ^[bad-whitespace] C0326: 128 : Exactly one space required before assignment POLICY_OPTION_KYWD= """\ ^[bad-whitespace] ************* Module tests.lib.nsxv_test C0330: 197 : Wrong hanging indentation (remove 4 spaces). [mock.call('NTP', 'udp')] * 2) | ^[bad-continuation] C0330: 244 : Wrong hanging indentation (remove 4 spaces). [mock.call('NTP', 'udp')] * 2) | ^[bad-continuation] C0111: 114 TermTest: Missing class docstring[missing-docstring] C0103: 119 TermTest.testInitForinet: Invalid method name "testInitForinet"[invalid-name] C0103: 125 TermTest.testInitForinet6: Invalid method name "testInitForinet6"[invalid-name] C0103: 131 TermTest.testServiceToStr: Invalid method name "testServiceToStr"[invalid-name] W0212: 139 TermTest.testServiceToStr: Access to a protected member _ServiceToString of a client class[protected-access] C0103: 144 TermTest.testStrForinet: Invalid method name "testStrForinet"[invalid-name] C0103: 199 TermTest.testStrForinet6: Invalid method name "testStrForinet6"[invalid-name] C0103: 224 TermTest.testTranslatePolicy: Invalid method name "testTranslatePolicy"[invalid-name] C0103: 246 TermTest.testNsxvStr: Invalid method name "testNsxvStr"[invalid-name] ************* Module tests.lib.nsxv_unittest C0330: 33 : Wrong continued indentation (add 1 space). 
help='definitions directory', default='../def') ^|[bad-continuation] C0301: 66 : Line too long (148/80)[line-too-long] C0326: 71 : Exactly one space required around assignment af=4 ^[bad-whitespace] W0311: 73 : Bad indentation. Found 7 spaces, expected 6[bad-indentation] C0326: 73 : Exactly one space required before assignment nsxv_term= nsxv.Term(terms[0], af) ^[bad-whitespace] W0311: 74 : Bad indentation. Found 7 spaces, expected 6[bad-indentation] C0326: 82 : Exactly one space required after comma exp_sourceaddr = ['10.0.0.1','10.0.0.2'] ^[bad-whitespace] C0326: 90 : Exactly one space required after comma exp_destaddr = ['10.0.0.0/8','172.16.0.0/12','192.168.0.0/16'] ^[bad-whitespace] C0326: 90 : Exactly one space required after comma exp_destaddr = ['10.0.0.0/8','172.16.0.0/12','192.168.0.0/16'] ^[bad-whitespace] C0326: 98 : Exactly one space required after assignment protocol = int(root.find('./services/service/protocol').text) ^[bad-whitespace] C0326: 110 : Exactly one space required after assignment notes = root.find('notes').text ^[bad-whitespace] C0326: 116 : Exactly one space required around assignment af=6 ^[bad-whitespace] W0311: 119 : Bad indentation. Found 7 spaces, expected 6[bad-indentation] C0326: 119 : Exactly one space required before assignment nsxv_term= nsxv.Term(terms[0], filter_type, af) ^[bad-whitespace] W0311: 120 : Bad indentation. Found 7 spaces, expected 6[bad-indentation] C0326: 149 : No space allowed before comma self.assertEqual(len(terms) , 1) ^[bad-whitespace] C0326: 168 : Exactly one space required after comma exp_ipv4dest = ['8.8.4.4','8.8.8.8'] ^[bad-whitespace] C0326: 169 : Exactly one space required after comma exp_ipv6dest = ['2001:4860:4860::8844','2001:4860:4860::8888'] ^[bad-whitespace] C0326: 183 : Exactly one space required after assignment protocol = int(root.find('./rule/services/service/protocol').text) ^[bad-whitespace] C0326: 191 : Exactly one space required after assignment notes = root.find('./rule/notes').text ^[bad-whitespace] C0111: 27 TermTest: Missing class docstring[missing-docstring] C0103: 31 TermTest.setUp: Invalid variable name "_parser"[invalid-name] E0602: 31 TermTest.setUp: Undefined variable 'OptionParser'[undefined-variable] C0103: 34 TermTest.setUp: Invalid variable name "FLAGS"[invalid-name] W0612: 34 TermTest.setUp: Unused variable 'args'[unused-variable] C0103: 40 TermTest.runTest: Invalid method name "runTest"[invalid-name] C0103: 57 TermTest.test_ServiceToStr: Invalid method name "test_ServiceToStr"[invalid-name] W0212: 65 TermTest.test_ServiceToStr: Access to a protected member _ServiceToString of a client class[protected-access] W0612: 72 TermTest.test_str_forinet: Unused variable 'header'[unused-variable] W0612: 118 TermTest.test_str_forinet6: Unused variable 'header'[unused-variable] C0103: 138 TermTest.test_TranslatePolicy: Invalid method name "test_TranslatePolicy"[invalid-name] W0612: 145 TermTest.test_TranslatePolicy: Unused variable 'header'[unused-variable] W0622: 172 TermTest.test_nsxv_str: Redefining built-in 'type'[redefined-builtin] ************* Module tests.lib.packetfilter_test C0301: 680 : Line too long (81/80)[line-too-long] C0301: 725 : Line too long (82/80)[line-too-long] C0111: 295 PacketFilterTest: Missing class docstring[missing-docstring] C0103: 300 PacketFilterTest.testTcp: Invalid method name "testTcp"[invalid-name] C0111: 300 PacketFilterTest.testTcp: Missing method docstring[missing-docstring] C0103: 319 PacketFilterTest.testLog: Invalid method name "testLog"[invalid-name] C0103: 331 
PacketFilterTest.testIcmp: Invalid method name "testIcmp"[invalid-name] C0103: 342 PacketFilterTest.testIcmpTypes: Invalid method name "testIcmpTypes"[invalid-name] C0103: 353 PacketFilterTest.testIcmpv6: Invalid method name "testIcmpv6"[invalid-name] C0103: 364 PacketFilterTest.testBadIcmp: Invalid method name "testBadIcmp"[invalid-name] C0103: 370 PacketFilterTest.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 380 PacketFilterTest.testExpiredTerm2: Invalid method name "testExpiredTerm2"[invalid-name] C0103: 390 PacketFilterTest.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 401 PacketFilterTest.testBadAction: Invalid method name "testBadAction"[invalid-name] C0103: 406 PacketFilterTest.testMultiprotocol: Invalid method name "testMultiprotocol"[invalid-name] C0103: 417 PacketFilterTest.testNextTerm: Invalid method name "testNextTerm"[invalid-name] C0103: 427 PacketFilterTest.testNextLogTerm: Invalid method name "testNextLogTerm"[invalid-name] C0103: 437 PacketFilterTest.testPortRange: Invalid method name "testPortRange"[invalid-name] C0111: 437 PacketFilterTest.testPortRange: Missing method docstring[missing-docstring] C0103: 453 PacketFilterTest.testFlags: Invalid method name "testFlags"[invalid-name] C0103: 464 PacketFilterTest.testInvalidFlags: Invalid method name "testInvalidFlags"[invalid-name] C0103: 469 PacketFilterTest.testMultilineComment: Invalid method name "testMultilineComment"[invalid-name] C0103: 478 PacketFilterTest.testStateless: Invalid method name "testStateless"[invalid-name] C0111: 478 PacketFilterTest.testStateless: Missing method docstring[missing-docstring] C0103: 497 PacketFilterTest.testInet4: Invalid method name "testInet4"[invalid-name] C0103: 509 PacketFilterTest.testInet6: Invalid method name "testInet6"[invalid-name] C0103: 521 PacketFilterTest.testDirectional: Invalid method name "testDirectional"[invalid-name] C0111: 521 PacketFilterTest.testDirectional: Missing method docstring[missing-docstring] C0103: 540 PacketFilterTest.testMultipleHeader: Invalid method name "testMultipleHeader"[invalid-name] C0111: 540 PacketFilterTest.testMultipleHeader: Missing method docstring[missing-docstring] C0103: 555 PacketFilterTest.testDirectionalStateless: Invalid method name "testDirectionalStateless"[invalid-name] C0111: 555 PacketFilterTest.testDirectionalStateless: Missing method docstring[missing-docstring] C0103: 575 PacketFilterTest.testStatelessEstablished: Invalid method name "testStatelessEstablished"[invalid-name] C0103: 587 PacketFilterTest.testBadFlags: Invalid method name "testBadFlags"[invalid-name] C0103: 604 PacketFilterTest.testUdpStatelessEstablished: Invalid method name "testUdpStatelessEstablished"[invalid-name] C0103: 616 PacketFilterTest.testStatefulBlock: Invalid method name "testStatefulBlock"[invalid-name] C0103: 627 PacketFilterTest.testTcpEstablished: Invalid method name "testTcpEstablished"[invalid-name] C0103: 639 PacketFilterTest.testTableCreation: Invalid method name "testTableCreation"[invalid-name] C0111: 639 PacketFilterTest.testTableCreation: Missing method docstring[missing-docstring] C0103: 673 PacketFilterTest.testTableNameShortened: Invalid method name "testTableNameShortened"[invalid-name] C0111: 673 PacketFilterTest.testTableNameShortened: Missing method docstring[missing-docstring] C0103: 696 PacketFilterTest.testTableDuplicateShortNameError: Invalid method name "testTableDuplicateShortNameError"[invalid-name] C0111: 696 
PacketFilterTest.testTableDuplicateShortNameError: Missing method docstring[missing-docstring] C0103: 718 PacketFilterTest.testTableSameLongNameSameFilter: Invalid method name "testTableSameLongNameSameFilter"[invalid-name] C0111: 718 PacketFilterTest.testTableSameLongNameSameFilter: Missing method docstring[missing-docstring] C0103: 751 PacketFilterTest.testTableSameLongNameDiffFilter: Invalid method name "testTableSameLongNameDiffFilter"[invalid-name] C0111: 751 PacketFilterTest.testTableSameLongNameDiffFilter: Missing method docstring[missing-docstring] C0103: 785 PacketFilterTest.testTableDiffObjectsShortenedAndNonShortened: Invalid method name "testTableDiffObjectsShortenedAndNonShortened"[invalid-name] C0111: 785 PacketFilterTest.testTableDiffObjectsShortenedAndNonShortened: Missing method docstring[missing-docstring] C0103: 810 PacketFilterTest.testTableDuplicateShortNameErrorDiffFilter: Invalid method name "testTableDuplicateShortNameErrorDiffFilter"[invalid-name] C0111: 810 PacketFilterTest.testTableDuplicateShortNameErrorDiffFilter: Missing method docstring[missing-docstring] C0103: 835 PacketFilterTest.testTermNameConflict: Invalid method name "testTermNameConflict"[invalid-name] C0103: 845 PacketFilterTest.testBadProtoError: Invalid method name "testBadProtoError"[invalid-name] ************* Module tests.lib.pcap_test C0111: 171 PcapFilter: Missing class docstring[missing-docstring] C0103: 176 PcapFilter.testTcp: Invalid method name "testTcp"[invalid-name] C0111: 176 PcapFilter.testTcp: Missing method docstring[missing-docstring] C0103: 190 PcapFilter.testLog: Invalid method name "testLog"[invalid-name] C0103: 198 PcapFilter.testIcmp: Invalid method name "testIcmp"[invalid-name] C0103: 206 PcapFilter.testIcmpTypes: Invalid method name "testIcmpTypes"[invalid-name] C0103: 215 PcapFilter.testIcmpv6: Invalid method name "testIcmpv6"[invalid-name] C0103: 223 PcapFilter.testBadIcmp: Invalid method name "testBadIcmp"[invalid-name] C0103: 230 PcapFilter.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 239 PcapFilter.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 249 PcapFilter.testMultiprotocol: Invalid method name "testMultiprotocol"[invalid-name] C0103: 257 PcapFilter.testNextTerm: Invalid method name "testNextTerm"[invalid-name] C0103: 264 PcapFilter.testTcpOptions: Invalid method name "testTcpOptions"[invalid-name] C0103: 272 PcapFilter.testVrrpTerm: Invalid method name "testVrrpTerm"[invalid-name] C0103: 280 PcapFilter.testMultiHeader: Invalid method name "testMultiHeader"[invalid-name] C0103: 289 PcapFilter.testDirectional: Invalid method name "testDirectional"[invalid-name] C0103: 299 PcapFilter.testUnicastIPv6: Invalid method name "testUnicastIPv6"[invalid-name] C0111: 299 PcapFilter.testUnicastIPv6: Missing method docstring[missing-docstring] C0103: 311 PcapFilter.testHbh: Invalid method name "testHbh"[invalid-name] ************* Module tests.lib.policy_simple_test C0111: 1 : Missing module docstring[missing-docstring] C0111: 21 FieldTest: Missing class docstring[missing-docstring] C0103: 26 FieldTest.testAppendAppends: Invalid method name "testAppendAppends"[invalid-name] C0103: 31 FieldTest.testStr: Invalid method name "testStr"[invalid-name] C0103: 35 FieldTest.testStrIndents: Invalid method name "testStrIndents"[invalid-name] C0103: 39 FieldTest.testIntegerField: Invalid method name "testIntegerField"[invalid-name] C0103: 46 FieldTest.testNamingFieldRejectsBad: Invalid method name 
"testNamingFieldRejectsBad"[invalid-name] C0103: 55 FieldTest.testNamingFieldAcceptsGood: Invalid method name "testNamingFieldAcceptsGood"[invalid-name] C0103: 68 FieldTest.testNamingFieldAppendRejectsBad: Invalid method name "testNamingFieldAppendRejectsBad"[invalid-name] C0103: 78 FieldTest.testNamingFieldAppendAcceptsGood: Invalid method name "testNamingFieldAppendAcceptsGood"[invalid-name] C0103: 92 FieldTest.testNamingFieldDedupes: Invalid method name "testNamingFieldDedupes"[invalid-name] C0103: 98 FieldTest.testNamingFieldStr: Invalid method name "testNamingFieldStr"[invalid-name] C0111: 105 BlockTest: Missing class docstring[missing-docstring] C0103: 110 BlockTest.testRejectsNonField: Invalid method name "testRejectsNonField"[invalid-name] C0103: 116 BlockTest.testFieldsWithType: Invalid method name "testFieldsWithType"[invalid-name] C0111: 116 BlockTest.testFieldsWithType: Missing method docstring[missing-docstring] C0103: 128 BlockTest.testIter: Invalid method name "testIter"[invalid-name] C0111: 138 PolicyTest: Missing class docstring[missing-docstring] C0103: 143 PolicyTest.testAddMember: Invalid method name "testAddMember"[invalid-name] C0111: 143 PolicyTest.testAddMember: Missing method docstring[missing-docstring] C0103: 160 PolicyTest.testIter: Invalid method name "testIter"[invalid-name] C0111: 170 PolicyParserTest: Missing class docstring[missing-docstring] C0103: 178 PolicyParserTest.testParseCommentLine: Invalid method name "testParseCommentLine"[invalid-name] C0103: 185 PolicyParserTest.testParseBlankLine: Invalid method name "testParseBlankLine"[invalid-name] C0103: 192 PolicyParserTest.testParseInclude: Invalid method name "testParseInclude"[invalid-name] C0103: 199 PolicyParserTest.testParseHeader: Invalid method name "testParseHeader"[invalid-name] C0103: 207 PolicyParserTest.testParseTerm: Invalid method name "testParseTerm"[invalid-name] C0103: 215 PolicyParserTest.testParseTermBadField: Invalid method name "testParseTermBadField"[invalid-name] C0103: 219 PolicyParserTest.testUnfinishedBlock: Invalid method name "testUnfinishedBlock"[invalid-name] C0411: 18 : standard import "import logging" comes before "from lib import policy_simple"[wrong-import-order] ************* Module tests.lib.policy_test C0111: 449 PolicyTest: Missing class docstring[missing-docstring] C0103: 455 PolicyTest.testIncludes: Invalid method name "testIncludes"[invalid-name] C0103: 476 PolicyTest.testGoodPol: Invalid method name "testGoodPol"[invalid-name] C0111: 476 PolicyTest.testGoodPol: Missing method docstring[missing-docstring] C0103: 495 PolicyTest.testBadPol: Invalid method name "testBadPol"[invalid-name] C0103: 499 PolicyTest.testMissingHeader: Invalid method name "testMissingHeader"[invalid-name] C0103: 503 PolicyTest.testService: Invalid method name "testService"[invalid-name] C0111: 503 PolicyTest.testService: Missing method docstring[missing-docstring] C0103: 518 PolicyTest.testInvalidKeyword: Invalid method name "testInvalidKeyword"[invalid-name] C0103: 522 PolicyTest.testNumericProtocol: Invalid method name "testNumericProtocol"[invalid-name] C0103: 529 PolicyTest.testHopLimitSingle: Invalid method name "testHopLimitSingle"[invalid-name] C0103: 536 PolicyTest.testHopLimitRange: Invalid method name "testHopLimitRange"[invalid-name] C0103: 543 PolicyTest.testBadPortProtocols: Invalid method name "testBadPortProtocols"[invalid-name] C0103: 549 PolicyTest.testBadPortProtocols2: Invalid method name "testBadPortProtocols2"[invalid-name] C0103: 554 PolicyTest.testMinimumTerm: Invalid 
method name "testMinimumTerm"[invalid-name] C0103: 562 PolicyTest.testPortCollapsing: Invalid method name "testPortCollapsing"[invalid-name] C0111: 562 PolicyTest.testPortCollapsing: Missing method docstring[missing-docstring] C0103: 576 PolicyTest.testPortCollapsing2: Invalid method name "testPortCollapsing2"[invalid-name] C0103: 589 PolicyTest.testMinimumTerm2: Invalid method name "testMinimumTerm2"[invalid-name] C0103: 597 PolicyTest.testTermEquality: Invalid method name "testTermEquality"[invalid-name] C0111: 597 PolicyTest.testTermEquality: Missing method docstring[missing-docstring] C0103: 637 PolicyTest.testIpAndPortContains: Invalid method name "testIpAndPortContains"[invalid-name] C0111: 637 PolicyTest.testIpAndPortContains: Missing method docstring[missing-docstring] C0103: 660 PolicyTest.testEmptyIpContains: Invalid method name "testEmptyIpContains"[invalid-name] C0111: 660 PolicyTest.testEmptyIpContains: Missing method docstring[missing-docstring] C0103: 683 PolicyTest.testIpExcludeContains: Invalid method name "testIpExcludeContains"[invalid-name] C0111: 683 PolicyTest.testIpExcludeContains: Missing method docstring[missing-docstring] C0103: 705 PolicyTest.testIpDualExcludeContains: Invalid method name "testIpDualExcludeContains"[invalid-name] C0111: 705 PolicyTest.testIpDualExcludeContains: Missing method docstring[missing-docstring] C0103: 729 PolicyTest.testOptionsContains: Invalid method name "testOptionsContains"[invalid-name] C0111: 729 PolicyTest.testOptionsContains: Missing method docstring[missing-docstring] C0103: 749 PolicyTest.testPrecedenceContains: Invalid method name "testPrecedenceContains"[invalid-name] C0103: 761 PolicyTest.testProtocolExceptContains: Invalid method name "testProtocolExceptContains"[invalid-name] C0103: 770 PolicyTest.testGoodDestAddrExcludes: Invalid method name "testGoodDestAddrExcludes"[invalid-name] C0111: 770 PolicyTest.testGoodDestAddrExcludes: Missing method docstring[missing-docstring] C0103: 785 PolicyTest.testGoodSrcAddrExcludes: Invalid method name "testGoodSrcAddrExcludes"[invalid-name] C0111: 785 PolicyTest.testGoodSrcAddrExcludes: Missing method docstring[missing-docstring] C0103: 800 PolicyTest.testGoodAddrExcludes: Invalid method name "testGoodAddrExcludes"[invalid-name] C0111: 800 PolicyTest.testGoodAddrExcludes: Missing method docstring[missing-docstring] C0103: 815 PolicyTest.testGoodAddrExcludesFlatten: Invalid method name "testGoodAddrExcludesFlatten"[invalid-name] C0111: 815 PolicyTest.testGoodAddrExcludesFlatten: Missing method docstring[missing-docstring] C0103: 842 PolicyTest.testGoodAddrExcludesFlattenMultiple: Invalid method name "testGoodAddrExcludesFlattenMultiple"[invalid-name] C0111: 842 PolicyTest.testGoodAddrExcludesFlattenMultiple: Missing method docstring[missing-docstring] C0103: 862 PolicyTest.testGoodAddrExcludesFlattenAll: Invalid method name "testGoodAddrExcludesFlattenAll"[invalid-name] C0111: 862 PolicyTest.testGoodAddrExcludesFlattenAll: Missing method docstring[missing-docstring] C0103: 879 PolicyTest.testLogging: Invalid method name "testLogging"[invalid-name] C0103: 886 PolicyTest.testBadLogging: Invalid method name "testBadLogging"[invalid-name] C0103: 891 PolicyTest.testBadAction: Invalid method name "testBadAction"[invalid-name] C0103: 896 PolicyTest.testMultifilter: Invalid method name "testMultifilter"[invalid-name] C0103: 901 PolicyTest.testBadMultifilter: Invalid method name "testBadMultifilter"[invalid-name] C0103: 906 PolicyTest.testICMPTypes: Invalid method name 
"testICMPTypes"[invalid-name] C0103: 913 PolicyTest.testBadICMPTypes: Invalid method name "testBadICMPTypes"[invalid-name] C0103: 918 PolicyTest.testReservedWordTermName: Invalid method name "testReservedWordTermName"[invalid-name] C0103: 926 PolicyTest.testMultiPortLines: Invalid method name "testMultiPortLines"[invalid-name] C0103: 939 PolicyTest.testErrorLineNumber: Invalid method name "testErrorLineNumber"[invalid-name] C0103: 945 PolicyTest.testPrefixList: Invalid method name "testPrefixList"[invalid-name] C0111: 945 PolicyTest.testPrefixList: Missing method docstring[missing-docstring] C0103: 962 PolicyTest.testEtherTypes: Invalid method name "testEtherTypes"[invalid-name] C0103: 971 PolicyTest.testTrafficTypes: Invalid method name "testTrafficTypes"[invalid-name] C0103: 980 PolicyTest.testBadProtocolEtherTypes: Invalid method name "testBadProtocolEtherTypes"[invalid-name] C0103: 985 PolicyTest.testVerbatimTerm: Invalid method name "testVerbatimTerm"[invalid-name] C0103: 993 PolicyTest.testVerbatimMixed: Invalid method name "testVerbatimMixed"[invalid-name] C0103: 997 PolicyTest.testIntegerFilterName: Invalid method name "testIntegerFilterName"[invalid-name] C0103:1002 PolicyTest.testPrecedence: Invalid method name "testPrecedence"[invalid-name] C0103:1009 PolicyTest.testLossPriority: Invalid method name "testLossPriority"[invalid-name] C0103:1019 PolicyTest.testRoutingInstance: Invalid method name "testRoutingInstance"[invalid-name] C0103:1029 PolicyTest.testSourceInterface: Invalid method name "testSourceInterface"[invalid-name] C0103:1040 PolicyTest.testShadingDetection: Invalid method name "testShadingDetection"[invalid-name] C0111:1040 PolicyTest.testShadingDetection: Missing method docstring[missing-docstring] C0103:1055 PolicyTest.testVpnConfigWithoutPairPolicy: Invalid method name "testVpnConfigWithoutPairPolicy"[invalid-name] C0103:1061 PolicyTest.testVpnConfigWithPairPolicy: Invalid method name "testVpnConfigWithPairPolicy"[invalid-name] C0103:1067 PolicyTest.testForwardingClassPolicy: Invalid method name "testForwardingClassPolicy"[invalid-name] C0103:1071 PolicyTest.testForwardingClassEqual: Invalid method name "testForwardingClassEqual"[invalid-name] C0103:1079 PolicyTest.testTagSupportAndNetworkHeaderParsing: Invalid method name "testTagSupportAndNetworkHeaderParsing"[invalid-name] C0103:1088 PolicyTest.testEq: Invalid method name "testEq"[invalid-name] C0103:1097 PolicyTest.testNextIP: Invalid method name "testNextIP"[invalid-name] C0103:1110 PolicyTest.testStr: Invalid method name "testStr"[invalid-name] C0411: 25 : standard import "import logging" comes before "from lib import nacaddr"[wrong-import-order] ************* Module tests.lib.speedway_test C0305: 70 : Trailing newlines[trailing-newlines] C0103: 49 SpeedwayTest.testSpeedwayOutputFormat: Invalid method name "testSpeedwayOutputFormat"[invalid-name] C0111: 49 SpeedwayTest.testSpeedwayOutputFormat: Missing method docstring[missing-docstring] C0122: 54 SpeedwayTest.testSpeedwayOutputFormat: Comparison should be result[0] == '*filter'[misplaced-comparison-constant] C0122: 64 SpeedwayTest.testSpeedwayOutputFormat: Comparison should be result[(len(result)) - (2)] == 'COMMIT'[misplaced-comparison-constant] ************* Module tests.lib.srxlo_test C0111: 50 SRXloTest: Missing class docstring[missing-docstring] C0103: 55 SRXloTest.testIcmpv6: Invalid method name "testIcmpv6"[invalid-name] C0103: 61 SRXloTest.testIcmpv6Type: Invalid method name "testIcmpv6Type"[invalid-name] ************* Module 
tests.lib.summarizer_test C0305: 151 : Trailing newlines[trailing-newlines] C0111: 30 SummarizerTest: Missing class docstring[missing-docstring] C0103: 43 SummarizerTest.testToDottedQuad: Invalid method name "testToDottedQuad"[invalid-name] C0111: 43 SummarizerTest.testToDottedQuad: Missing method docstring[missing-docstring] C0103: 67 SummarizerTest.testInt32ToDottedQuad: Invalid method name "testInt32ToDottedQuad"[invalid-name] W0212: 68 SummarizerTest.testInt32ToDottedQuad: Access to a protected member _Int32ToDottedQuad of a client class[protected-access] C0103: 71 SummarizerTest.testSummarizeEmptyList: Invalid method name "testSummarizeEmptyList"[invalid-name] C0103: 76 SummarizerTest.testSummarizeNoNetworks: Invalid method name "testSummarizeNoNetworks"[invalid-name] C0103: 86 SummarizerTest.testSummarizeSomeNetworks: Invalid method name "testSummarizeSomeNetworks"[invalid-name] C0111: 86 SummarizerTest.testSummarizeSomeNetworks: Missing method docstring[missing-docstring] C0103: 107 SummarizerTest.testSummarizeAllNetworks: Invalid method name "testSummarizeAllNetworks"[invalid-name] C0103: 117 SummarizerTest.testSummarizeToAllSpace: Invalid method name "testSummarizeToAllSpace"[invalid-name] C0103: 126 SummarizerTest.testIpaddrToTuple: Invalid method name "testIpaddrToTuple"[invalid-name] W0212: 128 SummarizerTest.testIpaddrToTuple: Access to a protected member _IpaddrToTuple of a client class[protected-access] C0103: 130 SummarizerTest.testToPrettyBinaryFormat: Invalid method name "testToPrettyBinaryFormat"[invalid-name] C0111: 130 SummarizerTest.testToPrettyBinaryFormat: Missing method docstring[missing-docstring] W0212: 132 SummarizerTest.testToPrettyBinaryFormat: Access to a protected member _ToPrettyBinaryFormat of a client class[protected-access] W0212: 135 SummarizerTest.testToPrettyBinaryFormat: Access to a protected member _ToPrettyBinaryFormat of a client class[protected-access] W0212: 138 SummarizerTest.testToPrettyBinaryFormat: Access to a protected member _ToPrettyBinaryFormat of a client class[protected-access] W0212: 142 SummarizerTest.testToPrettyBinaryFormat: Access to a protected member _ToPrettyBinaryFormat of a client class[protected-access] C0411: 27 : standard import "import logging" comes before "from lib import summarizer"[wrong-import-order] ************* Module tests.lib.windows_advfirewall_test C0111: 156 WindowsAdvFirewallTest: Missing class docstring[missing-docstring] C0103: 168 WindowsAdvFirewallTest.testTcp: Invalid method name "testTcp"[invalid-name] C0111: 168 WindowsAdvFirewallTest.testTcp: Missing method docstring[missing-docstring] C0103: 184 WindowsAdvFirewallTest.testIcmp: Invalid method name "testIcmp"[invalid-name] C0103: 194 WindowsAdvFirewallTest.testIcmpTypes: Invalid method name "testIcmpTypes"[invalid-name] C0103: 208 WindowsAdvFirewallTest.testBadIcmp: Invalid method name "testBadIcmp"[invalid-name] C0103: 215 WindowsAdvFirewallTest.testExpiredTerm: Invalid method name "testExpiredTerm"[invalid-name] C0103: 225 WindowsAdvFirewallTest.testExpiringTerm: Invalid method name "testExpiringTerm"[invalid-name] C0103: 236 WindowsAdvFirewallTest.testMultiprotocol: Invalid method name "testMultiprotocol"[invalid-name] C0103: 250 WindowsAdvFirewallTest.testDirectionIn: Invalid method name "testDirectionIn"[invalid-name] C0111: 250 WindowsAdvFirewallTest.testDirectionIn: Missing method docstring[missing-docstring] ************* Module tests.lib.windows_ipsec_test C0111: 84 WindowsIPSecTest: Missing class docstring[missing-docstring] C0111: 97 
WindowsIPSecTest.testPolicy: Missing method docstring[missing-docstring] C0111: 112 WindowsIPSecTest.testTcp: Missing method docstring[missing-docstring] capirca-2.0.9/tools/get-country-zones.pl000066400000000000000000000030661437377527500202660ustar00rootroot00000000000000#!/usr/bin/perl # # Author: Paul Armstrong # # Downloads maps of countries to CIDR netblocks for the world and then turns # them into definition files usable by Capirca use strict; use warnings; use File::Find; my @files; my $destination = '../def/'; my $extension = '.net'; system("wget http://www.ipdeny.com/ipblocks/data/countries/all-zones.tar.gz") == 0 or die "Unable to get all-zones.tar.gz: $?\n"; system("tar -zxf all-zones.tar.gz") == 0 or die "Unable to untar all-zones.tar.gz: $?\n"; # We don't need these lying around unlink("Copyrights.txt"); unlink("MD5SUM"); unlink("all-zones.tar.gz"); sub zone_files { push @files, $File::Find::name if(/\.zone$/i); } find(\&zone_files, $ENV{PWD}); for my $file (@files) { if($file =~ /^.*\/([a-z]{2})\.zone/) { my $country = $1; my $new_name = "$destination$country$extension"; my $country_uc = uc($country); die "$file is zero bytes\n" if(!-s $file); open(OLDFILE, $file) or die "Unable to open $file: $!\n"; open(NEWFILE, ">$new_name") or die "Unable to open $new_name: $!\n"; while(<OLDFILE>) { chomp; if ($. == 1) { print NEWFILE "${country_uc}_NETBLOCKS = $_\n" or die "Unable to print to $new_name: $!\n"; } else { print NEWFILE " $_\n" or die "Unable to print to $new_name: $!\n"; } } close(NEWFILE) or die "$new_name didn't close properly: $!\n"; close(OLDFILE); die "$new_name is zero bytes\n" if(!-s $new_name); unlink($file); # clean up the originals. } } capirca-2.0.9/tools/iputilstools.py000066400000000000000000000035761437377527500174470ustar00rootroot00000000000000import random import ipaddress import itertools as it def write_excludes_testcase(ipstr, excludelist='', max_prefix_range=8, max_random_subnets=30): """ Writes a testcase to the tests/utils/address_exclude_test_cases.txt file. Note that the number of prefixes to consider grows exponentially, so unless you *do* want to consider a large pool to randomly select from, keep it at the default. Args: ipstr: the ip network as a string (v4 or v6) to base the test on. excludelist: optional comma-separated string of ip networks to exclude max_prefix_range: the largest number of prefixes to consider. 
max_random_subnets: the number of subnets to do exclusion tests for, if randomly generating Returns: None """ ip = ipaddress.ip_network(ipstr) if len(excludelist) == 0: # empty excludelist, making a random one prefixrange = min(max_prefix_range, ip.max_prefixlen - ip.prefixlen) excludelist = it.chain.from_iterable(ip.subnets(i) for i in range(1, prefixrange+1)) total_ips = 2**prefixrange ip_positions = set( random.choices( range(total_ips), k=min( max_random_subnets, total_ips ) ) ) compress_map = (1 if i in ip_positions else 0 for i in range(total_ips)) excludelist = list(it.compress(excludelist, compress_map)) else: excludelist = list(map(ipaddress.ip_network, excludelist.split(','))) result_list = [] for address in excludelist: result_list.append(ip.address_exclude(address)) ipst = str(ip) exst = ",".join(map(str, excludelist)) rest = ";".join(",".join(map(str, sorted(result))) for result in result_list) with open('tests/utils/address_exclude_test_cases.txt', 'a') as f: f.write("%s %s %s\n" % (ipst, exst, rest)) capirca-2.0.9/tools/pylintrc000066400000000000000000000160401437377527500161000ustar00rootroot00000000000000# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. [REPORTS] # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". files-output=no # Tells whether to display a full report or only the messages. reports=no # Disable the report(s) with the given id(s). disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923 [MESSAGES CONTROL] # List of checkers and warnings to enable. enable=old-raise-syntax # List of checkers and warnings to disable. disable=abstract-method,access-member-before-definition,arguments-differ,attribute-defined-outside-init,bad-option-value,design,file-ignored,fixme,global-statement,import-error,interface-is-not-class,locally-disabled,locally-enabled,maybe-no-member,method-hidden,missing-interface-method,no-init,no-member,no-name-in-module,no-self-use,pointless-except,relative-import,signature-differs,similarities,star-args,suppressed-message,unresolved-interface,unused-wildcard-import,useless-suppression confidence=HIGH,INFERENCE,UNDEFINED [BASIC] # Regular expression which should only match the name # of functions or classes which do not require a docstring. no-docstring-rgx=(__.*__|main) # Min length in lines of a function that requires a docstring. docstring-min-length=10 # Regular expression which should only match correct module names. The # leading underscore is sanctioned for private modules by Google's style # guide. # # There are exceptions to the basic rule (_?[a-z][a-z0-9_]*) to cover # requirements of Python's module system and of the presubmit framework. 
module-rgx=^(_?[a-z][a-z0-9_]*)|__init__|PRESUBMIT|PRESUBMIT_unittest$ # Regular expression which should only match correct module level names const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ # Regular expression which should only match correct class attribute class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ # Regular expression which should only match correct class names class-rgx=^_?[A-Z][a-zA-Z0-9]*$ # Regular expression which should only match correct function names. # 'camel_case' and 'snake_case' group names are used for consistency of naming # styles across functions and methods. function-rgx=^(?:(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$ # Regular expression which should only match correct method names. # 'camel_case' and 'snake_case' group names are used for consistency of naming # styles across functions and methods. 'exempt' indicates a name which is # consistent with all naming styles. method-rgx=^(?:(?P<exempt>__[a-z0-9_]+__|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$ # Regular expression which should only match correct instance attribute names attr-rgx=^_{0,2}[a-z][a-z0-9_]*$ # Regular expression which should only match correct argument names argument-rgx=^[a-z][a-z0-9_]*$ # Regular expression which should only match correct variable names variable-rgx=^[a-z][a-z0-9_]*$ # Regular expression which should only match correct list comprehension / # generator expression variable names inlinevar-rgx=^[a-z][a-z0-9_]*$ # Good variable names which should always be accepted, separated by a comma good-names=main,_ # Bad variable names which should always be refused, separated by a comma bad-names= # List of builtins function names that should not be used, separated by a comma # # TODO(robankeny) is this a valid function to leave in or remove? bad-functions=input,apply,reduce # List of decorators that define properties, such as abc.abstractproperty. property-classes=abc.abstractproperty [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of decorators that create context managers from functions, such as # contextlib.contextmanager. contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching names used for dummy variables (i.e. not used). dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_) # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= [CLASSES] # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls [EXCEPTIONS] overgeneral-exceptions=StandardError,Exception,BaseException [IMPORTS] # Deprecated modules which should not be used, separated by a comma. # Many of the functions in the string module are deprecated in # favor of string methods, but there are non-deprecated things in that module # too. deprecated-modules=regsub,TERMIOS,Bastion,rexec [FORMAT] # Maximum number of characters on a single line. max-line-length=80 # Regexp for a line that is allowed to be longer than the limit. 
# This "ignore" regex is today composed of 4 independent parts: # (1) Long import lines # (2) p4 expansion $Id$ lines # (3) URLs in comments or pydocs. Detecting URLs by regex is a hard problem and # no amount of tweaking will make a perfect regex AFAICT. This one is a good # compromise. # (4) Constant string literals at the start of files don't need to be broken # across lines. Allowing long paths, streamz and urls to be on a single # line. Also requires that the string not be a triplequoted string. ignore-long-lines=(?x) (^\s*(import|from)\s |\$Id:\s\/\/depot\/.+#\d+\s\$ |^\s*(\#\ )??$ |^[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*("[^"]\S+"|'[^']\S+') ) # Maximum number of lines in a module max-module-lines=99999 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Do not warn about multiple statements on a single line for constructs like # if test: stmt single-line-if-stmt=y # Make sure : in dicts and trailing commas are checked for whitespace. no-space-check= [LOGGING] logging-modules=logging [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes= capirca-2.0.9/tools/run_lint.sh000077500000000000000000000030241437377527500165000ustar00rootroot00000000000000#!/bin/bash # Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. find . -name "*.py" | sort |\ xargs pylint --rcfile tools/pylintrc --msg-template='{msg_id}:{line:4} {obj}: {msg}[{symbol}]' > ./tools/new_lint_errors.txt # Cannot disable the following errors, seems this is a known issue from searching online. sed -i ':a;N;$!ba;s/R0801.*duplicate-code]//g' ./tools/new_lint_errors.txt sed -i 's/R0904.*too-many-public-methods]//g' ./tools/new_lint_errors.txt sed -i 's/R0912.*too-many-branches]//g' ./tools/new_lint_errors.txt sed -i 's/R0914.*too-many-locals]//g' ./tools/new_lint_errors.txt sed -i 's/R0915.*too-many-statements]//g' ./tools/new_lint_errors.txt sed -i '/^\s*$/d' ./tools/new_lint_errors.txt new_diff=$(diff -u tools/current_lint_errors.txt tools/new_lint_errors.txt | grep -E "^\+[^+]") if [ "$new_diff" == "" ] then echo "[OK] The codebase passes the linter tests!"; else echo "[ERROR] There are additional new lint errors present in your changes." echo "$new_diff" exit 1 fi