golang-github-ovn-org-libovsdb-0.7.0/.github/workflows/ci.yml

name: libovsdb-ci

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:
    name: Build & Unit Test
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.18
        uses: actions/setup-go@v2
        with:
          go-version: 1.18
        id: go

      - name: Install benchstat
        run: go install golang.org/x/perf/cmd/benchstat@latest

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Generate Code
        run: make prebuild

      # This cannot be run in parallel because we require running go-generate first
      # We need to skip the go installation and caching to avoid "File exists" errors in the logs
      - name: Lint
        run: make lint

      - name: Build
        run: make build

      - name: Test
        run: make test

      - name: Test
        run: make integration-test

      - name: Generate coverage
        run: make coverage

      - name: Upload test coverage
        uses: shogo82148/actions-goveralls@v1
        with:
          path-to-profile: profile.cov

      - name: Benchmark
        run: make bench

      - name: Restore Latest Main Benchmark
        id: old-benchmark
        uses: actions/cache@v2
        with:
          path: bench-main.out
          key: benchmark-main-${{ hashfiles('**/*.go') }}
          restore-keys: |
            benchmark-main-

      - name: Compare Benchmarks
        if: hashFiles('bench-main.out') != ''
        run: benchstat bench-main.out bench.out

      - name: Create New Main Benchmark On Cache Miss
        if: steps.old-benchmark.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main'
        run: cp -f bench.out bench-main.out

  test:
    name: Integration Test
    needs: build
    strategy:
      matrix:
        ovs_version:
          - 2.15.0
          - 2.14.0
          - 2.13.0
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.18
        uses: actions/setup-go@v1
        with:
          go-version: 1.18
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Integration Test
        run: make integration-test
        env:
          OVS_IMAGE_TAG: ${{ matrix.ovs_version }}

golang-github-ovn-org-libovsdb-0.7.0/.github/workflows/codeql-analysis.yml

# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
# name: "CodeQL" on: push: branches: [ main ] pull_request: # The branches below must be a subset of the branches above branches: [ main ] schedule: - cron: '28 1 * * 3' jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: [ 'go' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - name: Checkout repository uses: actions/checkout@v2 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v1 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v1 golang-github-ovn-org-libovsdb-0.7.0/.github/workflows/images.yml000066400000000000000000000031601464501522100250210ustar00rootroot00000000000000name: libovsdb-images on: push: branches: [ main ] schedule: # run weekly to ensure our copy of ovs is up-to-date - cron: '42 0 * * 0' jobs: build: name: Build if: github.repository == 'ovn-org/libovsdb' runs-on: ubuntu-latest strategy: matrix: image: - ovs_version: master tag: latest - ovs_version: v2.15.0 tag: 2.15.0 - ovs_version: v2.14.0 tag: 2.14.0 - ovs_version: v2.13.0 tag: 2.13.0 steps: - name: Check Out Repo uses: actions/checkout@v2 - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v1 - name: Cache Docker layers uses: actions/cache@v2 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} restore-keys: | ${{ runner.os }}-buildx- - name: Login to Docker Hub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - name: Build and push id: docker_build uses: docker/build-push-action@v2 with: context: ovs builder: ${{ steps.buildx.outputs.name }} push: true build-args: OVS_VERSION=${{ matrix.image.ovs_version }} tags: libovsdb/ovs:${{ matrix.image.tag }} cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }}golang-github-ovn-org-libovsdb-0.7.0/.github/workflows/release.yml000066400000000000000000000011651464501522100251770ustar00rootroot00000000000000name: 'libovsdb-release' on: push: tags: - '*' jobs: release: if: startsWith(github.ref, 'refs/tags/') runs-on: ubuntu-latest steps: - name: Build Changelog id: github_release uses: mikepenz/release-changelog-builder-action@v2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: 
Create Release uses: actions/create-release@v1 with: tag_name: ${{ github.ref }} release_name: ${{ github.ref }} body: ${{steps.github_release.outputs.changelog}} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}golang-github-ovn-org-libovsdb-0.7.0/.gitignore000066400000000000000000000014771464501522100214350ustar00rootroot00000000000000### https://raw.github.com/github/gitignore/master/Go.gitignore # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so *.swp # Folders _obj _test bin # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof ### https://raw.github.com/github/gitignore/master/Global/OSX.gitignore .DS_Store .AppleDouble .LSOverride # Icon must end with two \r Icon # Thumbnails ._* # Files that might appear on external disk .Spotlight-V100 .Trashes # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk # Intellij .idea/ *.iml ### Project-Specific *.out *.cov example/vswitchd/ovs.ovsschema example/vswitchd/*.go !example/vswitchd/gen.gogolang-github-ovn-org-libovsdb-0.7.0/.golangci.yml000066400000000000000000000003741464501522100220240ustar00rootroot00000000000000linters: disable-all: true enable: - deadcode - errcheck - gocyclo - goimports - gosimple - govet - ineffassign - misspell - revive - staticcheck - structcheck - typecheck - unused - varcheck golang-github-ovn-org-libovsdb-0.7.0/HACKING.md000066400000000000000000000067131464501522100210310ustar00rootroot00000000000000HACKING ======= ## Getting Set Up Assuming you already have a Go environment set up. go get github.com/ovn-org/libovsdb cd $GOPATH/src/github.com/ovn-org/libovsdb You can use [`hub`](https://hub.github.com) to fork the repo hub fork ... or alternatively, fork ovn-org/libovsdb on GitHub and add your fork as a remote git remote add git@github.com:/libovsdb ## Hacking Pull a local branch before you start developing. Convention for branches is - `bug/1234` for a branch that addresses a specific bug - `feature/awesome` for a branch that implements an awesome feature If your work is a minor, you can call the branch whatever you like (within reason). ## Committing Before you submit code, you must agree to the [Developer Certificate of Origin](http://developercertificate.org) Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. 
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. To verify that you agree, you must sign-off your commits. git commit -s This adds the following to the bottom of you commit message Signed-off-by: John Doe The name and email address used in the sign off are taken from your `user.name` and `user.email` settings in `git`. You can change these globally or locally using `git config` or from your `~/.gitconfig` file ## Before Making a Pull Request # Run all the tests fig up -d make test-all # Make sure your code is pretty go fmt ## Make a Pull Request git push hub pull-request ... or if you still aren't using `hub` (which you should be by now) you can head over to [GitHub](http://github.com) and create a PR using the web interface ## Code Review Once your patch has been submitted it will be scrutinized by your peers. To make changes in response to comments... # Assuming you are already on the branch you raise the PR git push --force This will update the pull request, retrigger CI etc... ## Summary We hope you find this guide helpful and are looking forward to your pull requests! golang-github-ovn-org-libovsdb-0.7.0/LICENSE000066400000000000000000000260751464501522100204530ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-github-ovn-org-libovsdb-0.7.0/MAINTAINERS000066400000000000000000000022011464501522100211240ustar00rootroot00000000000000# libovsdb maintainers file # # This file describes who runs the ovn-org/libovsdb project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # [Org] [Org."Core maintainers"] people = [ "amorenoz", "dave-tucker", "hzhou8", "jcaamano" ] [Org.Alumni] people = [ "mavenugo", "shaleman" ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. 
# ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.amorenoz] Name = "Adrián Moreno" GitHub = amorenoz [people.dave-tucker] Name = "Dave Tucker" Email = "dave@dtucker.co.uk" GitHub = "dave-tucker" [people.hzhou8] Name = "Han Zhou" GitHub = "hzhou8" [people.jcaamano] Name = "Jaime Caamaño" GitHub = "jcaamano" [people.mavenugo] Name = "Madhu Venugopal" Email = "madhu@docker.com" GitHub = "mavenugo" [people.shaleman] Name = "Sukhesh Halemane" GitHub = "shaleman" golang-github-ovn-org-libovsdb-0.7.0/Makefile000066400000000000000000000027131464501522100210770ustar00rootroot00000000000000OVS_VERSION ?= v2.16.0 .PHONY: all all: lint build test integration-test coverage .PHONY: modelgen modelgen: @mkdir -p bin @go build -v -o ./bin ./cmd/modelgen .PHONY: prebuild prebuild: modelgen ovsdb/serverdb/_server.ovsschema example/vswitchd/ovs.ovsschema @echo "+ $@" @go generate -v ./... .PHONY: build build: prebuild @echo "+ $@" @go build -v ./... .PHONY: test test: prebuild @echo "+ $@" @go test -race -coverprofile=unit.cov -test.short -timeout 30s -v ./... .PHONY: integration-test integration-test: @echo "+ $@" @go test -race -coverprofile=integration.cov -coverpkg=github.com/ovn-org/libovsdb/... -timeout 60s -v ./test/ovs .PHONY: coverage coverage: test integration-test @sed -i '1d' integration.cov @cat unit.cov integration.cov > profile.cov .PHONY: bench bench: install-deps prebuild @echo "+ $@" @go test -run=XXX -count=3 -bench=. ./... | tee bench.out @benchstat bench.out .PHONY: install-deps install-deps: @echo "+ $@" @go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0 @golangci-lint --version @go install golang.org/x/perf/cmd/benchstat@latest .PHONY: lint lint: install-deps prebuild @echo "+ $@" @golangci-lint run ovsdb/serverdb/_server.ovsschema: @curl -sSL https://raw.githubusercontent.com/openvswitch/ovs/${OVS_VERSION}/ovsdb/_server.ovsschema -o $@ example/vswitchd/ovs.ovsschema: @curl -sSL https://raw.githubusercontent.com/openvswitch/ovs/${OVS_VERSION}/vswitchd/vswitch.ovsschema -o $@ golang-github-ovn-org-libovsdb-0.7.0/NOTICE000066400000000000000000000007561464501522100203500ustar00rootroot00000000000000libovsdb Copyright 2014-2015 Socketplane Inc. Copyright 2015-2018 Docker Inc. This software consists of voluntary contributions made by many individuals. For exact contribution history, see the commit history. Modifications Copyright 2018-2019 eBay Inc. This software contains modifications developed by eBay Inc. and voluntary contributions from other individuals in a fork maintained at https://github.com/eBay/libovsdb For details on these contributions, please consult the git history. golang-github-ovn-org-libovsdb-0.7.0/README.md000066400000000000000000000313121464501522100207130ustar00rootroot00000000000000libovsdb ======== [![libovsdb-ci](https://github.com/ovn-org/libovsdb/actions/workflows/ci.yml/badge.svg)](https://github.com/ovn-org/libovsdb/actions/workflows/ci.yml) [![Coverage Status](https://coveralls.io/repos/github/ovn-org/libovsdb/badge.svg?branch=main)](https://coveralls.io/github/ovn-org/libovsdb?branch=main) [![Go Report Card](https://goreportcard.com/badge/github.com/ovn-org/libovsdb)](https://goreportcard.com/report/github.com/ovn-org/libovsdb) An OVSDB Library written in Go ## What is OVSDB? OVSDB is the Open vSwitch Database Protocol. It's defined in [RFC 7047](http://tools.ietf.org/html/rfc7047) It's used mainly for managing the configuration of Open vSwitch and OVN, but it could also be used to manage your stamp collection. Philatelists Rejoice! 
## Quick Overview

The API to interact with OVSDB is based on tagged golang structs. We call it a Model. e.g:

    type MyLogicalSwitch struct {
        UUID   string            `ovsdb:"_uuid"` // _uuid tag is mandatory
        Name   string            `ovsdb:"name"`
        Ports  []string          `ovsdb:"ports"`
        Config map[string]string `ovsdb:"other_config"`
    }

libovsdb is able to translate a Model into OVSDB format.
To make the API use go idioms, the following mappings occur:

1. OVSDB Set with min 0 and max unlimited = Slice
1. OVSDB Set with min 0 and max 1 = Pointer to scalar type
1. OVSDB Set with min 0 and max N = Array of N
1. OVSDB Enum = Type-aliased Enum Type
1. OVSDB Map = Map
1. OVSDB Scalar Type = Equivalent scalar Go type

An Open vSwitch Database is modeled using a ClientDBModel, which is created by assigning table names to pointers to these structs:

    dbModelReq, _ := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{
        "Logical_Switch": &MyLogicalSwitch{},
    })

Finally, a client object can be created:

    ovs, _ := client.Connect(context.Background(), dbModelReq, client.WithEndpoint("tcp:172.18.0.4:6641"))
    client.MonitorAll(nil) // Only needed if you want to use the built-in cache

Once the client object is created, a generic API can be used to interact with the Database. Some API calls can be performed on the generic API: `List`, `Get`, `Create`. Others have to be called on a `ConditionalAPI` (`Update`, `Delete`, `Mutate`). There are several ways to create a `ConditionalAPI`:

**Where()**: `Where()` can be used to create a `ConditionalAPI` based on the index information that the provided Models contain. Example:

    ls := &LogicalSwitch{UUID: "foo"}
    ls2 := &LogicalSwitch{UUID: "foo2"}
    ops, _ := ovs.Where(ls, ls2).Delete()

It will check the field corresponding to the `_uuid` column as well as all the other schema-defined or client-defined indexes, in that order of priority. The first available index will be used to generate a condition.

**WhereAny()**: `WhereAny()` can be used to create a `ConditionalAPI` using a list of Condition objects. Each condition object specifies a field using a pointer to a Model's field, an `ovsdb.ConditionFunction` and a value. The type of the value depends on the type of the field being matched. Example:

    ls := &LogicalSwitch{}
    ops, _ := ovs.WhereAny(ls, client.Condition{
        Field:    &ls.Config,
        Function: ovsdb.ConditionIncludes,
        Value:    map[string]string{"foo": "bar"},
    }).Delete()

The resulting `ConditionalAPI` will create one operation per condition, so all the rows that match *any* of the specified conditions will be affected.

**WhereAll()**: `WhereAll()` behaves like `WhereAny()` but with *AND* semantics. The resulting `ConditionalAPI` will put all the conditions into a single operation. Therefore the operation will affect the rows that satisfy *all* the conditions.

**WhereCache()**: `WhereCache()` uses a function callback to filter on the local cache. Its primary use is to perform cache operations such as `List()`. However, it can also be used to create server-side operations (such as `Update()`, `Mutate()` or `Delete()`). If used this way, it will create an equality condition (using `ovsdb.ConditionEqual`) on the `_uuid` field for every matching row. Example:

    lsList := []LogicalSwitch{}
    ovs.WhereCache(
        func(ls *MyLogicalSwitch) bool {
            return strings.HasPrefix(ls.Name, "ext_")
        }).List(&lsList)

The table is inferred from the type that the function accepts as its only argument.

### Client indexes

The client will track schema indexes and use them when appropriate in `Get`, `Where`, and `WhereAll` as explained above.
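For instance, reusing the `MyLogicalSwitch` model above and assuming `name` is one of the table's schema indexes (as in the `Get` example further below), `Where()` can resolve rows through that index without an explicit condition. A minimal sketch, where the switch name is made up for illustration:

    // Sketch only: "ext_ls0" is a hypothetical switch name.
    ls := &MyLogicalSwitch{Name: "ext_ls0"}
    ops, _ := ovs.Where(ls).Delete() // condition generated from the "name" schema index
    ovs.Transact(ops...)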
Additional indexes can be specified for a client instance to track. Just as schema indexes, client indexes are specified in sets per table. where each set consists of the columns that compose the index. Unlike schema indexes, a key within a column can be addressed if the column type is a map. Client indexes are leveraged through `Where`, and `WhereAll`. Since client indexes value uniqueness is not enforced as it happens with schema indexes, conditions based on them can match multiple rows. Indexed based operations generally provide better performance than operations based on explicit conditions. As an example, where you would have: // slow predicate run on all the LB table rows... ovn.WhereCache(func (lb *LoadBalancer) bool { return lb.ExternalIds["myIdKey"] == "myIdValue" }).List(ctx, &results) can now be improved with: dbModel, err := nbdb.FullDatabaseModel() dbModel.SetIndexes(map[string][]model.ClientIndex{ "Load_Balancer": {{Columns: []model.ColumnKey{{Column: "external_ids", Key: "myIdKey"}}}}, }) // connect .... lb := &LoadBalancer{ ExternalIds: map[string]string{"myIdKey": "myIdValue"}, } // quick indexed result ovn.Where(lb).List(ctx, &results) ## Documentation This package is divided into several sub-packages. Documentation for each sub-package is available at [pkg.go.dev][doc]: * **client**: ovsdb client and API [![godoc for libovsdb/client][clientbadge]][clientdoc] * **mapper**: mapping from tagged structs to ovsdb types [![godoc for libovsdb/mapper][mapperbadge]][mapperdoc] * **model**: model and database model used for mapping [![godoc for libovsdb/model][modelbadge]][modeldoc] * **ovsdb**: low level OVS types [![godoc for libovsdb/ovsdb][ovsdbbadge]][ovsdbdoc] * **cache**: model-based cache [![godoc for libovsdb/cache][cachebadge]][cachedoc] * **modelgen**: common code-generator functions [![godoc for libovsdb/modelgen][genbadge]][gendoc] * **server**: ovsdb test server [![godoc for libovsdb/server][serverbadge]][serverdoc] * **database**: database related types, interfaces and implementations [![godoc for libovsdb/database][dbbadge]][dbdoc] * **updates**: common code to handle model updates [![godoc for libovsdb/updates][updatesbadge]][updatesdoc] [doc]: https://pkg.go.dev/ [clientbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/client [mapperbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/mapper [modelbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/model [ovsdbbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/ovsdb [cachebadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/cache [genbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/modelgen [serverbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/server [dbbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/database [updatesbadge]: https://pkg.go.dev/badge/github.com/ovn-org/libovsdb/server [clientdoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/client [mapperdoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/mapper [modeldoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/model [ovsdbdoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/ovsdb [cachedoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/cache [gendoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/modelgen [serverdoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/server [dbdoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/database [updatesdoc]: https://pkg.go.dev/github.com/ovn-org/libovsdb/updates ## Quick API Examples List the content of the database: var 
lsList *[]MyLogicalSwitch
    ovs.List(lsList)
    for _, ls := range lsList {
        fmt.Printf("%+v\n", ls)
    }

Search the cache for elements that match a certain predicate:

    var lsList *[]MyLogicalSwitch
    ovs.WhereCache(
        func(ls *MyLogicalSwitch) bool {
            return strings.HasPrefix(ls.Name, "ext_")
        }).List(&lsList)
    for _, ls := range lsList {
        fmt.Printf("%+v\n", ls)
    }

Create a new element:

    ops, _ := ovs.Create(&MyLogicalSwitch{
        Name: "foo",
    })
    ovs.Transact(ops...)

Get an element:

    ls := &MyLogicalSwitch{Name: "foo"} // "name" is in the table index list
    ovs.Get(ls)

And update it:

    ls.Config["foo"] = "bar"
    ops, _ := ovs.Where(ls).Update(&ls)
    ovs.Transact(ops...)

Or mutate it:

    ops, _ := ovs.Where(ls).Mutate(ls, ovs.Mutation{
        Field:   &ls.Config,
        Mutator: ovsdb.MutateOperationInsert,
        Value:   map[string]string{"foo": "bar"},
    })
    ovs.Transact(ops...)

Update, Mutate and Delete operations need a condition to be specified. Conditions can be created based on a Model's data:

    ls := &LogicalSwitch{UUID: "myUUID"}
    ops, _ := ovs.Where(ls).Delete()
    ovs.Transact(ops...)

They can also be created based on a list of Conditions:

    ops, _ := ovs.Where(ls, client.Condition{
        Field:    &ls.Config,
        Function: ovsdb.ConditionIncludes,
        Value:    map[string]string{"foo": "bar"},
    }).Delete()
    ovs.Transact(ops...)

    ops, _ := ovs.WhereAll(ls, client.Condition{
        Field:    &ls.Config,
        Function: ovsdb.ConditionIncludes,
        Value:    map[string]string{"foo": "bar"},
    }, client.Condition{
        Field:    &ls.Config,
        Function: ovsdb.ConditionIncludes,
        Value:    map[string]string{"bar": "baz"},
    }).Delete()
    ovs.Transact(ops...)

## Monitor for updates

You can also register a notification handler to get notified every time an element is added, deleted or updated from the database.

    handler := &cache.EventHandlerFuncs{
        AddFunc: func(table string, model model.Model) {
            if table == "Logical_Switch" {
                fmt.Printf("A new switch named %s was added!!\n!", model.(*MyLogicalSwitch).Name)
            }
        },
    }
    ovs.Cache.AddEventHandler(handler)

## modelgen

In this repository there is also a code-generator capable of generating all the Model types for a given ovsdb schema (json) file.

It can be used as follows:

    go install github.com/ovn-org/libovsdb/cmd/modelgen
    $GOPATH/bin/modelgen -p ${PACKAGE_NAME} -o {OUT_DIR} ${OVSDB_SCHEMA}
    Usage of modelgen:
            modelgen [flags] OVS_SCHEMA
    Flags:
      -d    Dry run
      -o string
            Directory where the generated files shall be stored (default ".")
      -p string
            Package name (default "ovsmodel")

The result will be the definition of a Model per table defined in the ovsdb schema file. Additionally, a function called `FullDatabaseModel()` that returns the `ClientDBModel` is created for convenience.

Example:

Download the schema:

    ovsdb-client get-schema "tcp:localhost:6641" > mypackage/ovs-nb.ovsschema

Run `go generate`

    cat <<EOF > mypackage/gen.go
    package mypackage

    //go:generate modelgen -p mypackage -o . ovs-nb.ovsschema
    EOF
    go generate ./...
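The generated package will contain one tagged struct per table, plus the `FullDatabaseModel()` helper used below. As a rough sketch only (the actual output depends entirely on your schema; the fields shown here are assumptions based on the `LogicalRouter` usage in the next example):

    // Illustrative sketch of a generated model, not actual modelgen output.
    type LogicalRouter struct {
        UUID  string   `ovsdb:"_uuid"`
        Name  string   `ovsdb:"name"`
        Ports []string `ovsdb:"ports"`
    }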
In your application, load the ClientDBModel, connect to the server and start interacting with the database: import ( "fmt" "github.com/ovn-org/libovsdb/client" generated "example.com/example/mypackage" ) func main() { dbModelReq, _ := generated.FullDatabaseModel() ovs, _ := client.Connect(context.Background(), dbModelReq, client.WithEndpoint("tcp:localhost:6641")) ovs.MonitorAll() // Create a *LogicalRouter, as a pointer to a Model is required by the API lr := &generated.LogicalRouter{ Name: "myRouter", } ovs.Get(lr) fmt.Printf("My Router has UUID: %s and %d Ports\n", lr.UUID, len(lr.Ports)) } ## Running the tests To run integration tests, you'll need access to docker to run an Open vSwitch container. Mac users can use [boot2docker](http://boot2docker.io) export DOCKER_IP=$(boot2docker ip) docker-compose run test /bin/sh # make test-local ... # exit docker-compose down By invoking the command **make**, you will automatically get the same behavior as what is shown above. In other words, it will start the two containers and execute **make test-local** from the test container. ## Contact The libovsdb community is part of ovn-org and can be contacted in the *#libovsdb* channel in [ovn-org Slack server](https://ovn-org.slack.com) golang-github-ovn-org-libovsdb-0.7.0/cache/000077500000000000000000000000001464501522100204775ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/cache/cache.go000066400000000000000000001106561464501522100221020ustar00rootroot00000000000000package cache import ( "bytes" "crypto/sha256" "encoding/gob" "encoding/hex" "fmt" "log" "os" "reflect" "sort" "strings" "sync" "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/updates" ) const ( updateEvent = "update" addEvent = "add" deleteEvent = "delete" bufferSize = 65536 columnDelimiter = "," keyDelimiter = "|" ) // ErrCacheInconsistent is an error that can occur when an operation // would cause the cache to be inconsistent type ErrCacheInconsistent struct { details string } // Error implements the error interface func (e *ErrCacheInconsistent) Error() string { msg := "cache inconsistent" if e.details != "" { msg += ": " + e.details } return msg } func NewErrCacheInconsistent(details string) *ErrCacheInconsistent { return &ErrCacheInconsistent{ details: details, } } // ErrIndexExists is returned when an item in the database cannot be inserted due to existing indexes type ErrIndexExists struct { Table string Value interface{} Index string New string Existing []string } func (e *ErrIndexExists) Error() string { return fmt.Sprintf("cannot insert %s in the %s table. item %s has identical indexes. 
index: %s, value: %v", e.New, e.Table, e.Existing, e.Index, e.Value) } func NewIndexExistsError(table string, value interface{}, index string, new string, existing []string) *ErrIndexExists { return &ErrIndexExists{ table, value, index, new, existing, } } // map of unique values to uuids type valueToUUIDs map[interface{}]uuidset // map of column name(s) to unique values, to UUIDs type columnToValue map[index]valueToUUIDs // index is the type used to implement multiple cache indexes type index string // indexType is the type of index type indexType uint const ( schemaIndexType indexType = iota clientIndexType ) // indexSpec contains details about an index type indexSpec struct { index index columns []model.ColumnKey indexType indexType } func (s indexSpec) isClientIndex() bool { return s.indexType == clientIndexType } func (s indexSpec) isSchemaIndex() bool { return s.indexType == schemaIndexType } // newIndex builds a index from a list of columns func newIndexFromColumns(columns ...string) index { sort.Strings(columns) return index(strings.Join(columns, columnDelimiter)) } // newIndexFromColumnKeys builds a index from a list of column keys func newIndexFromColumnKeys(columnsKeys ...model.ColumnKey) index { // RFC 7047 says that Indexes is a [] and "Each is a set of // columns whose values, taken together within any given row, must be // unique within the table". We'll store the column names, separated by comma // as we'll assume (RFC is not clear), that comma isn't valid in a columns := make([]string, 0, len(columnsKeys)) columnsMap := map[string]struct{}{} for _, columnKey := range columnsKeys { var column string if columnKey.Key != nil { column = fmt.Sprintf("%s%s%v", columnKey.Column, keyDelimiter, columnKey.Key) } else { column = columnKey.Column } if _, found := columnsMap[column]; !found { columns = append(columns, column) columnsMap[column] = struct{}{} } } return newIndexFromColumns(columns...) } // newColumnKeysFromColumns builds a list of column keys from a list of columns func newColumnKeysFromColumns(columns ...string) []model.ColumnKey { columnKeys := make([]model.ColumnKey, len(columns)) for i, column := range columns { columnKeys[i] = model.ColumnKey{Column: column} } return columnKeys } // RowCache is a collections of Models hashed by UUID type RowCache struct { name string dbModel model.DatabaseModel dataType reflect.Type cache map[string]model.Model indexSpecs []indexSpec indexes columnToValue mutex sync.RWMutex } // rowByUUID returns one model from the cache by UUID. Caller must hold the row // cache lock. func (r *RowCache) rowByUUID(uuid string) model.Model { if row, ok := r.cache[uuid]; ok { return model.Clone(row) } return nil } // Row returns one model from the cache by UUID func (r *RowCache) Row(uuid string) model.Model { r.mutex.RLock() defer r.mutex.RUnlock() return r.rowByUUID(uuid) } func (r *RowCache) HasRow(uuid string) bool { r.mutex.RLock() defer r.mutex.RUnlock() _, found := r.cache[uuid] return found } // rowsByModels searches the cache to find all rows matching any of the provided // models, either by UUID or indexes. An error is returned if the model schema // has no UUID field, or if the provided models are not all the same type. 
func (r *RowCache) rowsByModels(models []model.Model, useClientIndexes bool) (map[string]model.Model, error) { r.mutex.RLock() defer r.mutex.RUnlock() results := make(map[string]model.Model, len(models)) for _, m := range models { if reflect.TypeOf(m) != r.dataType { return nil, fmt.Errorf("model type %s didn't match expected row type %s", reflect.TypeOf(m), r.dataType) } info, _ := r.dbModel.NewModelInfo(m) field, err := info.FieldByColumn("_uuid") if err != nil { return nil, err } if uuid := field.(string); uuid != "" { if _, ok := results[uuid]; !ok { if row := r.rowByUUID(uuid); row != nil { results[uuid] = row continue } } } // indexSpecs are ordered, schema indexes go first, then client indexes for _, indexSpec := range r.indexSpecs { if indexSpec.isClientIndex() && !useClientIndexes { // Given the ordered indexSpecs, we can break here if we reach the // first client index break } val, err := valueFromIndex(info, indexSpec.columns) if err != nil { continue } vals := r.indexes[indexSpec.index] if uuids, ok := vals[val]; ok { for uuid := range uuids { if _, ok := results[uuid]; !ok { results[uuid] = r.rowByUUID(uuid) } } // Break after handling the first found index // to ensure we preserve index order preference break } } } if len(results) == 0 { return nil, nil } return results, nil } // RowByModel searches the cache by UUID and schema indexes. UUID search is // performed first. Then schema indexes are evaluated in turn by the same order // with which they are defined in the schema. The model for the first matching // index is returned along with its UUID. An empty string and nil is returned if // no Model is found. func (r *RowCache) RowByModel(m model.Model) (string, model.Model, error) { models, err := r.rowsByModels([]model.Model{m}, false) if err != nil { return "", nil, err } for uuid, model := range models { return uuid, model, nil } return "", nil, nil } // RowsByModels searches the cache by UUID, schema indexes and client indexes. // UUID search is performed first. Schema indexes are evaluated next in turn by // the same order with which they are defined in the schema. Finally, client // indexes are evaluated in turn by the same order with which they are defined // in the client DB model. The models for the first matching index are returned, // which might be more than 1 if they were found through a client index since in // that case uniqueness is not enforced. Nil is returned if no Model is found. 
func (r *RowCache) RowsByModels(models []model.Model) (map[string]model.Model, error) { return r.rowsByModels(models, true) } // Create writes the provided content to the cache func (r *RowCache) Create(uuid string, m model.Model, checkIndexes bool) error { r.mutex.Lock() defer r.mutex.Unlock() if _, ok := r.cache[uuid]; ok { return NewErrCacheInconsistent(fmt.Sprintf("cannot create row %s as it already exists", uuid)) } if reflect.TypeOf(m) != r.dataType { return fmt.Errorf("expected data of type %s, but got %s", r.dataType.String(), reflect.TypeOf(m).String()) } info, err := r.dbModel.NewModelInfo(m) if err != nil { return err } addIndexes := r.newIndexes() for _, indexSpec := range r.indexSpecs { index := indexSpec.index val, err := valueFromIndex(info, indexSpec.columns) if err != nil { return err } uuidset := newUUIDSet(uuid) vals := r.indexes[index] existing := vals[val] if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { return NewIndexExistsError(r.name, val, string(index), uuid, existing.list()) } addIndexes[index][val] = uuidset } // write indexes for _, indexSpec := range r.indexSpecs { index := indexSpec.index for k, v := range addIndexes[index] { if indexSpec.isSchemaIndex() { r.indexes[index][k] = v } else { r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) } } } r.cache[uuid] = model.Clone(m) return nil } // Update updates the content in the cache and returns the original (pre-update) model func (r *RowCache) Update(uuid string, m model.Model, checkIndexes bool) (model.Model, error) { r.mutex.Lock() defer r.mutex.Unlock() if _, ok := r.cache[uuid]; !ok { return nil, NewErrCacheInconsistent(fmt.Sprintf("cannot update row %s as it does not exist in the cache", uuid)) } oldRow := model.Clone(r.cache[uuid]) oldInfo, err := r.dbModel.NewModelInfo(oldRow) if err != nil { return nil, err } newInfo, err := r.dbModel.NewModelInfo(m) if err != nil { return nil, err } addIndexes := r.newIndexes() removeIndexes := r.newIndexes() var errs []error for _, indexSpec := range r.indexSpecs { index := indexSpec.index var err error oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) if err != nil { return nil, err } newVal, err := valueFromIndex(newInfo, indexSpec.columns) if err != nil { return nil, err } // if old and new values are the same, don't worry if oldVal == newVal { continue } // old and new values are NOT the same uuidset := newUUIDSet(uuid) // check that there are no conflicts vals := r.indexes[index] existing := vals[newVal] if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { errs = append(errs, NewIndexExistsError( r.name, newVal, string(index), uuid, existing.list(), )) } addIndexes[index][newVal] = uuidset removeIndexes[index][oldVal] = uuidset } if len(errs) > 0 { return nil, fmt.Errorf("%+v", errs) } // write indexes for _, indexSpec := range r.indexSpecs { index := indexSpec.index for k, v := range addIndexes[index] { if indexSpec.isSchemaIndex() { r.indexes[index][k] = v } else { r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) } } for k, v := range removeIndexes[index] { if indexSpec.isSchemaIndex() || substractUUIDSet(r.indexes[index][k], v).empty() { delete(r.indexes[index], k) } } } r.cache[uuid] = model.Clone(m) return oldRow, nil } // IndexExists checks if any of the schema indexes of the provided model is // already in the cache under a different UUID. 
func (r *RowCache) IndexExists(row model.Model) error { info, err := r.dbModel.NewModelInfo(row) if err != nil { return err } field, err := info.FieldByColumn("_uuid") if err != nil { return nil } uuid := field.(string) for _, indexSpec := range r.indexSpecs { if !indexSpec.isSchemaIndex() { // Given the ordered indexSpecs, we can break here if we reach the // first non schema index break } index := indexSpec.index val, err := valueFromIndex(info, indexSpec.columns) if err != nil { continue } vals := r.indexes[index] existing := vals[val] if !existing.empty() && !existing.equals(newUUIDSet(uuid)) { return NewIndexExistsError( r.name, val, string(index), uuid, existing.list(), ) } } return nil } // Delete deletes a row from the cache func (r *RowCache) Delete(uuid string) error { r.mutex.Lock() defer r.mutex.Unlock() if _, ok := r.cache[uuid]; !ok { return NewErrCacheInconsistent(fmt.Sprintf("cannot delete row %s as it does not exist in the cache", uuid)) } oldRow := r.cache[uuid] oldInfo, err := r.dbModel.NewModelInfo(oldRow) if err != nil { return err } removeIndexes := r.newIndexes() for _, indexSpec := range r.indexSpecs { index := indexSpec.index oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) if err != nil { return err } removeIndexes[index][oldVal] = newUUIDSet(uuid) } // write indexes for _, indexSpec := range r.indexSpecs { index := indexSpec.index for k, v := range removeIndexes[index] { // only remove the index if it is pointing to this uuid // otherwise we can cause a consistency issue if we've processed // updates out of order if substractUUIDSet(r.indexes[index][k], v).empty() { delete(r.indexes[index], k) } } } delete(r.cache, uuid) return nil } // Rows returns a copy of all Rows in the Cache func (r *RowCache) Rows() map[string]model.Model { r.mutex.RLock() defer r.mutex.RUnlock() result := make(map[string]model.Model) for k, v := range r.cache { result[k] = model.Clone(v) } return result } // RowsShallow returns a clone'd list of f all Rows in the cache, but does not // clone the underlying objects. Therefore, the objects returned are READ ONLY. // This is, however, thread safe, as the cached objects are cloned before being updated // when modifications come in. func (r *RowCache) RowsShallow() map[string]model.Model { r.mutex.RLock() defer r.mutex.RUnlock() result := make(map[string]model.Model, len(r.cache)) for k, v := range r.cache { result[k] = v } return result } // uuidsByConditionsAsIndexes checks possible indexes that can be built with a // subset of the provided conditions and returns the uuids for the models that // match that subset of conditions. If no conditions could be used as indexes, // returns nil. Note that this method does not necessarily match all the // provided conditions. Thus the caller is required to evaluate all the // conditions against the returned candidates. This is only useful to obtain, as // quick as possible, via indexes, a reduced list of candidate models that might // match all conditions, which should be better than just evaluating all // conditions against all rows of a table. // //nolint:gocyclo // warns overall function is complex but ignores inner functions func (r *RowCache) uuidsByConditionsAsIndexes(conditions []ovsdb.Condition, nativeValues []interface{}) (uuidset, error) { type indexableCondition struct { column string keys []interface{} nativeValue interface{} } // build an indexable condition, more appropriate for our processing, from // an ovsdb condition. 
Only equality based conditions can be used as indexes // (or `includes` conditions on map values). toIndexableCondition := func(condition ovsdb.Condition, nativeValue interface{}) *indexableCondition { if condition.Column == "_uuid" { return nil } if condition.Function != ovsdb.ConditionEqual && condition.Function != ovsdb.ConditionIncludes { return nil } v := reflect.ValueOf(nativeValue) if !v.IsValid() { return nil } isSet := v.Kind() == reflect.Slice || v.Kind() == reflect.Array if condition.Function == ovsdb.ConditionIncludes && isSet { return nil } keys := []interface{}{} if v.Kind() == reflect.Map && condition.Function == ovsdb.ConditionIncludes { for _, key := range v.MapKeys() { keys = append(keys, key.Interface()) } } return &indexableCondition{ column: condition.Column, keys: keys, nativeValue: nativeValue, } } // for any given set of conditions, we need to check if an index uses the // same fields as the conditions indexMatchesConditions := func(spec indexSpec, conditions []*indexableCondition) bool { columnKeys := []model.ColumnKey{} for _, condition := range conditions { if len(condition.keys) == 0 { columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column}) continue } for _, key := range condition.keys { columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column, Key: key}) } } index := newIndexFromColumnKeys(columnKeys...) return index == spec.index } // for a specific set of conditions, check if an index can be built from // them and return the associated UUIDs evaluateConditionSetAsIndex := func(conditions []*indexableCondition) (uuidset, error) { // build a model with the values from the conditions m, err := r.dbModel.NewModel(r.name) if err != nil { return nil, err } info, err := r.dbModel.NewModelInfo(m) if err != nil { return nil, err } for _, conditions := range conditions { err := info.SetField(conditions.column, conditions.nativeValue) if err != nil { return nil, err } } for _, spec := range r.indexSpecs { if !indexMatchesConditions(spec, conditions) { continue } // if we have an index for those conditions, calculate the index // value. The models mapped to that value match the conditions. v, err := valueFromIndex(info, spec.columns) if err != nil { return nil, err } if v != nil { uuids := r.indexes[spec.index][v] if uuids == nil { // this set of conditions was represented by an index but // had no matches, return an empty set uuids = uuidset{} } return uuids, nil } } return nil, nil } // set of uuids that match the conditions as we evaluate them var matching uuidset // attempt to evaluate a set of conditions via indexes and intersect the // results against matches of previous sets intersectUUIDsFromConditionSet := func(indexableConditions []*indexableCondition) (bool, error) { uuids, err := evaluateConditionSetAsIndex(indexableConditions) if err != nil { return true, err } if matching == nil { matching = uuids } else if uuids != nil { matching = intersectUUIDSets(matching, uuids) } if matching != nil && len(matching) <= 1 { // if we had no matches or a single match, no point in continuing // searching for additional indexes. If we had a single match, it's // cheaper to just evaluate all conditions on it. return true, nil } return false, nil } // First, filter out conditions that cannot be matched against indexes. 
With // the remaining conditions build all possible subsets (the power set of all // conditions) and for any subset that is an index, intersect the obtained // uuids with the ones obtained from previous subsets matchUUIDsFromConditionsPowerSet := func() error { ps := [][]*indexableCondition{} // prime the power set with a first empty subset ps = append(ps, []*indexableCondition{}) for i, condition := range conditions { nativeValue := nativeValues[i] iCondition := toIndexableCondition(condition, nativeValue) // this is not a condition we can use as an index, skip it if iCondition == nil { continue } // the power set is built appending the subsets that result from // adding each item to each of the previous subsets ss := make([][]*indexableCondition, len(ps)) for j := range ss { ss[j] = make([]*indexableCondition, len(ps[j]), len(ps[j])+1) copy(ss[j], ps[j]) ss[j] = append(ss[j], iCondition) // as we add them to the power set, attempt to evaluate this // subset of conditions as indexes stop, err := intersectUUIDsFromConditionSet(ss[j]) if stop || err != nil { return err } } ps = append(ps, ss...) } return nil } // finally err := matchUUIDsFromConditionsPowerSet() return matching, err } // RowsByCondition searches models in the cache that match all conditions func (r *RowCache) RowsByCondition(conditions []ovsdb.Condition) (map[string]model.Model, error) { r.mutex.RLock() defer r.mutex.RUnlock() results := make(map[string]model.Model) schema := r.dbModel.Schema.Table(r.name) // no conditions matches all rows if len(conditions) == 0 { for uuid := range r.cache { results[uuid] = r.rowByUUID(uuid) } return results, nil } // one pass to obtain the native values nativeValues := make([]interface{}, 0, len(conditions)) for _, condition := range conditions { tSchema := schema.Column(condition.Column) nativeValue, err := ovsdb.OvsToNative(tSchema, condition.Value) if err != nil { return nil, err } nativeValues = append(nativeValues, nativeValue) } // obtain all possible matches using conditions as indexes matching, err := r.uuidsByConditionsAsIndexes(conditions, nativeValues) if err != nil { return nil, err } // From the matches obtained with indexes, which might have not used all // conditions, continue trimming down the list explicitly evaluating the // conditions. 
for i, condition := range conditions { matchingCondition := uuidset{} if condition.Column == "_uuid" && (condition.Function == ovsdb.ConditionEqual || condition.Function == ovsdb.ConditionIncludes) { uuid, ok := nativeValues[i].(string) if !ok { panic(fmt.Sprintf("%+v is not a uuid", nativeValues[i])) } if _, found := r.cache[uuid]; found { matchingCondition.add(uuid) } } else { matchCondition := func(uuid string) error { row := r.cache[uuid] info, err := r.dbModel.NewModelInfo(row) if err != nil { return err } value, err := info.FieldByColumn(condition.Column) if err != nil { return err } ok, err := condition.Function.Evaluate(value, nativeValues[i]) if err != nil { return err } if ok { matchingCondition.add(uuid) } return nil } if matching != nil { // we just need to consider rows that matched previous // conditions for uuid := range matching { err = matchCondition(uuid) if err != nil { return nil, err } } } else { // If this is the first condition we are able to check, just run // it by whole table for uuid := range r.cache { err = matchCondition(uuid) if err != nil { return nil, err } } } } if matching == nil { matching = matchingCondition } else { matching = intersectUUIDSets(matching, matchingCondition) } if matching.empty() { // no models match the conditions checked up to now, no need to // check remaining conditions break } } for uuid := range matching { results[uuid] = r.rowByUUID(uuid) } return results, nil } // Len returns the length of the cache func (r *RowCache) Len() int { r.mutex.RLock() defer r.mutex.RUnlock() return len(r.cache) } func (r *RowCache) Index(columns ...string) (map[interface{}][]string, error) { r.mutex.RLock() defer r.mutex.RUnlock() spec := newIndexFromColumns(columns...) index, ok := r.indexes[spec] if !ok { return nil, fmt.Errorf("%v is not an index", columns) } dbIndex := make(map[interface{}][]string, len(index)) for k, v := range index { dbIndex[k] = v.list() } return dbIndex, nil } // EventHandler can handle events when the contents of the cache changes type EventHandler interface { OnAdd(table string, model model.Model) OnUpdate(table string, old model.Model, new model.Model) OnDelete(table string, model model.Model) } // EventHandlerFuncs is a wrapper for the EventHandler interface // It allows a caller to only implement the functions they need type EventHandlerFuncs struct { AddFunc func(table string, model model.Model) UpdateFunc func(table string, old model.Model, new model.Model) DeleteFunc func(table string, model model.Model) } // OnAdd calls AddFunc if it is not nil func (e *EventHandlerFuncs) OnAdd(table string, model model.Model) { if e.AddFunc != nil { e.AddFunc(table, model) } } // OnUpdate calls UpdateFunc if it is not nil func (e *EventHandlerFuncs) OnUpdate(table string, old, new model.Model) { if e.UpdateFunc != nil { e.UpdateFunc(table, old, new) } } // OnDelete calls DeleteFunc if it is not nil func (e *EventHandlerFuncs) OnDelete(table string, row model.Model) { if e.DeleteFunc != nil { e.DeleteFunc(table, row) } } // TableCache contains a collection of RowCaches, hashed by name, // and an array of EventHandlers that respond to cache updates // It implements the ovsdb.NotificationHandler interface so it may // handle update notifications type TableCache struct { cache map[string]*RowCache eventProcessor *eventProcessor dbModel model.DatabaseModel ovsdb.NotificationHandler mutex sync.RWMutex logger *logr.Logger } // Data is the type for data that can be prepopulated in the cache type Data map[string]map[string]model.Model // 
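// Illustrative sketch (not part of the original source): building a cache
// that is prepopulated with Data and wired to an event handler. dbModel and
// someModel are assumed to exist in the caller; the table name and UUID are
// hypothetical.
//
//	data := Data{"Open_vSwitch": {"some-uuid": &someModel{}}}
//	tc, err := NewTableCache(dbModel, data, nil)
//	if err != nil {
//		// handle error
//	}
//	tc.AddEventHandler(&EventHandlerFuncs{
//		AddFunc: func(table string, m model.Model) {
//			// keep handlers fast, e.g. push to a work queue
//		},
//	})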
// NewTableCache creates a new TableCache
func NewTableCache(dbModel model.DatabaseModel, data Data, logger *logr.Logger) (*TableCache, error) {
	if !dbModel.Valid() {
		return nil, fmt.Errorf("tablecache without valid databasemodel cannot be populated")
	}
	if logger == nil {
		l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("cache")
		logger = &l
	} else {
		l := logger.WithName("cache")
		logger = &l
	}
	eventProcessor := newEventProcessor(bufferSize, logger)
	cache := make(map[string]*RowCache)
	tableTypes := dbModel.Types()
	for name := range dbModel.Schema.Tables {
		cache[name] = newRowCache(name, dbModel, tableTypes[name])
	}
	for table, rowData := range data {
		if _, ok := dbModel.Schema.Tables[table]; !ok {
			return nil, fmt.Errorf("table %s is not in schema", table)
		}
		rowCache := cache[table]
		for uuid, row := range rowData {
			if err := rowCache.Create(uuid, row, true); err != nil {
				return nil, err
			}
		}
	}
	return &TableCache{
		cache:          cache,
		eventProcessor: eventProcessor,
		dbModel:        dbModel,
		mutex:          sync.RWMutex{},
		logger:         logger,
	}, nil
}

// Mapper returns the mapper
func (t *TableCache) Mapper() mapper.Mapper {
	return t.dbModel.Mapper
}

// DatabaseModel returns the DatabaseModel
func (t *TableCache) DatabaseModel() model.DatabaseModel {
	return t.dbModel
}

// Table returns the Table from the cache with a given name
func (t *TableCache) Table(name string) *RowCache {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	if table, ok := t.cache[name]; ok {
		return table
	}
	return nil
}

// Tables returns a list of table names that are in the cache
func (t *TableCache) Tables() []string {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	var result []string
	for k := range t.cache {
		result = append(result, k)
	}
	return result
}

// Update implements the update method of the NotificationHandler interface.
// It applies the received table updates to the cache via Populate.
func (t *TableCache) Update(context interface{}, tableUpdates ovsdb.TableUpdates) error {
	if len(tableUpdates) == 0 {
		return nil
	}
	if err := t.Populate(tableUpdates); err != nil {
		t.logger.Error(err, "during libovsdb cache populate")
		return err
	}
	return nil
}

// Update2 implements the update2 method of the NotificationHandler interface.
// It applies the received table updates to the cache via Populate2.
func (t *TableCache) Update2(context interface{}, tableUpdates ovsdb.TableUpdates2) error {
	if len(tableUpdates) == 0 {
		return nil
	}
	if err := t.Populate2(tableUpdates); err != nil {
		t.logger.Error(err, "during libovsdb cache populate2")
		return err
	}
	return nil
}

// Locked implements the locked method of the NotificationHandler interface
func (t *TableCache) Locked([]interface{}) {
}

// Stolen implements the stolen method of the NotificationHandler interface
func (t *TableCache) Stolen([]interface{}) {
}

// Echo implements the echo method of the NotificationHandler interface
func (t *TableCache) Echo([]interface{}) {
}

// Disconnected implements the disconnected method of the NotificationHandler interface
func (t *TableCache) Disconnected() {
}

// Populate adds data to the cache and places an event on the channel
func (t *TableCache) Populate(tableUpdates ovsdb.TableUpdates) error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	for table := range t.dbModel.Types() {
		tu, ok := tableUpdates[table]
		if !ok {
			continue
		}
		tCache := t.cache[table]
		for uuid, row := range tu {
			t.logger.V(5).Info("processing update", "table", table, "uuid", uuid)
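			// Illustrative sketch (not part of the original source): the shape of
			// the payload this loop consumes, using a hypothetical table and UUID,
			// mirrors what a monitor notification delivers:
			//
			//	row := ovsdb.Row(map[string]interface{}{"foo": "bar"})
			//	_ = ovsdb.TableUpdates{
			//		"Open_vSwitch": {"some-uuid": &ovsdb.RowUpdate{New: &row}},
			//	}
			//
			// Each ovsdb.RowUpdate is folded into an updates.ModelUpdates below and
			// then applied to the cache (and the event channel) via ApplyCacheUpdate.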
			update := updates.ModelUpdates{}
			current := tCache.cache[uuid]
			err := update.AddRowUpdate(t.dbModel, table, uuid, current, *row)
			if err != nil {
				return err
			}
			err = t.ApplyCacheUpdate(update)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// Populate2 adds data to the cache and places an event on the channel
func (t *TableCache) Populate2(tableUpdates ovsdb.TableUpdates2) error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	for table := range t.dbModel.Types() {
		tu, ok := tableUpdates[table]
		if !ok {
			continue
		}
		tCache := t.cache[table]
		for uuid, row := range tu {
			t.logger.V(5).Info("processing update", "table", table, "uuid", uuid)
			update := updates.ModelUpdates{}
			current := tCache.cache[uuid]
			if row.Initial == nil && row.Insert == nil && current == nil {
				return NewErrCacheInconsistent(fmt.Sprintf("row with uuid %s does not exist", uuid))
			}
			err := update.AddRowUpdate2(t.dbModel, table, uuid, current, *row)
			if err != nil {
				return err
			}
			err = t.ApplyCacheUpdate(update)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// Purge drops all data in the cache and reinitializes it using the
// provided database model
func (t *TableCache) Purge(dbModel model.DatabaseModel) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.dbModel = dbModel
	tableTypes := t.dbModel.Types()
	for name := range t.dbModel.Schema.Tables {
		t.cache[name] = newRowCache(name, t.dbModel, tableTypes[name])
	}
}

// AddEventHandler registers the supplied EventHandler to receive cache events
func (t *TableCache) AddEventHandler(handler EventHandler) {
	t.eventProcessor.AddEventHandler(handler)
}

// Run starts the event processing loop.
// It blocks until the stop channel is closed.
func (t *TableCache) Run(stopCh <-chan struct{}) {
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		t.eventProcessor.Run(stopCh)
	}()
	wg.Wait()
}

// newRowCache creates a new row cache with the provided data
// if the data is nil, an empty RowCache will be created
func newRowCache(name string, dbModel model.DatabaseModel, dataType reflect.Type) *RowCache {
	schemaIndexes := dbModel.Schema.Table(name).Indexes
	clientIndexes := dbModel.Client().Indexes(name)
	r := &RowCache{
		name:       name,
		dbModel:    dbModel,
		indexSpecs: make([]indexSpec, 0, len(schemaIndexes)+len(clientIndexes)),
		dataType:   dataType,
		cache:      make(map[string]model.Model),
		mutex:      sync.RWMutex{},
	}
	// respect the order of indexes, add schema indexes first, then client
	// indexes
	indexes := map[index]indexSpec{}
	for _, columns := range schemaIndexes {
		columnKeys := newColumnKeysFromColumns(columns...)
		index := newIndexFromColumnKeys(columnKeys...)
		spec := indexSpec{index: index, columns: columnKeys, indexType: schemaIndexType}
		r.indexSpecs = append(r.indexSpecs, spec)
		indexes[index] = spec
	}
	for _, clientIndex := range clientIndexes {
		columnKeys := clientIndex.Columns
		index := newIndexFromColumnKeys(columnKeys...)
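		// Illustrative sketch (not part of the original source): a client index
		// reaching this loop is declared on the ClientDBModel before the cache
		// is built. The variable, table and column names are hypothetical; the
		// optional Key selects a single entry of a map column:
		//
		//	clientDBModel.SetIndexes(map[string][]model.ClientIndex{
		//		"Open_vSwitch": {
		//			{Columns: []model.ColumnKey{{Column: "foo"}, {Column: "bar", Key: "bar"}}},
		//		},
		//	})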
// if this is already a DB index, ignore if _, ok := indexes[index]; ok { continue } spec := indexSpec{index: index, columns: columnKeys, indexType: clientIndexType} r.indexSpecs = append(r.indexSpecs, spec) indexes[index] = spec } r.indexes = r.newIndexes() return r } func (r *RowCache) newIndexes() columnToValue { c := make(columnToValue) for _, indexSpec := range r.indexSpecs { index := indexSpec.index c[index] = make(valueToUUIDs) } return c } // event encapsulates a cache event type event struct { eventType string table string old model.Model new model.Model } // eventProcessor handles the queueing and processing of cache events type eventProcessor struct { events chan *event // handlersMutex locks the handlers array when we add a handler or dispatch events // we don't need a RWMutex in this case as we only have one thread reading and the write // volume is very low (i.e only when AddEventHandler is called) handlersMutex sync.Mutex handlers []EventHandler logger *logr.Logger } func newEventProcessor(capacity int, logger *logr.Logger) *eventProcessor { return &eventProcessor{ events: make(chan *event, capacity), handlers: []EventHandler{}, logger: logger, } } // AddEventHandler registers the supplied EventHandler with the eventProcessor // EventHandlers MUST process events quickly, for example, pushing them to a queue // to be processed by the client. Long Running handler functions adversely affect // other handlers and MAY cause loss of data if the channel buffer is full func (e *eventProcessor) AddEventHandler(handler EventHandler) { e.handlersMutex.Lock() defer e.handlersMutex.Unlock() e.handlers = append(e.handlers, handler) } // AddEvent writes an event to the channel func (e *eventProcessor) AddEvent(eventType string, table string, old model.Model, new model.Model) { // We don't need to check for error here since there // is only a single writer. RPC is run in blocking mode event := event{ eventType: eventType, table: table, old: old, new: new, } select { case e.events <- &event: // noop return default: e.logger.V(0).Info("dropping event because event buffer is full") } } // Run runs the eventProcessor loop. 
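// (Illustrative note, not part of the original comment: this loop is normally
// driven through TableCache.Run rather than invoked directly, e.g.
//
//	stopCh := make(chan struct{})
//	go tc.Run(stopCh)
//	// ... later, close(stopCh) to stop dispatching events
//
// where tc is assumed to be a *TableCache built with NewTableCache.)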
// It will block until the stopCh has been closed // Otherwise it will wait for events to arrive on the event channel // Once received, it will dispatch the event to each registered handler func (e *eventProcessor) Run(stopCh <-chan struct{}) { for { select { case <-stopCh: return case event := <-e.events: e.handlersMutex.Lock() for _, handler := range e.handlers { switch event.eventType { case addEvent: handler.OnAdd(event.table, event.new) case updateEvent: handler.OnUpdate(event.table, event.old, event.new) case deleteEvent: handler.OnDelete(event.table, event.old) } } e.handlersMutex.Unlock() } } } type cacheUpdate interface { GetUpdatedTables() []string ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error } func (t *TableCache) ApplyCacheUpdate(update cacheUpdate) error { tables := update.GetUpdatedTables() for _, table := range tables { tCache := t.cache[table] err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error { switch { case old == nil && new != nil: t.logger.V(5).Info("inserting model", "table", table, "uuid", uuid, "model", new) err := tCache.Create(uuid, new, false) if err != nil { return err } t.eventProcessor.AddEvent(addEvent, table, nil, new) case old != nil && new != nil: t.logger.V(5).Info("updating model", "table", table, "uuid", uuid, "old", old, "new", new) _, err := tCache.Update(uuid, new, false) if err != nil { return err } t.eventProcessor.AddEvent(updateEvent, table, old, new) case new == nil: t.logger.V(5).Info("deleting model", "table", table, "uuid", uuid, "model", old) err := tCache.Delete(uuid) if err != nil { return err } t.eventProcessor.AddEvent(deleteEvent, table, old, nil) } return nil }) if err != nil { return err } } return nil } func valueFromIndex(info *mapper.Info, columnKeys []model.ColumnKey) (interface{}, error) { if len(columnKeys) > 1 { var buf bytes.Buffer enc := gob.NewEncoder(&buf) for _, columnKey := range columnKeys { val, err := valueFromColumnKey(info, columnKey) if err != nil { return "", err } // if object is nil dont try to encode it value := reflect.ValueOf(val) if value.Kind() == reflect.Invalid { continue } // if object is a nil pointer dont try to encode it if value.Kind() == reflect.Pointer && value.IsNil() { continue } err = enc.Encode(val) if err != nil { return "", err } } h := sha256.New() val := hex.EncodeToString(h.Sum(buf.Bytes())) return val, nil } val, err := valueFromColumnKey(info, columnKeys[0]) if err != nil { return "", err } return val, err } func valueFromColumnKey(info *mapper.Info, columnKey model.ColumnKey) (interface{}, error) { val, err := info.FieldByColumn(columnKey.Column) if err != nil { return nil, err } if columnKey.Key != nil { val, err = valueFromMap(val, columnKey.Key) if err != nil { return "", fmt.Errorf("can't get key value from map: %v", err) } } return val, err } func valueFromMap(aMap interface{}, key interface{}) (interface{}, error) { m := reflect.ValueOf(aMap) if m.Kind() != reflect.Map { return nil, fmt.Errorf("expected map but got %s", m.Kind()) } v := m.MapIndex(reflect.ValueOf(key)) if !v.IsValid() { // return the zero value for the map value type return reflect.Indirect(reflect.New(m.Type().Elem())).Interface(), nil } return v.Interface(), nil } golang-github-ovn-org-libovsdb-0.7.0/cache/cache_test.go000066400000000000000000002170621464501522100231400ustar00rootroot00000000000000package cache import ( "encoding/json" "fmt" "math/rand" "testing" "github.com/go-logr/logr" "github.com/ovn-org/libovsdb/model" 
"github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/test" "github.com/ovn-org/libovsdb/updates" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type testModel struct { UUID string `ovsdb:"_uuid"` Foo string `ovsdb:"foo"` Bar string `ovsdb:"bar"` Baz int `ovsdb:"baz"` Array []string `ovsdb:"array"` Datapath *string `ovsdb:"datapath"` } const testSchemaFmt string = `{ "name": "Open_vSwitch", "tables": { "Open_vSwitch": { ` const testSchemaFmt2 string = ` "columns": { "foo": { "type": "string" }, "bar": { "type": "string" }, "baz": { "type": "integer" }, "array": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "datapath": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } } } } } }` func getTestSchema(indexes string) []byte { if len(indexes) > 0 { return []byte(testSchemaFmt + fmt.Sprintf(`"indexes": [%s],`, indexes) + testSchemaFmt2) } return []byte(testSchemaFmt + testSchemaFmt2) } func TestRowCache_Row(t *testing.T) { type fields struct { cache map[string]model.Model } type args struct { uuid string } tests := []struct { name string fields fields args args want model.Model }{ { "returns a row that exists", fields{cache: map[string]model.Model{"test": &testModel{}}}, args{uuid: "test"}, &testModel{}, }, { "returns a nil for a row that does not exist", fields{cache: map[string]model.Model{"test": &testModel{}}}, args{uuid: "foo"}, nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &RowCache{ cache: tt.fields.cache, } got := r.Row(tt.args.uuid) assert.Equal(t, tt.want, got) }) } } func TestRowCache_Rows(t *testing.T) { tests := []struct { name string cache map[string]model.Model want map[string]model.Model }{ { "returns a rows that exist", map[string]model.Model{"test1": &testModel{}, "test2": &testModel{}, "test3": &testModel{}}, map[string]model.Model{"test1": &testModel{}, "test2": &testModel{}, "test3": &testModel{}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &RowCache{ cache: tt.cache, } got := r.Rows() assert.Equal(t, tt.want, got) }) } } func TestRowCacheCreate(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{"bar": &testModel{Foo: "bar"}}, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) tests := []struct { name string uuid string model *testModel checkIndex bool wantErr bool }{ { "inserts a new row", "foo", &testModel{Foo: "foo"}, true, false, }, { "error duplicate uuid", "bar", &testModel{Foo: "foo"}, true, true, }, { "error duplicate index", "baz", &testModel{Foo: "bar"}, true, true, }, { "error duplicate uuid, no index check", "bar", &testModel{Foo: "bar"}, false, true, }, { "no error duplicate index", "baz", &testModel{Foo: "bar"}, false, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) err := rc.Create(tt.uuid, tt.model, tt.checkIndex) if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) assert.Len(t, rc.indexes["foo"][tt.model.Foo], 1) assert.Equal(t, tt.uuid, rc.indexes["foo"][tt.model.Foo].getAny()) } }) } } func TestRowCacheCreateClientIndex(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := 
model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "foo", }, }, }, }, }) require.Nil(t, err) err = json.Unmarshal(getTestSchema(""), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{"bar": &testModel{Foo: "bar"}}, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tests := []struct { name string uuid string model *testModel wantErr bool expected valueToUUIDs }{ { name: "inserts a new row", uuid: "foo", model: &testModel{Foo: "foo"}, wantErr: false, expected: valueToUUIDs{ "foo": newUUIDSet("foo"), "bar": newUUIDSet("bar"), }, }, { name: "error duplicate uuid", uuid: "bar", model: &testModel{Foo: "foo"}, wantErr: true, }, { name: "inserts duplicate index", uuid: "baz", model: &testModel{Foo: "bar"}, wantErr: false, expected: valueToUUIDs{ "bar": newUUIDSet("bar", "baz"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) err = rc.Create(tt.uuid, tt.model, true) if tt.wantErr { require.Error(t, err) } else { require.Nil(t, err) require.Equal(t, tt.expected, rc.indexes["foo"]) } }) } } func TestRowCacheCreateMultiIndex(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo", "bar", "datapath"]`), &schema) require.Nil(t, err) index := newIndexFromColumns("foo", "bar", "datapath") // Note datapath purposely left empty for initial data to exercise handling of nil pointer testData := Data{ "Open_vSwitch": map[string]model.Model{"bar": &testModel{Foo: "bar", Bar: "bar"}}, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) fakeDatapath := "fakePath" tests := []struct { name string uuid string model *testModel wantErr bool wantIndexExistsErr bool }{ { "inserts a new row", "foo", &testModel{Foo: "foo", Bar: "foo"}, false, false, }, { "error duplicate uuid", "bar", &testModel{Foo: "bar", Bar: "bar"}, true, false, }, { "error duplicate index", "baz", &testModel{Foo: "foo", Bar: "foo"}, true, true, }, { "new row with one duplicate value", "baz", &testModel{Foo: "foo", Bar: "bar"}, false, false, }, { "new row with other duplicate value", "quux", &testModel{Foo: "bar", Bar: "baz"}, false, false, }, { "new row with non nil pointer value, but other column indexes overlap", "quux2", &testModel{Foo: "bar", Bar: "baz", Datapath: &fakeDatapath}, false, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) err := rc.Create(tt.uuid, tt.model, true) if tt.wantErr { assert.Error(t, err) if tt.wantIndexExistsErr { assert.IsType(t, &ErrIndexExists{}, err) } } else { assert.Nil(t, err) mapperInfo, err := dbModel.NewModelInfo(tt.model) require.Nil(t, err) h, err := valueFromIndex(mapperInfo, newColumnKeysFromColumns("foo", "bar", "datapath")) require.Nil(t, err) assert.Len(t, rc.indexes[index][h], 1) assert.Equal(t, tt.uuid, rc.indexes[index][h].getAny()) } }) } } func TestRowCacheCreateMultiClientIndex(t *testing.T) { type testModel struct { UUID string `ovsdb:"_uuid"` Foo string `ovsdb:"foo"` Bar map[string]string `ovsdb:"bar"` } var schema 
ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "foo", }, { Column: "bar", Key: "bar", }, }, }, }, }) index := newIndexFromColumnKeys(db.Indexes("Open_vSwitch")[0].Columns...) err = json.Unmarshal([]byte(`{ "name": "Open_vSwitch", "tables": { "Open_vSwitch": { "columns": { "foo": { "type": "string" }, "bar": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } } } } } }`), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{"bar": &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}}, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) type expected struct { index model.Model uuids uuidset } tests := []struct { name string uuid string model *testModel wantErr bool expected []expected }{ { name: "inserts a new row", uuid: "foo", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar"), }, { index: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, uuids: newUUIDSet("foo"), }, }, }, { name: "error duplicate uuid", uuid: "bar", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "bar"}}, wantErr: true, }, { name: "inserts duplicate index", uuid: "baz", model: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar", "baz"), }, }, }, { name: "new row with one duplicate value", uuid: "baz", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "bar"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar"), }, { index: &testModel{Foo: "foo", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("baz"), }, }, }, { name: "new row with other duplicate value", uuid: "baz", model: &testModel{Foo: "bar", Bar: map[string]string{"bar": "foo"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "foo"}}, uuids: newUUIDSet("baz"), }, }, }, { name: "new row with nil map index", uuid: "baz", model: &testModel{Foo: "bar"}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar"), }, { index: &testModel{Foo: "bar"}, uuids: newUUIDSet("baz"), }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) err = rc.Create(tt.uuid, tt.model, true) if tt.wantErr { require.Error(t, err) } else { require.Nil(t, err) require.Len(t, rc.indexes[index], len(tt.expected)) for _, expected := range tt.expected { mapperInfo, err := dbModel.NewModelInfo(expected.index) require.Nil(t, err) h, err := valueFromIndex(mapperInfo, db.Indexes("Open_vSwitch")[0].Columns) require.Nil(t, err) require.Equal(t, expected.uuids, rc.indexes[index][h], expected.index) } } }) } } func TestRowCacheUpdate(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", 
map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{ "bar": &testModel{Foo: "bar"}, "foobar": &testModel{Foo: "foobar"}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) tests := []struct { name string uuid string model *testModel checkIndex bool wantErr bool }{ { "error if row does not exist", "foo", &testModel{Foo: "foo"}, true, true, }, { "update", "bar", &testModel{Foo: "baz"}, true, false, }, { "error new index would cause duplicate", "bar", &testModel{Foo: "foobar"}, true, true, }, { "no error new index would cause duplicate", "bar", &testModel{Foo: "foobar"}, false, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) _, err := rc.Update(tt.uuid, tt.model, tt.checkIndex) if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) assert.Len(t, rc.indexes["foo"][tt.model.Foo], 1) assert.Equal(t, tt.uuid, rc.indexes["foo"][tt.model.Foo].getAny()) } }) } } func TestRowCacheUpdateClientIndex(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "foo", }, }, }, }, }) err = json.Unmarshal(getTestSchema(""), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{ "foo": &testModel{Foo: "foo", Bar: "foo"}, "bar": &testModel{Foo: "bar", Bar: "bar"}, "foobar": &testModel{Foo: "bar", Bar: "foobar"}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tests := []struct { name string uuid string model *testModel wantErr bool expected valueToUUIDs }{ { name: "error if row does not exist", uuid: "baz", model: &testModel{Foo: "baz"}, wantErr: true, }, { name: "update non-index", uuid: "foo", model: &testModel{Foo: "foo", Bar: "bar"}, wantErr: false, expected: valueToUUIDs{ "foo": newUUIDSet("foo"), "bar": newUUIDSet("bar", "foobar"), }, }, { name: "update unique index to new index", uuid: "foo", model: &testModel{Foo: "baz"}, wantErr: false, expected: valueToUUIDs{ "baz": newUUIDSet("foo"), "bar": newUUIDSet("bar", "foobar"), }, }, { name: "update unique index to existing index", uuid: "foo", model: &testModel{Foo: "bar"}, wantErr: false, expected: valueToUUIDs{ "bar": newUUIDSet("foo", "bar", "foobar"), }, }, { name: "update multi index to different index", uuid: "foobar", model: &testModel{Foo: "foo"}, wantErr: false, expected: valueToUUIDs{ "foo": newUUIDSet("foo", "foobar"), "bar": newUUIDSet("bar"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) _, err = rc.Update(tt.uuid, tt.model, true) if tt.wantErr { require.Error(t, err) } else { require.Nil(t, err) require.Equal(t, tt.expected, rc.indexes["foo"]) } }) } } func TestRowCacheUpdateMultiIndex(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo", "bar", "datapath"]`), &schema) require.Nil(t, err) index := newIndexFromColumns("foo", 
"bar", "datapath") testData := Data{ "Open_vSwitch": map[string]model.Model{ "bar": &testModel{Foo: "bar", Bar: "bar"}, "foobar": &testModel{Foo: "foobar", Bar: "foobar"}, "baz": &testModel{Foo: "blah", Bar: "blah"}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) assert.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) fakeDatapath := "fakePath" tests := []struct { name string uuid string model *testModel wantErr bool }{ { "error if row does not exist", "foo", &testModel{Foo: "foo", Bar: "foo"}, true, }, { "update both index cols", "bar", &testModel{Foo: "baz", Bar: "baz"}, false, }, { "update single index col", "bar", &testModel{Foo: "baz", Bar: "quux"}, false, }, { "error updating index would cause duplicate, even with nil pointer index value", "baz", &testModel{Foo: "foobar", Bar: "foobar"}, true, }, { "update from nil ptr value to non-nil value for index", "baz", &testModel{Foo: "blah", Bar: "blah", Datapath: &fakeDatapath}, false, }, { "updating overlapping keys with different pointer index value causes no error", "baz", &testModel{Foo: "foobar", Bar: "foobar", Datapath: &fakeDatapath}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) _, err := rc.Update(tt.uuid, tt.model, true) if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) mapperInfo, err := dbModel.NewModelInfo(tt.model) require.Nil(t, err) h, err := valueFromIndex(mapperInfo, newColumnKeysFromColumns("foo", "bar", "datapath")) require.Nil(t, err) assert.Len(t, rc.indexes[index][h], 1) assert.Equal(t, tt.uuid, rc.indexes[index][h].getAny()) } }) } } func TestRowCacheUpdateMultiClientIndex(t *testing.T) { type testModel struct { UUID string `ovsdb:"_uuid"` Foo string `ovsdb:"foo"` Bar map[string]string `ovsdb:"bar"` Baz string `ovsdb:"baz"` } var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "foo", }, { Column: "bar", Key: "bar", }, }, }, }, }) index := newIndexFromColumnKeys(db.Indexes("Open_vSwitch")[0].Columns...) 
err = json.Unmarshal([]byte(`{ "name": "Open_vSwitch", "tables": { "Open_vSwitch": { "columns": { "foo": { "type": "string" }, "bar": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } }, "baz": { "type": "string" } } } } }`), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{ "foo": &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, "bar": &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, "foobar": &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) type expected struct { index model.Model uuids uuidset } tests := []struct { name string uuid string model *testModel wantErr bool expected []expected }{ { name: "error if row does not exist", uuid: "baz", model: &testModel{Foo: "baz", Bar: map[string]string{"bar": "baz"}}, wantErr: true, }, { name: "update non-index", uuid: "foo", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}, Baz: "bar"}, expected: []expected{ { index: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, uuids: newUUIDSet("foo"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar", "foobar"), }, }, }, { name: "update one index column", uuid: "foo", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "baz"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "foo", Bar: map[string]string{"bar": "baz"}}, uuids: newUUIDSet("foo"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar", "foobar"), }, }, }, { name: "update other index column", uuid: "foo", model: &testModel{Foo: "baz", Bar: map[string]string{"bar": "foo"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "baz", Bar: map[string]string{"bar": "foo"}}, uuids: newUUIDSet("foo"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar", "foobar"), }, }, }, { name: "update both index columns", uuid: "foo", model: &testModel{Foo: "baz", Bar: map[string]string{"bar": "baz"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "baz", Bar: map[string]string{"bar": "baz"}}, uuids: newUUIDSet("foo"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar", "foobar"), }, }, }, { name: "update unique index to existing index", uuid: "foo", model: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("foo", "bar", "foobar"), }, }, }, { name: "update multi index to different index", uuid: "foobar", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, uuids: newUUIDSet("foo", "foobar"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar"), }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) _, err = rc.Update(tt.uuid, tt.model, true) if tt.wantErr { require.Error(t, err) } else { require.Nil(t, err) require.Len(t, rc.indexes[index], len(tt.expected)) for _, expectedUUID := range tt.expected { mapperInfo, err := dbModel.NewModelInfo(expectedUUID.index) require.Nil(t, err) 
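				// Recompute the composite index key (the "foo" column plus the
				// "bar" map entry) from the expected model using the same helper
				// the cache uses, then compare the bucket stored under the client
				// index against the expected UUID set.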
h, err := valueFromIndex(mapperInfo, db.Indexes("Open_vSwitch")[0].Columns) require.Nil(t, err) require.Equal(t, expectedUUID.uuids, rc.indexes[index][h], expectedUUID.index) } } }) } } func TestRowCacheDelete(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{ "bar": &testModel{Foo: "bar"}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) tests := []struct { name string uuid string model *testModel wantErr bool }{ { "deletes a row", "bar", &testModel{Foo: "bar"}, false, }, { "error if row does not exist", "foobar", &testModel{Foo: "bar"}, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) err := rc.Delete(tt.uuid) if tt.wantErr { assert.Error(t, err) } else { require.Nil(t, err) assert.Nil(t, rc.indexes["foo"][tt.model.Foo]) } }) } } func TestRowCacheDeleteClientIndex(t *testing.T) { type testModel struct { UUID string `ovsdb:"_uuid"` Foo string `ovsdb:"foo"` Bar map[string]string `ovsdb:"bar"` } var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "foo", }, { Column: "bar", Key: "bar", }, }, }, }, }) index := newIndexFromColumnKeys(db.Indexes("Open_vSwitch")[0].Columns...) err = json.Unmarshal([]byte(`{ "name": "Open_vSwitch", "tables": { "Open_vSwitch": { "columns": { "foo": { "type": "string" }, "bar": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } } } } } }`), &schema) require.Nil(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{ "foo": &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, "bar": &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, "foobar": &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) type expected struct { index model.Model uuids uuidset } tests := []struct { name string uuid string model *testModel wantErr bool expected []expected }{ { name: "error if row does not exist", uuid: "baz", model: &testModel{Foo: "baz", Bar: map[string]string{"bar": "baz"}}, wantErr: true, }, { name: "delete a row with unique index", uuid: "foo", model: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar", "foobar"), }, }, }, { name: "delete a row with duplicated index", uuid: "foobar", model: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, wantErr: false, expected: []expected{ { index: &testModel{Foo: "foo", Bar: map[string]string{"bar": "foo"}}, uuids: newUUIDSet("foo"), }, { index: &testModel{Foo: "bar", Bar: map[string]string{"bar": "bar"}}, uuids: newUUIDSet("bar"), }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, testData, nil) require.Nil(t, err) rc := tc.Table("Open_vSwitch") require.NotNil(t, rc) err = rc.Delete(tt.uuid) if tt.wantErr { require.Error(t, err) } else { require.Nil(t, 
err) require.Len(t, rc.indexes[index], len(tt.expected)) for _, expected := range tt.expected { mapperInfo, err := dbModel.NewModelInfo(expected.index) require.Nil(t, err) h, err := valueFromIndex(mapperInfo, db.Indexes("Open_vSwitch")[0].Columns) require.Nil(t, err) require.Equal(t, expected.uuids, rc.indexes[index][h], expected.index) } } }) } } func TestEventHandlerFuncs_OnAdd(t *testing.T) { calls := 0 type fields struct { AddFunc func(table string, row model.Model) UpdateFunc func(table string, old model.Model, new model.Model) DeleteFunc func(table string, row model.Model) } type args struct { table string row model.Model } tests := []struct { name string fields fields args args }{ { "doesn't call nil function", fields{nil, nil, nil}, args{"testTable", &testModel{}}, }, { "calls onadd function", fields{func(string, model.Model) { calls++ }, nil, nil}, args{"testTable", &testModel{}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &EventHandlerFuncs{ AddFunc: tt.fields.AddFunc, UpdateFunc: tt.fields.UpdateFunc, DeleteFunc: tt.fields.DeleteFunc, } e.OnAdd(tt.args.table, tt.args.row) if e.AddFunc != nil { assert.Equal(t, 1, calls) } }) } } func TestEventHandlerFuncs_OnUpdate(t *testing.T) { calls := 0 type fields struct { AddFunc func(table string, row model.Model) UpdateFunc func(table string, old model.Model, new model.Model) DeleteFunc func(table string, row model.Model) } type args struct { table string old model.Model new model.Model } tests := []struct { name string fields fields args args }{ { "doesn't call nil function", fields{nil, nil, nil}, args{"testTable", &testModel{}, &testModel{}}, }, { "calls onupdate function", fields{nil, func(string, model.Model, model.Model) { calls++ }, nil}, args{"testTable", &testModel{}, &testModel{}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &EventHandlerFuncs{ AddFunc: tt.fields.AddFunc, UpdateFunc: tt.fields.UpdateFunc, DeleteFunc: tt.fields.DeleteFunc, } e.OnUpdate(tt.args.table, tt.args.old, tt.args.new) if e.UpdateFunc != nil { assert.Equal(t, 1, calls) } }) } } func TestEventHandlerFuncs_OnDelete(t *testing.T) { calls := 0 type fields struct { AddFunc func(table string, row model.Model) UpdateFunc func(table string, old model.Model, new model.Model) DeleteFunc func(table string, row model.Model) } type args struct { table string row model.Model } tests := []struct { name string fields fields args args }{ { "doesn't call nil function", fields{nil, nil, nil}, args{"testTable", &testModel{}}, }, { "calls ondelete function", fields{nil, nil, func(string, model.Model) { calls++ }}, args{"testTable", &testModel{}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &EventHandlerFuncs{ AddFunc: tt.fields.AddFunc, UpdateFunc: tt.fields.UpdateFunc, DeleteFunc: tt.fields.DeleteFunc, } e.OnDelete(tt.args.table, tt.args.row) if e.DeleteFunc != nil { assert.Equal(t, 1, calls) } }) } } func TestTableCacheTable(t *testing.T) { db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) assert.Nil(t, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tests := []struct { name string cache map[string]*RowCache table string want *RowCache }{ { "returns nil for an empty table", map[string]*RowCache{"Open_vSwitch": newRowCache("Open_vSwitch", dbModel, nil)}, "foo", nil, }, { "returns valid row cache for 
valid table", map[string]*RowCache{"Open_vSwitch": newRowCache("Open_vSwitch", dbModel, nil)}, "Open_vSwitch", newRowCache("Open_vSwitch", dbModel, nil), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tr := &TableCache{ cache: tt.cache, } got := tr.Table(tt.table) assert.Equal(t, tt.want, got) }) } } func TestTableCacheTables(t *testing.T) { db, err := model.NewClientDBModel("TestDB", map[string]model.Model{ "test1": &testModel{}, "test2": &testModel{}, "test3": &testModel{}}) assert.Nil(t, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal([]byte(` {"name": "TestDB", "tables": { "test1": { "columns": { "foo": { "type": "string" }, "bar": { "type": "string" }, "baz": { "type": "integer" }, "array": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "datapath": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } } } }, "test2": { "columns": { "foo": { "type": "string" }, "bar": { "type": "string" }, "baz": { "type": "integer" }, "array": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "datapath": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } } } }, "test3": { "columns": { "foo": { "type": "string" }, "bar": { "type": "string" }, "baz": { "type": "integer" }, "array": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "datapath": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } } } } } } `), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tests := []struct { name string cache map[string]*RowCache want []string }{ { "returns a table that exists", map[string]*RowCache{ "test1": newRowCache("test1", dbModel, nil), "test2": newRowCache("test2", dbModel, nil), "test3": newRowCache("test3", dbModel, nil), }, []string{"test1", "test2", "test3"}, }, { "returns an empty slice if no tables exist", map[string]*RowCache{}, []string{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tr := &TableCache{ cache: tt.cache, } got := tr.Tables() assert.ElementsMatch(t, tt.want, got) }) } } func TestTableCache_populate(t *testing.T) { t.Log("Create") db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) assert.Nil(t, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, nil, nil) assert.Nil(t, err) testRow := ovsdb.Row(map[string]interface{}{"_uuid": ovsdb.UUID{GoUUID: "test"}, "foo": "bar"}) testRowModel := &testModel{UUID: "test", Foo: "bar"} updates := ovsdb.TableUpdates{ "Open_vSwitch": { "test": &ovsdb.RowUpdate{ Old: nil, New: &testRow, }, }, } err = tc.Populate(updates) require.NoError(t, err) got := tc.Table("Open_vSwitch").Row("test") assert.Equal(t, testRowModel, got) t.Log("Update") updatedRow := ovsdb.Row(map[string]interface{}{"_uuid": ovsdb.UUID{GoUUID: "test"}, "foo": "quux"}) updatedRowModel := &testModel{UUID: "test", Foo: "quux"} updates["Open_vSwitch"]["test"] = &ovsdb.RowUpdate{ Old: &testRow, New: &updatedRow, } err = tc.Populate(updates) require.NoError(t, err) got = tc.cache["Open_vSwitch"].cache["test"] assert.Equal(t, updatedRowModel, got) t.Log("Delete") updates["Open_vSwitch"]["test"] = &ovsdb.RowUpdate{ Old: &updatedRow, New: nil, } err = tc.Populate(updates) require.NoError(t, err) _, ok := tc.cache["Open_vSwitch"].cache["test"] assert.False(t, ok) } func 
TestTableCachePopulate(t *testing.T) { t.Log("Create") db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) assert.Nil(t, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, nil, nil) assert.Nil(t, err) testRow := ovsdb.Row(map[string]interface{}{"_uuid": ovsdb.UUID{GoUUID: "test"}, "foo": "bar"}) testRowModel := &testModel{UUID: "test", Foo: "bar"} updates := ovsdb.TableUpdates{ "Open_vSwitch": { "test": &ovsdb.RowUpdate{ Old: nil, New: &testRow, }, }, } err = tc.Populate(updates) require.NoError(t, err) got := tc.Table("Open_vSwitch").Row("test") assert.Equal(t, testRowModel, got) t.Log("Update") updatedRow := ovsdb.Row(map[string]interface{}{"_uuid": ovsdb.UUID{GoUUID: "test"}, "foo": "quux"}) updatedRowModel := &testModel{UUID: "test", Foo: "quux"} updates["Open_vSwitch"]["test"] = &ovsdb.RowUpdate{ Old: &testRow, New: &updatedRow, } err = tc.Populate(updates) require.NoError(t, err) got = tc.cache["Open_vSwitch"].cache["test"] assert.Equal(t, updatedRowModel, got) t.Log("Delete") updates["Open_vSwitch"]["test"] = &ovsdb.RowUpdate{ Old: &updatedRow, New: nil, } err = tc.Populate(updates) require.NoError(t, err) _, ok := tc.cache["Open_vSwitch"].cache["test"] assert.False(t, ok) } func TestTableCachePopulate2(t *testing.T) { db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) assert.Nil(t, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, nil, nil) assert.Nil(t, err) testRow := ovsdb.Row(map[string]interface{}{"_uuid": "test", "foo": "bar"}) testRowModel := &testModel{UUID: "test", Foo: "bar"} updates := ovsdb.TableUpdates2{ "Open_vSwitch": { "test": &ovsdb.RowUpdate2{ Initial: &testRow, }, }, } t.Log("Initial") err = tc.Populate2(updates) require.NoError(t, err) got := tc.Table("Open_vSwitch").Row("test") assert.Equal(t, testRowModel, got) t.Log("Insert") testRow2 := ovsdb.Row(map[string]interface{}{"_uuid": "test2", "foo": "bar2"}) testRowModel2 := &testModel{UUID: "test2", Foo: "bar2"} updates = ovsdb.TableUpdates2{ "Open_vSwitch": { "test2": &ovsdb.RowUpdate2{ Insert: &testRow2, }, }, } err = tc.Populate2(updates) require.NoError(t, err) got = tc.Table("Open_vSwitch").Row("test2") assert.Equal(t, testRowModel2, got) t.Log("Update") updatedRow := ovsdb.Row(map[string]interface{}{"foo": "quux"}) updatedRowModel := &testModel{UUID: "test", Foo: "quux"} updates = ovsdb.TableUpdates2{ "Open_vSwitch": { "test": &ovsdb.RowUpdate2{ Modify: &updatedRow, }, }, } err = tc.Populate2(updates) require.NoError(t, err) got = tc.cache["Open_vSwitch"].cache["test"] assert.Equal(t, updatedRowModel, got) t.Log("Delete") deletedRow := ovsdb.Row(map[string]interface{}{"_uuid": "test", "foo": "quux"}) updates = ovsdb.TableUpdates2{ "Open_vSwitch": { "test": &ovsdb.RowUpdate2{ Delete: &deletedRow, }, }, } err = tc.Populate2(updates) require.NoError(t, err) _, ok := tc.cache["Open_vSwitch"].cache["test"] assert.False(t, ok) } // ovsdb-server can break index uniqueness inside a monitor update // the cache needs to be able to recover from this func TestTableCachePopulate2BrokenIndexes(t *testing.T) { db, err := model.NewClientDBModel("Open_vSwitch", 
map[string]model.Model{"Open_vSwitch": &testModel{}}) assert.Nil(t, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, nil, nil) assert.Nil(t, err) t.Log("Insert") testRow := ovsdb.Row(map[string]interface{}{"_uuid": "test1", "foo": "bar"}) testRowModel := &testModel{UUID: "test1", Foo: "bar"} updates := ovsdb.TableUpdates2{ "Open_vSwitch": { "test1": &ovsdb.RowUpdate2{ Insert: &testRow, }, }, } err = tc.Populate2(updates) require.NoError(t, err) got := tc.Table("Open_vSwitch").Row("test1") assert.Equal(t, testRowModel, got) t.Log("Insert Duplicate Index") testRow2 := ovsdb.Row(map[string]interface{}{"_uuid": "test2", "foo": "bar"}) testRowModel2 := &testModel{UUID: "test2", Foo: "bar"} updates = ovsdb.TableUpdates2{ "Open_vSwitch": { "test2": &ovsdb.RowUpdate2{ Insert: &testRow2, }, }, } err = tc.Populate2(updates) require.NoError(t, err) got = tc.Table("Open_vSwitch").Row("test2") assert.Equal(t, testRowModel2, got) t.Log("Delete") deletedRow := ovsdb.Row(map[string]interface{}{"_uuid": "test1", "foo": "bar"}) updates = ovsdb.TableUpdates2{ "Open_vSwitch": { "test1": &ovsdb.RowUpdate2{ Delete: &deletedRow, }, }, } err = tc.Populate2(updates) require.NoError(t, err) _, ok := tc.cache["Open_vSwitch"].cache["test1"] assert.False(t, ok) t.Log("Lookup Original Insert By Index") _, result, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "bar"}) require.NoError(t, err) require.NotNil(t, result) } func TestEventProcessor_AddEvent(t *testing.T) { logger := logr.Discard() ep := newEventProcessor(16, &logger) var events []event for i := 0; i < 17; i++ { events = append(events, event{ table: "bridge", eventType: addEvent, new: &testModel{ UUID: "unique", Foo: "bar", }, }) } // overfill channel so event 16 is dropped for _, e := range events { ep.AddEvent(e.eventType, e.table, nil, e.new) } // assert channel is full of events assert.Equal(t, 16, len(ep.events)) // read events and ensure they are in FIFO order for i := 0; i < 16; i++ { event := <-ep.events assert.Equal(t, &testModel{UUID: "unique", Foo: "bar"}, event.new) } // assert channel is empty assert.Equal(t, 0, len(ep.events)) } func TestIndex(t *testing.T) { db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) assert.Nil(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "bar", }, }, }, { Columns: []model.ColumnKey{ { Column: "foo", }, { Column: "baz", }, }, }, }, }) var schema ovsdb.DatabaseSchema err = json.Unmarshal(getTestSchema(`["foo"], ["bar","baz"]`), &schema) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) assert.Empty(t, errs) tc, err := NewTableCache(dbModel, nil, nil) assert.Nil(t, err) table := tc.Table("Open_vSwitch") obj := &testModel{ UUID: "test1", Foo: "foo", Bar: "bar", Baz: 42, } err = table.Create(obj.UUID, obj, true) assert.Nil(t, err) obj2 := &testModel{ UUID: "test2", Foo: "foo2", Bar: "bar", Baz: 78, } err = table.Create(obj2.UUID, obj2, true) assert.Nil(t, err) t.Run("Index by single column", func(t *testing.T) { idx, err := table.Index("foo") assert.Nil(t, err) info, err := dbModel.NewModelInfo(obj) assert.Nil(t, err) v, err := valueFromIndex(info, newColumnKeysFromColumns("foo")) assert.Nil(t, err) assert.ElementsMatch(t, idx[v], []string{obj.UUID}) }) t.Run("Index by single column miss", func(t 
*testing.T) { idx, err := table.Index("foo") assert.Nil(t, err) obj3 := *obj obj3.Foo = "wrong" assert.Nil(t, err) info, err := dbModel.NewModelInfo(&obj3) assert.Nil(t, err) v, err := valueFromIndex(info, newColumnKeysFromColumns("foo")) assert.Nil(t, err) _, ok := idx[v] assert.False(t, ok) }) t.Run("Index by single column wrong", func(t *testing.T) { _, err := table.Index("wrong") assert.NotNil(t, err) }) t.Run("Index by multi-column wrong", func(t *testing.T) { _, err := table.Index("bar", "wrong") assert.NotNil(t, err) }) t.Run("Index by multi-column", func(t *testing.T) { idx, err := table.Index("bar", "baz") assert.Nil(t, err) info, err := dbModel.NewModelInfo(obj) assert.Nil(t, err) v, err := valueFromIndex(info, newColumnKeysFromColumns("bar", "baz")) assert.Nil(t, err) assert.ElementsMatch(t, idx[v], []string{obj.UUID}) }) t.Run("Index by multi-column miss", func(t *testing.T) { idx, err := table.Index("bar", "baz") assert.Nil(t, err) obj3 := *obj obj3.Baz++ info, err := dbModel.NewModelInfo(&obj3) assert.Nil(t, err) v, err := valueFromIndex(info, newColumnKeysFromColumns("bar", "baz")) assert.Nil(t, err) _, ok := idx[v] assert.False(t, ok) }) t.Run("Client index by single column", func(t *testing.T) { idx, err := table.Index("bar") assert.Nil(t, err) info, err := dbModel.NewModelInfo(obj) assert.Nil(t, err) v, err := valueFromIndex(info, newColumnKeysFromColumns("bar")) assert.Nil(t, err) assert.ElementsMatch(t, idx[v], []string{obj.UUID, obj2.UUID}) }) t.Run("Client index by multiple column", func(t *testing.T) { idx, err := table.Index("foo", "baz") assert.Nil(t, err) info, err := dbModel.NewModelInfo(obj) assert.Nil(t, err) v, err := valueFromIndex(info, newColumnKeysFromColumns("foo", "baz")) assert.Nil(t, err) assert.ElementsMatch(t, idx[v], []string{obj.UUID}) }) } func setupRowByModelSingleIndex(t require.TestingT) (*testModel, *TableCache) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo"]`), &schema) require.NoError(t, err) myFoo := &testModel{Foo: "foo", Bar: "foo"} testData := Data{ "Open_vSwitch": map[string]model.Model{ "foo": myFoo, "bar": &testModel{Foo: "bar", Bar: "bar"}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.NoError(t, err) return myFoo, tc } func TestTableCacheRowByModelSingleIndex(t *testing.T) { myFoo, tc := setupRowByModelSingleIndex(t) t.Run("get foo by index", func(t *testing.T) { _, foo, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "foo"}) assert.NoError(t, err) assert.NotNil(t, foo) assert.Equal(t, myFoo, foo) }) t.Run("get non-existent item by index", func(t *testing.T) { _, baz, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "baz"}) assert.NoError(t, err) assert.Nil(t, baz) }) t.Run("no index data", func(t *testing.T) { _, foo, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Bar: "foo"}) assert.NoError(t, err) assert.Nil(t, foo) }) t.Run("wrong model type", func(t *testing.T) { type badModel struct { UUID string `ovsdb:"_uuid"` Baz string `ovsdb:"baz"` } _, _, err := tc.Table("Open_vSwitch").RowByModel(&badModel{Baz: "baz"}) assert.Error(t, err) }) } func benchmarkDoCreate(b *testing.B, numRows int) (*TableCache, *RowCache) { _, tc := setupRowByModelSingleIndex(b) rc := tc.Table("Open_vSwitch") for i := 0; i < numRows; i++ { uuid := fmt.Sprintf("%d", i) model := 
&testModel{Foo: uuid} err := rc.Create(uuid, model, true) require.NoError(b, err) } return tc, rc } const numRows int = 10000 func BenchmarkSingleIndexCreate(b *testing.B) { for n := 0; n < b.N; n++ { _, _ = benchmarkDoCreate(b, numRows) } } func BenchmarkSingleIndexUpdate(b *testing.B) { _, rc := benchmarkDoCreate(b, numRows) b.ResetTimer() for n := 0; n < b.N; n++ { for i := 0; i < numRows; i++ { uuid := fmt.Sprintf("%d", i) model := &testModel{Foo: fmt.Sprintf("%d-%d", n, i)} _, err := rc.Update(uuid, model, true) require.NoError(b, err) } } } func BenchmarkSingleIndexUpdateArray(b *testing.B) { const numRows int = 1500 _, rc := benchmarkDoCreate(b, numRows) array := make([]string, 0, 500) for i := 0; i < cap(array); i++ { array = append(array, fmt.Sprintf("value%d", i)) } b.ResetTimer() for n := 0; n < b.N; n++ { for i := 0; i < numRows; i++ { uuid := fmt.Sprintf("%d", i) model := &testModel{Foo: fmt.Sprintf("%d-%d", n, i), Array: array} _, err := rc.Update(uuid, model, true) require.NoError(b, err) } } } func BenchmarkSingleIndexDelete(b *testing.B) { for n := 0; n < b.N; n++ { _, rc := benchmarkDoCreate(b, numRows) for i := 0; i < numRows; i++ { uuid := fmt.Sprintf("%d", i) err := rc.Delete(uuid) require.NoError(b, err) } } } func BenchmarkIndexExists(b *testing.B) { _, rc := benchmarkDoCreate(b, numRows) b.ResetTimer() for n := 0; n < b.N; n++ { for i := 0; i < numRows; i++ { uuid := fmt.Sprintf("%d", i) model := &testModel{UUID: uuid, Foo: uuid} err := rc.IndexExists(model) require.NoError(b, err) } } } func BenchmarkPopulate2UpdateArray(b *testing.B) { const numRows int = 500 _, tc := setupRowByModelSingleIndex(b) rc := tc.Table("Open_vSwitch") array := make([]string, 0, 50) for i := 0; i < cap(array); i++ { array = append(array, fmt.Sprintf("value%d", i)) } for i := 0; i < numRows; i++ { uuid := fmt.Sprintf("%d", i) model := &testModel{Foo: uuid, Array: array} err := rc.Create(uuid, model, true) require.NoError(b, err) } updateSet := make([]interface{}, 0, cap(array)/2) for i := cap(array); i < cap(array)+cap(updateSet); i++ { updateSet = append(updateSet, fmt.Sprintf("value%d", i)) } b.ResetTimer() for n := 0; n < b.N; n++ { for i := 0; i < numRows; i++ { updatedRow := ovsdb.Row(map[string]interface{}{"array": ovsdb.OvsSet{GoSet: updateSet}}) err := tc.Populate2(ovsdb.TableUpdates2{ "Open_vSwitch": { "foo": &ovsdb.RowUpdate2{ Modify: &updatedRow, }, }, }) require.NoError(b, err) } } } func TestTableCacheRowByModelTwoIndexes(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo"], ["bar"]`), &schema) require.NoError(t, err) myFoo := &testModel{Foo: "foo", Bar: "foo"} testData := Data{ "Open_vSwitch": map[string]model.Model{ "foo": myFoo, "bar": &testModel{Foo: "bar", Bar: "bar"}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.NoError(t, err) t.Run("get foo by Foo index", func(t *testing.T) { _, foo, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "foo"}) assert.NoError(t, err) assert.NotNil(t, foo) assert.Equal(t, myFoo, foo) }) t.Run("get foo by Bar index", func(t *testing.T) { _, foo, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Bar: "foo"}) assert.NoError(t, err) assert.NotNil(t, foo) assert.Equal(t, myFoo, foo) }) t.Run("get non-existent item by index", func(t *testing.T) { _, baz, err := 
tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "baz"}) assert.NoError(t, err) assert.Nil(t, baz) }) } func TestTableCacheRowByModelMultiIndex(t *testing.T) { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.Nil(t, err) err = json.Unmarshal(getTestSchema(`["foo", "bar"]`), &schema) require.NoError(t, err) myFoo := &testModel{Foo: "foo", Bar: "foo"} testData := Data{ "Open_vSwitch": map[string]model.Model{"foo": myFoo, "bar": &testModel{Foo: "bar", Bar: "bar"}}, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, testData, nil) require.NoError(t, err) t.Run("incomplete index", func(t *testing.T) { _, foo, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "foo"}) assert.NoError(t, err) assert.Nil(t, foo) }) t.Run("get foo by index", func(t *testing.T) { _, foo, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "foo", Bar: "foo"}) assert.NoError(t, err) assert.NotNil(t, foo) assert.Equal(t, myFoo, foo) }) t.Run("get non-existent item by index", func(t *testing.T) { _, baz, err := tc.Table("Open_vSwitch").RowByModel(&testModel{Foo: "baz", Bar: "baz"}) assert.NoError(t, err) assert.Nil(t, baz) }) } func TestTableCacheRowsByModels(t *testing.T) { type testModel struct { UUID string `ovsdb:"_uuid"` Foo string `ovsdb:"foo"` Bar string `ovsdb:"bar"` Baz map[string]string `ovsdb:"baz"` } var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testModel{}}) require.NoError(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "bar", }, }, }, { Columns: []model.ColumnKey{ { Column: "bar", }, { Column: "baz", Key: "baz", }, }, }, }, }) err = json.Unmarshal([]byte(`{ "name": "Open_vSwitch", "tables": { "Open_vSwitch": { "indexes": [["foo"]], "columns": { "foo": { "type": "string" }, "bar": { "type": "string" }, "baz": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } } } } } }`), &schema) require.NoError(t, err) testData := Data{ "Open_vSwitch": map[string]model.Model{ "foo": &testModel{Foo: "foo", Bar: "foo", Baz: map[string]string{"baz": "foo", "other": "other"}}, "bar": &testModel{Foo: "bar", Bar: "bar", Baz: map[string]string{"baz": "bar", "other": "other"}}, "foobar": &testModel{Foo: "foobar", Bar: "bar", Baz: map[string]string{"baz": "foobar", "other": "other"}}, "baz": &testModel{Foo: "baz", Bar: "baz", Baz: map[string]string{"baz": "baz", "other": "other"}}, "quux": &testModel{Foo: "quux", Bar: "quux", Baz: map[string]string{"baz": "quux", "other": "other"}}, "quuz": &testModel{Foo: "quuz", Bar: "quux", Baz: map[string]string{"baz": "quux", "other": "other"}}, }, } dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tests := []struct { name string models []model.Model rows map[string]model.Model }{ { name: "by non index, no result", models: []model.Model{ &testModel{Foo: "no", Bar: "no", Baz: map[string]string{"baz": "no"}}, }, rows: nil, }, { name: "by single column client index, single result", models: []model.Model{ &testModel{Bar: "foo"}, }, rows: map[string]model.Model{ "foo": testData["Open_vSwitch"]["foo"], }, }, { name: "by single column client index, multiple models, multiple results", models: []model.Model{ &testModel{Bar: "foo"}, &testModel{Bar: "baz"}, }, rows: map[string]model.Model{ "foo": testData["Open_vSwitch"]["foo"], "baz": 
testData["Open_vSwitch"]["baz"], }, }, { name: "by single column client index, multiple results", models: []model.Model{ &testModel{Bar: "bar"}, }, rows: map[string]model.Model{ "bar": testData["Open_vSwitch"]["bar"], "foobar": testData["Open_vSwitch"]["foobar"], }, }, { name: "by multi column client index, single result", models: []model.Model{ &testModel{Bar: "baz", Baz: map[string]string{"baz": "baz"}}, }, rows: map[string]model.Model{ "baz": testData["Open_vSwitch"]["baz"], }, }, { name: "by client index, multiple results", models: []model.Model{ &testModel{Bar: "quux", Baz: map[string]string{"baz": "quux"}}, }, rows: map[string]model.Model{ "quux": testData["Open_vSwitch"]["quux"], "quuz": testData["Open_vSwitch"]["quuz"], }, }, { name: "by client index, multiple models, multiple results", models: []model.Model{ &testModel{Bar: "quux", Baz: map[string]string{"baz": "quux"}}, &testModel{Bar: "bar", Baz: map[string]string{"baz": "foobar"}}, }, rows: map[string]model.Model{ "quux": testData["Open_vSwitch"]["quux"], "quuz": testData["Open_vSwitch"]["quuz"], "foobar": testData["Open_vSwitch"]["foobar"], "bar": testData["Open_vSwitch"]["bar"], }, }, { name: "by schema index prioritized over client index", models: []model.Model{ &testModel{Foo: "foo", Bar: "bar", Baz: map[string]string{"baz": "bar"}}, }, rows: map[string]model.Model{ "foo": testData["Open_vSwitch"]["foo"], }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, testData, nil) require.NoError(t, err) rows, err := tc.Table("Open_vSwitch").RowsByModels(tt.models) require.NoError(t, err) require.Equal(t, tt.rows, rows) }) } } type rowsByConditionTestModel struct { UUID string `ovsdb:"_uuid"` Foo string `ovsdb:"foo"` Bar string `ovsdb:"bar"` Baz string `ovsdb:"baz"` Quux string `ovsdb:"quux"` Quuz string `ovsdb:"quuz"` FooBar map[string]string `ovsdb:"foobar"` Empty string `ovsdb:"empty"` } func setupRowsByConditionCache(t require.TestingT) *TableCache { var schema ovsdb.DatabaseSchema db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &rowsByConditionTestModel{}}) require.NoError(t, err) db.SetIndexes(map[string][]model.ClientIndex{ "Open_vSwitch": { { Columns: []model.ColumnKey{ { Column: "foobar", Key: "foobar", }, }, }, { Columns: []model.ColumnKey{ { Column: "empty", }, }, }, }, }) err = json.Unmarshal([]byte(`{ "name": "Open_vSwitch", "tables": { "Open_vSwitch": { "indexes": [["foo"], ["bar"], ["quux", "quuz"]], "columns": { "foo": { "type": "string" }, "bar": { "type": "string" }, "baz": { "type": "string" }, "quux": { "type": "string" }, "quuz": { "type": "string" }, "foobar": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } }, "empty": { "type": "string" } } } } }`), &schema) require.NoError(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(t, errs) tc, err := NewTableCache(dbModel, nil, nil) require.NoError(t, err) return tc } func TestTableCacheRowsByCondition(t *testing.T) { testData := map[string]*rowsByConditionTestModel{ "foo": {UUID: "foo", Foo: "foo", Bar: "foo", Baz: "foo", Quux: "foo", Quuz: "quuz", FooBar: map[string]string{"foobar": "foo"}}, "bar": {UUID: "bar", Foo: "bar", Bar: "bar", Baz: "bar", Quux: "bar", Quuz: "quuz", FooBar: map[string]string{"foobar": "bar"}}, "baz": {UUID: "baz", Foo: "baz", Bar: "baz", Baz: "baz", Quux: "baz", Quuz: "quuz", FooBar: map[string]string{"foobar": "baz"}}, "quux": {UUID: "quux", Foo: "quux", Bar: "quux", Baz: "quux", Quux: "quux", 
Quuz: "quuz", FooBar: map[string]string{"foobar": "baz"}}, "quuz": {UUID: "quuz", Foo: "quuz", Bar: "quuz", Baz: "quuz", Quux: "quuz", Quuz: "quuz", FooBar: map[string]string{"foobar": "baz"}}, } tests := []struct { name string conditions []ovsdb.Condition // uuids that could be found evaluating conditions as indexes uuidsByConditionsAsIndexes uuidset // rows that could be found evaluating all conditions rowsByCondition map[string]model.Model }{ { "by equal uuid", []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: "foo"}}}, nil, map[string]model.Model{"foo": testData["foo"]}, }, { "by includes uuid", []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionIncludes, Value: ovsdb.UUID{GoUUID: "foo"}}}, nil, map[string]model.Model{"foo": testData["foo"]}, }, { "by non equal uuid, multiple results", []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionNotEqual, Value: ovsdb.UUID{GoUUID: "foo"}}}, nil, map[string]model.Model{ "bar": testData["bar"], "baz": testData["baz"], "quux": testData["quux"], "quuz": testData["quuz"], }, }, { "by excludes uuid, multiple results", []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionExcludes, Value: ovsdb.UUID{GoUUID: "foo"}}}, nil, map[string]model.Model{ "bar": testData["bar"], "baz": testData["baz"], "quux": testData["quux"], "quuz": testData["quuz"], }, }, { "by schema index", []ovsdb.Condition{{Column: "foo", Function: ovsdb.ConditionEqual, Value: "foo"}}, newUUIDSet("foo"), map[string]model.Model{"foo": testData["foo"]}, }, { "by schema index, no results", []ovsdb.Condition{{Column: "foo", Function: ovsdb.ConditionEqual, Value: "foobar"}}, newUUIDSet(), map[string]model.Model{}, }, { "by multi column schema index", []ovsdb.Condition{ {Column: "quux", Function: ovsdb.ConditionEqual, Value: "foo"}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: "quuz"}, }, newUUIDSet("foo"), map[string]model.Model{"foo": testData["foo"]}, }, { "by multi column schema index, no results", []ovsdb.Condition{ {Column: "quux", Function: ovsdb.ConditionEqual, Value: "foobar"}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: "quuz"}, }, newUUIDSet(), map[string]model.Model{}, }, { "by client index", []ovsdb.Condition{{Column: "foobar", Function: ovsdb.ConditionIncludes, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foobar": "bar"}}}}, newUUIDSet("bar"), map[string]model.Model{"bar": testData["bar"]}, }, { "by client index, no results", []ovsdb.Condition{{Column: "foobar", Function: ovsdb.ConditionIncludes, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foobar": "foobar"}}}}, newUUIDSet(), map[string]model.Model{}, }, { "by client index, multiple results", []ovsdb.Condition{{Column: "foobar", Function: ovsdb.ConditionIncludes, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foobar": "baz"}}}}, newUUIDSet("baz", "quux", "quuz"), map[string]model.Model{ "baz": testData["baz"], "quux": testData["quux"], "quuz": testData["quuz"], }, }, { "by zero client index, multiple results", []ovsdb.Condition{{Column: "empty", Function: ovsdb.ConditionEqual, Value: ""}}, newUUIDSet("foo", "bar", "baz", "quux", "quuz"), map[string]model.Model{ "foo": testData["foo"], "bar": testData["bar"], "baz": testData["baz"], "quux": testData["quux"], "quuz": testData["quuz"], }, }, { "by non index", []ovsdb.Condition{{Column: "baz", Function: ovsdb.ConditionEqual, Value: "baz"}}, nil, map[string]model.Model{"baz": testData["baz"]}, }, { "by two uuids, no results", []ovsdb.Condition{ {Column: 
"_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: "foo"}}, {Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: "bar"}}, }, nil, map[string]model.Model{}, }, { "by uuid and schema index", []ovsdb.Condition{ {Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: "foo"}}, {Column: "foo", Function: ovsdb.ConditionEqual, Value: "foo"}, }, newUUIDSet("foo"), map[string]model.Model{"foo": testData["foo"]}, }, { "by uuid and schema index, no results", []ovsdb.Condition{ {Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: "foo"}}, {Column: "foo", Function: ovsdb.ConditionEqual, Value: "bar"}, }, newUUIDSet("bar"), map[string]model.Model{}, }, { "by schema index and non-index", []ovsdb.Condition{ {Column: "foo", Function: ovsdb.ConditionEqual, Value: "foo"}, {Column: "baz", Function: ovsdb.ConditionEqual, Value: "foo"}, }, newUUIDSet("foo"), map[string]model.Model{"foo": testData["foo"]}, }, { "by schema index and non-index, no results", []ovsdb.Condition{ {Column: "foo", Function: ovsdb.ConditionEqual, Value: "foo"}, {Column: "baz", Function: ovsdb.ConditionEqual, Value: "baz"}, }, newUUIDSet("foo"), map[string]model.Model{}, }, { "by uuid, schema index, and non-index", []ovsdb.Condition{ {Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: "foo"}}, {Column: "foo", Function: ovsdb.ConditionEqual, Value: "foo"}, {Column: "bar", Function: ovsdb.ConditionEqual, Value: "foo"}, {Column: "baz", Function: ovsdb.ConditionEqual, Value: "foo"}, }, newUUIDSet("foo"), map[string]model.Model{"foo": testData["foo"]}, }, { "by client index, and non-index, multiple results", []ovsdb.Condition{ {Column: "foobar", Function: ovsdb.ConditionIncludes, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foobar": "baz"}}}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: "quuz"}, }, newUUIDSet("baz", "quux", "quuz"), map[string]model.Model{ "baz": testData["baz"], "quux": testData["quux"], "quuz": testData["quuz"], }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc := setupRowsByConditionCache(t) rc := tc.Table("Open_vSwitch") for _, m := range testData { err := rc.Create(m.UUID, m, true) require.NoError(t, err) } nativeValues := make([]interface{}, 0, len(tt.conditions)) for _, condition := range tt.conditions { cSchema := rc.dbModel.Schema.Tables["Open_vSwitch"].Column(condition.Column) nativeValue, err := ovsdb.OvsToNative(cSchema, condition.Value) require.NoError(t, err) nativeValues = append(nativeValues, nativeValue) } uuids, err := tc.Table("Open_vSwitch").uuidsByConditionsAsIndexes(tt.conditions, nativeValues) require.NoError(t, err) require.Equal(t, tt.uuidsByConditionsAsIndexes, uuids) rows, err := tc.Table("Open_vSwitch").RowsByCondition(tt.conditions) require.NoError(t, err) require.Equal(t, tt.rowsByCondition, rows) }) } } func BenchmarkRowsByCondition(b *testing.B) { tc := setupRowsByConditionCache(b) rc := tc.Table("Open_vSwitch") models := []*rowsByConditionTestModel{} for i := 0; i < numRows; i++ { model := &rowsByConditionTestModel{ UUID: fmt.Sprintf("UUID-%d", i), Foo: fmt.Sprintf("Foo-%d", i), Bar: fmt.Sprintf("Bar-%d", i), Baz: fmt.Sprintf("Baz-%d", i), Quux: fmt.Sprintf("Quux-%d", i), Quuz: fmt.Sprintf("Quuz-%d", i), FooBar: map[string]string{"foobar": fmt.Sprintf("FooBar-%d", i)}, } err := rc.Create(model.UUID, model, true) require.NoError(b, err) models = append(models, model) } rand.Seed(int64(b.N)) benchmarks := []struct { name string prepare func(int) 
[]ovsdb.Condition }{ { name: "by uuid", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: models[i].UUID}}, } }, }, { name: "by single column schema index", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "foo", Function: ovsdb.ConditionEqual, Value: models[i].Foo}, } }, }, { name: "by single column client index", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "foobar", Function: ovsdb.ConditionIncludes, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foobar": models[i].FooBar["foobar"]}}}, } }, }, { name: "by multi column schema index", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "quux", Function: ovsdb.ConditionEqual, Value: models[i].Quux}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: models[i].Quuz}, } }, }, { name: "by two schema indexes", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "foo", Function: ovsdb.ConditionEqual, Value: models[i].Foo}, {Column: "bar", Function: ovsdb.ConditionEqual, Value: models[i].Bar}, } }, }, { name: "by schema index and non-index", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "foo", Function: ovsdb.ConditionEqual, Value: models[i].Foo}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: models[i].Quuz}, } }, }, { name: "by single non index", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "quuz", Function: ovsdb.ConditionEqual, Value: models[i].Quuz}, } }, }, { name: "by multiple non indexes", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "baz", Function: ovsdb.ConditionEqual, Value: models[i].Baz}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: models[i].Quuz}, } }, }, { name: "by many conditions", prepare: func(i int) []ovsdb.Condition { return []ovsdb.Condition{ {Column: "foo", Function: ovsdb.ConditionEqual, Value: models[i].Foo}, {Column: "bar", Function: ovsdb.ConditionEqual, Value: models[i].Bar}, {Column: "baz", Function: ovsdb.ConditionEqual, Value: models[i].Baz}, {Column: "quux", Function: ovsdb.ConditionEqual, Value: models[i].Quux}, {Column: "quuz", Function: ovsdb.ConditionEqual, Value: models[i].Quuz}, {Column: "foobar", Function: ovsdb.ConditionIncludes, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foobar": models[i].FooBar["foobar"]}}}, } }, }, } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { for i := 0; i < b.N; i++ { results, err := rc.RowsByCondition(bm.prepare(rand.Intn(numRows))) require.NoError(b, err) require.Len(b, results, 1) } }) } } func BenchmarkPopulate2SingleModify(b *testing.B) { type testDBModel struct { UUID string `ovsdb:"_uuid"` Set []string `ovsdb:"set"` } aFooSet, _ := ovsdb.NewOvsSet([]string{"foo"}) base := &testDBModel{Set: []string{}} for i := 0; i < 57000; i++ { base.Set = append(base.Set, fmt.Sprintf("foo%d", i)) } db, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &testDBModel{}}) assert.Nil(b, err) var schema ovsdb.DatabaseSchema err = json.Unmarshal([]byte(` { "name": "Open_vSwitch", "tables": { "Open_vSwitch": { "columns": { "set": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } } } } } } `), &schema) require.NoError(b, err) dbModel, errs := model.NewDatabaseModel(schema, db) require.Empty(b, errs) caches := make([]*TableCache, b.N) for n := 0; n < b.N; n++ { tc, err :=
NewTableCache(dbModel, nil, nil) require.NoError(b, err) caches[n] = tc rc := tc.Table("Open_vSwitch") err = rc.Create("uuid", base, true) require.NoError(b, err) } tu := ovsdb.TableUpdates2{ "Open_vSwitch": ovsdb.TableUpdate2{ "uuid": &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{"set": aFooSet}, }, }, } b.ResetTimer() for n := 0; n < b.N; n++ { err = caches[n].Populate2(tu) require.NoError(b, err) } } func TestTableCache_ApplyModelUpdates(t *testing.T) { dbModel, err := test.GetModel() require.NoError(t, err) tests := []struct { name string update ovsdb.RowUpdate current model.Model expected model.Model }{ { name: "create", update: ovsdb.RowUpdate{ New: &ovsdb.Row{"name": "bridge"}, }, expected: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, }, { name: "update", update: ovsdb.RowUpdate{ Old: &ovsdb.Row{"name": "bridge", "datapath_type": "old"}, New: &ovsdb.Row{"name": "bridge", "datapath_type": "new"}, }, current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, expected: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, }, { name: "update noop", update: ovsdb.RowUpdate{ Old: &ovsdb.Row{"name": "bridge", "datapath_type": "same"}, New: &ovsdb.Row{"name": "bridge", "datapath_type": "same"}, }, current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "same", }, expected: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "same", }, }, { name: "delete", update: ovsdb.RowUpdate{ Old: &ovsdb.Row{"name": "bridge"}, }, current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc, err := NewTableCache(dbModel, nil, nil) require.NoError(t, err) rc := tc.Table("Bridge") require.NotNil(t, rc) if tt.current != nil { err = rc.Create("uuid", tt.current, false) require.NoError(t, err) } updates := updates.ModelUpdates{} require.NoError(t, err) err = updates.AddRowUpdate(dbModel, "Bridge", "uuid", tt.current, tt.update) require.NoError(t, err) err = tc.ApplyCacheUpdate(updates) assert.NoError(t, err) model := rc.rowByUUID("uuid") if tt.expected != nil { assert.Equal(t, tt.expected, model) } else { assert.Nil(t, model) } }) } } golang-github-ovn-org-libovsdb-0.7.0/cache/doc.go000066400000000000000000000007301464501522100215730ustar00rootroot00000000000000/* Package cache provides a cache of model.Model elements that can be used in an OVSDB client or server. The cache can be accessed using a simple API: cache.Table("Open_vSwitch").Row("") It implements the ovsdb.NotificationHandler interface such that it can be populated automatically by update notifications. It also contains an eventProcessor where callers may register functions that will get called on every Add/Update/Delete event.
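A minimal read-only usage sketch based on this package's tests (hedged: the MyModel type, the dbModel variable, and the column values below are illustrative only, and error handling is elided):

	// build the cache from a previously constructed DatabaseModel
	tc, err := cache.NewTableCache(dbModel, nil, nil)
	if err != nil {
		return err
	}
	// fetch a row directly by its _uuid
	byUUID := tc.Table("Open_vSwitch").Row("some-uuid")
	// or fetch it through one of the table's indexes
	_, byIndex, err := tc.Table("Open_vSwitch").RowByModel(&MyModel{Foo: "bar"})
	fmt.Println(byUUID, byIndex, err)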
*/ package cache golang-github-ovn-org-libovsdb-0.7.0/cache/uuidset.go000066400000000000000000000026741464501522100225210ustar00rootroot00000000000000package cache type void struct{} type uuidset map[string]void func newUUIDSet(uuids ...string) uuidset { s := uuidset{} for _, uuid := range uuids { s[uuid] = void{} } return s } func (s uuidset) add(uuid string) { s[uuid] = void{} } func (s uuidset) remove(uuid string) { delete(s, uuid) } func (s uuidset) has(uuid string) bool { _, ok := s[uuid] return ok } func (s uuidset) equals(o uuidset) bool { if len(s) != len(o) { return false } for uuid := range s { if !o.has(uuid) { return false } } return true } func (s uuidset) getAny() string { for k := range s { return k } return "" } func (s uuidset) list() []string { uuids := make([]string, 0, len(s)) for uuid := range s { uuids = append(uuids, uuid) } return uuids } func (s uuidset) empty() bool { return len(s) == 0 } func addUUIDSet(s1, s2 uuidset) uuidset { if len(s2) == 0 { return s1 } if s1 == nil { s1 = uuidset{} } for uuid := range s2 { s1.add(uuid) } return s1 } func substractUUIDSet(s1, s2 uuidset) uuidset { if len(s1) == 0 || len(s2) == 0 { return s1 } for uuid := range s2 { s1.remove(uuid) } return s1 } func intersectUUIDSets(s1, s2 uuidset) uuidset { if len(s1) == 0 || len(s2) == 0 { return nil } var big uuidset var small uuidset if len(s1) > len(s2) { big = s1 small = s2 } else { big = s2 small = s1 } f := uuidset{} for uuid := range small { if big.has(uuid) { f.add(uuid) } } return f } golang-github-ovn-org-libovsdb-0.7.0/client/000077500000000000000000000000001464501522100207125ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/client/api.go000066400000000000000000000427141464501522100220220ustar00rootroot00000000000000package client import ( "context" "errors" "fmt" "reflect" "github.com/go-logr/logr" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // API defines basic operations to interact with the database type API interface { // List populates a slice of Models objects based on their type // The function parameter must be a pointer to a slice of Models // Models can be structs or pointers to structs // If the slice is null, the entire cache will be copied into the slice // If it has a capacity != 0, only 'capacity' elements will be filled in List(ctx context.Context, result interface{}) error // Create a Conditional API from a Function that is used to filter cached data // The function must accept a Model implementation and return a boolean. E.g: // ConditionFromFunc(func(l *LogicalSwitch) bool { return l.Enabled }) WhereCache(predicate interface{}) ConditionalAPI // Create a ConditionalAPI from a Model's index data, where operations // apply to elements that match the values provided in one or more // model.Models according to the indexes. All provided Models must be // the same type or an error will be generated when operations are // are performed on the ConditionalAPI. Where(...model.Model) ConditionalAPI // WhereAny creates a ConditionalAPI from a list of Conditions where // operations apply to elements that match any (eg, logical OR) of the // conditions. WhereAny(model.Model, ...model.Condition) ConditionalAPI // WhereAll creates a ConditionalAPI from a list of Conditions where // operations apply to elements that match all (eg, logical AND) of the // conditions. 
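//
// A hedged sketch of WhereAll, mirroring the pattern used in this package's
// tests (the LogicalSwitchPort model, its fields, and the client variable are
// illustrative, not part of this package):
//
//	lsp := &LogicalSwitchPort{}
//	ops, err := client.WhereAll(lsp,
//		model.Condition{Field: &lsp.Name, Function: ovsdb.ConditionEqual, Value: "lsp1"},
//		model.Condition{Field: &lsp.Type, Function: ovsdb.ConditionEqual, Value: "router"},
//	).Delete()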
WhereAll(model.Model, ...model.Condition) ConditionalAPI // Get retrieves a model from the cache // The way the object will be fetch depends on the data contained in the // provided model and the indexes defined in the associated schema // For more complex ways of searching for elements in the cache, the // preferred way is Where({condition}).List() Get(context.Context, model.Model) error // Create returns the operation needed to add the model(s) to the Database // Only fields with non-default values will be added to the transaction. If // the field associated with column "_uuid" has some content other than a // UUID, it will be treated as named-uuid Create(...model.Model) ([]ovsdb.Operation, error) } // ConditionalAPI is an interface used to perform operations that require / use Conditions type ConditionalAPI interface { // List uses the condition to search on the cache and populates // the slice of Models objects based on their type List(ctx context.Context, result interface{}) error // Mutate returns the operations needed to perform the mutation specified // By the model and the list of Mutation objects // Depending on the Condition, it might return one or many operations Mutate(model.Model, ...model.Mutation) ([]ovsdb.Operation, error) // Update returns the operations needed to update any number of rows according // to the data in the given model. // By default, all the non-default values contained in model will be updated. // Optional fields can be passed (pointer to fields in the model) to select the // the fields to be updated Update(model.Model, ...interface{}) ([]ovsdb.Operation, error) // Delete returns the Operations needed to delete the models selected via the condition Delete() ([]ovsdb.Operation, error) // Wait returns the operations needed to perform the wait specified // by the until condition, timeout, row and columns based on provided parameters. 
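//
// A hedged sketch adapted from the RFC 7047 reference example further down in
// this file (the LoadBalancer model and the client variable are illustrative):
//
//	lb := &LoadBalancer{}
//	timeout := 0
//	ops, err := client.WhereAny(lb,
//		model.Condition{Field: &lb.Name, Function: ovsdb.ConditionEqual, Value: "lbName"},
//	).Wait(ovsdb.WaitConditionNotEqual, &timeout, lb, &lb.Name)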
Wait(ovsdb.WaitCondition, *int, model.Model, ...interface{}) ([]ovsdb.Operation, error) } // ErrWrongType is used to report the user provided parameter has the wrong type type ErrWrongType struct { inputType reflect.Type reason string } func (e *ErrWrongType) Error() string { return fmt.Sprintf("Wrong parameter type (%s): %s", e.inputType, e.reason) } // ErrNotFound is used to inform the object or table was not found in the cache var ErrNotFound = errors.New("object not found") // api struct implements both API and ConditionalAPI // Where() can be used to create a ConditionalAPI api type api struct { cache *cache.TableCache cond Conditional logger *logr.Logger } // List populates a slice of Models given as parameter based on the configured Condition func (a api) List(ctx context.Context, result interface{}) error { resultPtr := reflect.ValueOf(result) if resultPtr.Type().Kind() != reflect.Ptr { return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"} } resultVal := reflect.Indirect(resultPtr) if resultVal.Type().Kind() != reflect.Slice { return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"} } // List accepts a slice of Models that can be either structs or pointer to // structs var appendValue func(reflect.Value) var m model.Model if resultVal.Type().Elem().Kind() == reflect.Ptr { m = reflect.New(resultVal.Type().Elem().Elem()).Interface() appendValue = func(v reflect.Value) { resultVal.Set(reflect.Append(resultVal, v)) } } else { m = reflect.New(resultVal.Type().Elem()).Interface() appendValue = func(v reflect.Value) { resultVal.Set(reflect.Append(resultVal, reflect.Indirect(v))) } } table, err := a.getTableFromModel(m) if err != nil { return err } if a.cond != nil && a.cond.Table() != table { return &ErrWrongType{resultPtr.Type(), fmt.Sprintf("Table derived from input type (%s) does not match Table from Condition (%s)", table, a.cond.Table())} } tableCache := a.cache.Table(table) if tableCache == nil { return ErrNotFound } var rows map[string]model.Model if a.cond != nil { rows, err = a.cond.Matches() if err != nil { return err } } else { rows = tableCache.Rows() } // If given a null slice, fill it in the cache table completely, if not, just up to // its capability. if resultVal.IsNil() || resultVal.Cap() == 0 { resultVal.Set(reflect.MakeSlice(resultVal.Type(), 0, len(rows))) } i := resultVal.Len() maxCap := resultVal.Cap() for _, row := range rows { if i >= maxCap { break } appendValue(reflect.ValueOf(row)) i++ } return nil } // Where returns a conditionalAPI based on model indexes. All provided models // must be the same type. 
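//
// A hedged example based on this package's tests (the LogicalSwitchPort model,
// the api variable, and ctx are illustrative): selecting rows whose "name"
// index matches the populated field:
//
//	lsp := &LogicalSwitchPort{Name: "lsp0"}
//	var results []*LogicalSwitchPort
//	err := api.Where(lsp).List(ctx, &results)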
func (a api) Where(models ...model.Model) ConditionalAPI { return newConditionalAPI(a.cache, a.conditionFromModels(models), a.logger) } // WhereAny returns a conditionalAPI based on a Condition list that matches any // of the conditions individually func (a api) WhereAny(m model.Model, cond ...model.Condition) ConditionalAPI { return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(false, m, cond...), a.logger) } // WhereAll returns a conditionalAPI based on a Condition list that matches all // of the conditions together func (a api) WhereAll(m model.Model, cond ...model.Condition) ConditionalAPI { return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(true, m, cond...), a.logger) } // WhereCache returns a conditionalAPI based a Predicate func (a api) WhereCache(predicate interface{}) ConditionalAPI { return newConditionalAPI(a.cache, a.conditionFromFunc(predicate), a.logger) } // Conditional interface implementation // FromFunc returns a Condition from a function func (a api) conditionFromFunc(predicate interface{}) Conditional { table, err := a.getTableFromFunc(predicate) if err != nil { return newErrorConditional(err) } condition, err := newPredicateConditional(table, a.cache, predicate) if err != nil { return newErrorConditional(err) } return condition } // conditionFromModels returns a Conditional from one or more models. func (a api) conditionFromModels(models []model.Model) Conditional { if len(models) == 0 { return newErrorConditional(fmt.Errorf("at least one model required")) } tableName, err := a.getTableFromModel(models[0]) if tableName == "" { return newErrorConditional(err) } conditional, err := newEqualityConditional(tableName, a.cache, models) if err != nil { return newErrorConditional(err) } return conditional } // conditionFromExplicitConditions returns a Conditional from a model and a set // of explicit conditions. If matchAll is true, then models that match all the given // conditions are selected by the Conditional. If matchAll is false, then any model // that matches one of the conditions is selected. func (a api) conditionFromExplicitConditions(matchAll bool, m model.Model, cond ...model.Condition) Conditional { if len(cond) == 0 { return newErrorConditional(fmt.Errorf("at least one condition is required")) } tableName, err := a.getTableFromModel(m) if tableName == "" { return newErrorConditional(err) } conditional, err := newExplicitConditional(tableName, a.cache, matchAll, m, cond...) if err != nil { return newErrorConditional(err) } return conditional } // Get is a generic Get function capable of returning (through a provided pointer) // a instance of any row in the cache. // 'result' must be a pointer to an Model that exists in the ClientDBModel // // The way the cache is searched depends on the fields already populated in 'result' // Any table index (including _uuid) will be used for comparison func (a api) Get(ctx context.Context, m model.Model) error { table, err := a.getTableFromModel(m) if err != nil { return err } tableCache := a.cache.Table(table) if tableCache == nil { return ErrNotFound } _, found, err := tableCache.RowByModel(m) if err != nil { return err } else if found == nil { return ErrNotFound } model.CloneInto(found, m) return nil } // Create is a generic function capable of creating any row in the DB // A valid Model (pointer to object) must be provided. 
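//
// A hedged sketch (the LogicalSwitch model and the api variable are
// illustrative): a non-UUID string in the _uuid field is emitted as the
// operation's named-uuid, so the inserted row can be referenced by later
// operations in the same transaction:
//
//	ops, err := api.Create(&LogicalSwitch{UUID: "mySwitch", Name: "sw0"})
//	// ops[0].UUIDName == "mySwitch"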
func (a api) Create(models ...model.Model) ([]ovsdb.Operation, error) { var operations []ovsdb.Operation for _, model := range models { var realUUID, namedUUID string var err error tableName, err := a.getTableFromModel(model) if err != nil { return nil, err } // Read _uuid field, and use it as named-uuid info, err := a.cache.DatabaseModel().NewModelInfo(model) if err != nil { return nil, err } if uuid, err := info.FieldByColumn("_uuid"); err == nil { tmpUUID := uuid.(string) if ovsdb.IsNamedUUID(tmpUUID) { namedUUID = tmpUUID } else if ovsdb.IsValidUUID(tmpUUID) { realUUID = tmpUUID } } else { return nil, err } row, err := a.cache.Mapper().NewRow(info) if err != nil { return nil, err } // UUID is given in the operation, not the object delete(row, "_uuid") operations = append(operations, ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: tableName, Row: row, UUID: realUUID, UUIDName: namedUUID, }) } return operations, nil } // Mutate returns the operations needed to transform the one Model into another one func (a api) Mutate(model model.Model, mutationObjs ...model.Mutation) ([]ovsdb.Operation, error) { var mutations []ovsdb.Mutation var operations []ovsdb.Operation if len(mutationObjs) < 1 { return nil, fmt.Errorf("at least one Mutation must be provided") } tableName := a.cache.DatabaseModel().FindTable(reflect.ValueOf(model).Type()) if tableName == "" { return nil, fmt.Errorf("table not found for object") } table := a.cache.Mapper().Schema.Table(tableName) if table == nil { return nil, fmt.Errorf("schema error: table not found in Database Model for type %s", reflect.TypeOf(model)) } conditions, err := a.cond.Generate() if err != nil { return nil, err } info, err := a.cache.DatabaseModel().NewModelInfo(model) if err != nil { return nil, err } for _, mobj := range mutationObjs { col, err := info.ColumnByPtr(mobj.Field) if err != nil { return nil, err } mutation, err := a.cache.Mapper().NewMutation(info, col, mobj.Mutator, mobj.Value) if err != nil { return nil, err } mutations = append(mutations, *mutation) } for _, condition := range conditions { operations = append(operations, ovsdb.Operation{ Op: ovsdb.OperationMutate, Table: tableName, Mutations: mutations, Where: condition, }, ) } return operations, nil } // Update is a generic function capable of updating any mutable field in any row in the database // Additional fields can be passed (variadic opts) to indicate fields to be updated // All immutable fields will be ignored func (a api) Update(model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) { var operations []ovsdb.Operation table, err := a.getTableFromModel(model) if err != nil { return nil, err } tableSchema := a.cache.Mapper().Schema.Table(table) info, err := a.cache.DatabaseModel().NewModelInfo(model) if err != nil { return nil, err } if len(fields) > 0 { for _, f := range fields { colName, err := info.ColumnByPtr(f) if err != nil { return nil, err } if !tableSchema.Columns[colName].Mutable() { return nil, fmt.Errorf("unable to update field %s of table %s as it is not mutable", colName, table) } } } conditions, err := a.cond.Generate() if err != nil { return nil, err } row, err := a.cache.Mapper().NewRow(info, fields...) if err != nil { return nil, err } for colName, column := range tableSchema.Columns { if !column.Mutable() { a.logger.V(2).Info("removing immutable field", "name", colName) delete(row, colName) } } delete(row, "_uuid") if len(row) == 0 { return nil, fmt.Errorf("attempted to update using an empty row. 
please check that all fields you wish to update are mutable") } for _, condition := range conditions { operations = append(operations, ovsdb.Operation{ Op: ovsdb.OperationUpdate, Table: table, Row: row, Where: condition, }, ) } return operations, nil } // Delete returns the Operation needed to delete the selected models from the database func (a api) Delete() ([]ovsdb.Operation, error) { var operations []ovsdb.Operation conditions, err := a.cond.Generate() if err != nil { return nil, err } for _, condition := range conditions { operations = append(operations, ovsdb.Operation{ Op: ovsdb.OperationDelete, Table: a.cond.Table(), Where: condition, }, ) } return operations, nil } func (a api) Wait(untilConFun ovsdb.WaitCondition, timeout *int, model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) { var operations []ovsdb.Operation /* Ref: https://datatracker.ietf.org/doc/html/rfc7047.txt#section-5.2.6 lb := &nbdb.LoadBalancer{} condition := model.Condition{ Field: &lb.Name, Function: ovsdb.ConditionEqual, Value: "lbName", } timeout0 := 0 client.Where(lb, condition).Wait( ovsdb.WaitConditionNotEqual, // Until &timeout0, // Timeout &lb, // Row (and Table) &lb.Name, // Cols (aka fields) ) */ conditions, err := a.cond.Generate() if err != nil { return nil, err } table, err := a.getTableFromModel(model) if err != nil { return nil, err } info, err := a.cache.DatabaseModel().NewModelInfo(model) if err != nil { return nil, err } var columnNames []string if len(fields) > 0 { columnNames = make([]string, 0, len(fields)) for _, f := range fields { colName, err := info.ColumnByPtr(f) if err != nil { return nil, err } columnNames = append(columnNames, colName) } } row, err := a.cache.Mapper().NewRow(info, fields...) if err != nil { return nil, err } rows := []ovsdb.Row{row} for _, condition := range conditions { operation := ovsdb.Operation{ Op: ovsdb.OperationWait, Table: table, Where: condition, Until: string(untilConFun), Columns: columnNames, Rows: rows, } if timeout != nil { operation.Timeout = timeout } operations = append(operations, operation) } return operations, nil } // getTableFromModel returns the table name from a Model object after performing // type verifications on the model func (a api) getTableFromModel(m interface{}) (string, error) { if _, ok := m.(model.Model); !ok { return "", &ErrWrongType{reflect.TypeOf(m), "Type does not implement Model interface"} } table := a.cache.DatabaseModel().FindTable(reflect.TypeOf(m)) if table == "" { return "", &ErrWrongType{reflect.TypeOf(m), "Model not found in Database Model"} } return table, nil } // getTableFromModel returns the table name from a the predicate after performing // type verifications func (a api) getTableFromFunc(predicate interface{}) (string, error) { predType := reflect.TypeOf(predicate) if predType == nil || predType.Kind() != reflect.Func { return "", &ErrWrongType{predType, "Expected function"} } if predType.NumIn() != 1 || predType.NumOut() != 1 || predType.Out(0).Kind() != reflect.Bool { return "", &ErrWrongType{predType, "Expected func(Model) bool"} } modelInterface := reflect.TypeOf((*model.Model)(nil)).Elem() modelType := predType.In(0) if !modelType.Implements(modelInterface) { return "", &ErrWrongType{predType, fmt.Sprintf("Type %s does not implement Model interface", modelType.String())} } table := a.cache.DatabaseModel().FindTable(modelType) if table == "" { return "", &ErrWrongType{predType, fmt.Sprintf("Model %s not found in Database Model", modelType.String())} } return table, nil } // newAPI returns a 
new API to interact with the database func newAPI(cache *cache.TableCache, logger *logr.Logger) API { return api{ cache: cache, logger: logger, } } // newConditionalAPI returns a new ConditionalAPI to interact with the database func newConditionalAPI(cache *cache.TableCache, cond Conditional, logger *logr.Logger) ConditionalAPI { return api{ cache: cache, cond: cond, logger: logger, } } golang-github-ovn-org-libovsdb-0.7.0/client/api_test.go000066400000000000000000001464041464501522100230620ustar00rootroot00000000000000package client import ( "context" "fmt" "math/rand" "strings" "testing" "github.com/go-logr/logr" "github.com/google/uuid" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( trueVal = true falseVal = false one = 1 six = 6 ) var discardLogger = logr.Discard() func TestAPIListSimple(t *testing.T) { lscacheList := []model.Model{ &testLogicalSwitch{ UUID: aUUID0, Name: "ls0", ExternalIds: map[string]string{"foo": "bar"}, }, &testLogicalSwitch{ UUID: aUUID1, Name: "ls1", ExternalIds: map[string]string{"foo": "baz"}, }, &testLogicalSwitch{ UUID: aUUID2, Name: "ls2", ExternalIds: map[string]string{"foo": "baz"}, }, &testLogicalSwitch{ UUID: aUUID3, Name: "ls4", ExternalIds: map[string]string{"foo": "baz"}, Ports: []string{"port0", "port1"}, }, } lscache := map[string]model.Model{} for i := range lscacheList { lscache[lscacheList[i].(*testLogicalSwitch).UUID] = lscacheList[i] } testData := cache.Data{ "Logical_Switch": lscache, } tcache := apiTestCache(t, testData) test := []struct { name string initialCap int resultCap int resultLen int content []model.Model err bool }{ { name: "full", initialCap: 0, resultCap: len(lscache), resultLen: len(lscacheList), content: lscacheList, err: false, }, { name: "single", initialCap: 1, resultCap: 1, resultLen: 1, content: lscacheList, err: false, }, { name: "multiple", initialCap: 2, resultCap: 2, resultLen: 2, content: lscacheList, err: false, }, } hasDups := func(a interface{}) bool { l := map[string]struct{}{} switch v := a.(type) { case []testLogicalSwitch: for _, i := range v { if _, ok := l[i.Name]; ok { return ok } } case []*testLogicalSwitch: for _, i := range v { if _, ok := l[i.Name]; ok { return ok } } } return false } for _, tt := range test { t.Run(fmt.Sprintf("ApiList: %s", tt.name), func(t *testing.T) { // test List with a pointer to a slice of Models var result []*testLogicalSwitch if tt.initialCap != 0 { result = make([]*testLogicalSwitch, 0, tt.initialCap) } api := newAPI(tcache, &discardLogger) err := api.List(context.Background(), &result) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Lenf(t, result, tt.resultLen, "Length should match expected") assert.Equal(t, cap(result), tt.resultCap, "Capability should match expected") assert.Subsetf(t, tt.content, result, "Result should be a subset of expected") assert.False(t, hasDups(result), "Result should have no duplicates") } // test List with a pointer to a slice of structs var resultWithNoPtr []testLogicalSwitch if tt.initialCap != 0 { resultWithNoPtr = make([]testLogicalSwitch, 0, tt.initialCap) } contentNoPtr := make([]testLogicalSwitch, 0, len(tt.content)) for i := range tt.content { contentNoPtr = append(contentNoPtr, *tt.content[i].(*testLogicalSwitch)) } err = api.List(context.Background(), &resultWithNoPtr) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Lenf(t, result, tt.resultLen, "Length 
should match expected") assert.Equal(t, cap(result), tt.resultCap, "Capability should match expected") assert.Subsetf(t, contentNoPtr, resultWithNoPtr, "Result should be a subset of expected") assert.False(t, hasDups(resultWithNoPtr), "Result should have no duplicates") } }) } t.Run("ApiList: Error wrong type", func(t *testing.T) { var result []string api := newAPI(tcache, &discardLogger) err := api.List(context.Background(), &result) assert.NotNil(t, err) }) t.Run("ApiList: Type Selection", func(t *testing.T) { var result []testLogicalSwitchPort api := newAPI(tcache, &discardLogger) err := api.List(context.Background(), &result) assert.Nil(t, err) assert.Len(t, result, 0, "Should be empty since cache is empty") }) t.Run("ApiList: Empty List", func(t *testing.T) { result := []testLogicalSwitch{} api := newAPI(tcache, &discardLogger) err := api.List(context.Background(), &result) assert.Nil(t, err) assert.Len(t, result, len(lscacheList)) }) t.Run("ApiList: fails if conditional is an error", func(t *testing.T) { result := []testLogicalSwitch{} api := newConditionalAPI(tcache, newErrorConditional(fmt.Errorf("error")), &discardLogger) err := api.List(context.Background(), &result) assert.NotNil(t, err) }) } func TestAPIListPredicate(t *testing.T) { lscacheList := []model.Model{ &testLogicalSwitch{ UUID: aUUID0, Name: "ls0", ExternalIds: map[string]string{"foo": "bar"}, }, &testLogicalSwitch{ UUID: aUUID1, Name: "magicLs1", ExternalIds: map[string]string{"foo": "baz"}, }, &testLogicalSwitch{ UUID: aUUID2, Name: "ls2", ExternalIds: map[string]string{"foo": "baz"}, }, &testLogicalSwitch{ UUID: aUUID3, Name: "magicLs2", ExternalIds: map[string]string{"foo": "baz"}, Ports: []string{"port0", "port1"}, }, } lscache := map[string]model.Model{} for i := range lscacheList { lscache[lscacheList[i].(*testLogicalSwitch).UUID] = lscacheList[i] } testData := cache.Data{ "Logical_Switch": lscache, } tcache := apiTestCache(t, testData) test := []struct { name string predicate interface{} content []model.Model err bool }{ { name: "none", predicate: func(t *testLogicalSwitch) bool { return false }, content: []model.Model{}, err: false, }, { name: "all", predicate: func(t *testLogicalSwitch) bool { return true }, content: lscacheList, err: false, }, { name: "nil function must fail", err: true, }, { name: "arbitrary condition", predicate: func(t *testLogicalSwitch) bool { return strings.HasPrefix(t.Name, "magic") }, content: []model.Model{lscacheList[1], lscacheList[3]}, err: false, }, { name: "error wrong type", predicate: func(t testLogicalSwitch) string { return "foo" }, err: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiListPredicate: %s", tt.name), func(t *testing.T) { var result []*testLogicalSwitch api := newAPI(tcache, &discardLogger) cond := api.WhereCache(tt.predicate) err := cond.List(context.Background(), &result) if tt.err { assert.NotNil(t, err) } else { if !assert.Nil(t, err) { t.Log(err) } assert.ElementsMatchf(t, tt.content, result, "Content should match") } }) } } func TestAPIListWhereConditions(t *testing.T) { lscacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", Type: "", }, &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", Type: "router", }, &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", Type: "router", }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "lsp3", Type: "localnet", }, } lscache := map[string]model.Model{} for i := range lscacheList { lscache[lscacheList[i].(*testLogicalSwitchPort).UUID] = lscacheList[i] } testData := cache.Data{ 
"Logical_Switch_Port": lscache, } tcache := apiTestCache(t, testData) test := []struct { desc string matchNames []string matchTypes []string matchAll bool result []model.Model }{ { desc: "any conditions", matchNames: []string{"lsp0"}, matchTypes: []string{"router"}, matchAll: false, result: lscacheList[0:3], }, { desc: "all conditions", matchNames: []string{"lsp1"}, matchTypes: []string{"router"}, matchAll: true, result: lscacheList[1:2], }, } for _, tt := range test { t.Run(fmt.Sprintf("TestAPIListWhereConditions: %s", tt.desc), func(t *testing.T) { var result []*testLogicalSwitchPort api := newAPI(tcache, &discardLogger) testObj := &testLogicalSwitchPort{} conds := []model.Condition{} for _, name := range tt.matchNames { cond := model.Condition{Field: &testObj.Name, Function: ovsdb.ConditionEqual, Value: name} conds = append(conds, cond) } for _, atype := range tt.matchTypes { cond := model.Condition{Field: &testObj.Type, Function: ovsdb.ConditionEqual, Value: atype} conds = append(conds, cond) } var capi ConditionalAPI if tt.matchAll { capi = api.WhereAll(testObj, conds...) } else { capi = api.WhereAny(testObj, conds...) } err := capi.List(context.Background(), &result) assert.NoError(t, err) assert.ElementsMatchf(t, tt.result, result, "Content should match") }) } } func TestAPIListFields(t *testing.T) { lspcacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, }, &testLogicalSwitchPort{ UUID: aUUID1, Name: "magiclsp1", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", ExternalIds: map[string]string{"unique": "id"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "magiclsp2", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &trueVal, }, } lspcache := map[string]model.Model{} for i := range lspcacheList { lspcache[lspcacheList[i].(*testLogicalSwitchPort).UUID] = lspcacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lspcache, } tcache := apiTestCache(t, testData) test := []struct { name string fields []interface{} prepare func(*testLogicalSwitchPort) content []model.Model }{ { name: "No match", prepare: func(t *testLogicalSwitchPort) {}, content: []model.Model{}, }, { name: "List unique by UUID", prepare: func(t *testLogicalSwitchPort) { t.UUID = aUUID0 }, content: []model.Model{lspcache[aUUID0]}, }, { name: "List unique by Index", prepare: func(t *testLogicalSwitchPort) { t.Name = "lsp2" }, content: []model.Model{lspcache[aUUID2]}, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiListFields: %s", tt.name), func(t *testing.T) { var result []*testLogicalSwitchPort // Clean object testObj := testLogicalSwitchPort{} tt.prepare(&testObj) api := newAPI(tcache, &discardLogger) err := api.Where(&testObj).List(context.Background(), &result) assert.Nil(t, err) assert.ElementsMatchf(t, tt.content, result, "Content should match") }) } t.Run("ApiListFields: Wrong table", func(t *testing.T) { var result []testLogicalSwitchPort api := newAPI(tcache, &discardLogger) obj := testLogicalSwitch{ UUID: aUUID0, } err := api.Where(&obj).List(context.Background(), &result) assert.NotNil(t, err) }) } func TestAPIListMulti(t *testing.T) { lspcacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, }, &testLogicalSwitchPort{ UUID: aUUID1, Name: "magiclsp1", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &falseVal, }, 
&testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", ExternalIds: map[string]string{"unique": "id"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "magiclsp2", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &trueVal, }, } lspcache := map[string]model.Model{} for i := range lspcacheList { lspcache[lspcacheList[i].(*testLogicalSwitchPort).UUID] = lspcacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lspcache, } tcache := apiTestCache(t, testData) test := []struct { name string models []model.Model matches []model.Model err bool }{ { name: "No match", models: []model.Model{ &testLogicalSwitchPort{UUID: "asdfasdfaf"}, &testLogicalSwitchPort{UUID: "ghghghghgh"}, }, matches: []model.Model{}, err: false, }, { name: "One match", models: []model.Model{ &testLogicalSwitchPort{UUID: aUUID0}, &testLogicalSwitchPort{UUID: "ghghghghgh"}, }, matches: []model.Model{lspcache[aUUID0]}, err: false, }, { name: "Mismatched models", models: []model.Model{ &testLogicalSwitchPort{UUID: aUUID0}, &testLogicalSwitch{UUID: "ghghghghgh"}, }, matches: []model.Model{}, err: true, }, } for _, tt := range test { t.Run(tt.name, func(t *testing.T) { var result []*testLogicalSwitchPort api := newAPI(tcache, &discardLogger) err := api.Where(tt.models...).List(context.Background(), &result) if tt.err { assert.Error(t, err) } else { assert.NoError(t, err) assert.ElementsMatchf(t, tt.matches, result, "Content should match") } }) } } func TestConditionFromFunc(t *testing.T) { test := []struct { name string arg interface{} err bool }{ { name: "wrong function must fail", arg: func(s string) bool { return false }, err: true, }, { name: "wrong function must fail2 ", arg: func(t *testLogicalSwitch) string { return "foo" }, err: true, }, { name: "correct func should succeed", arg: func(t *testLogicalSwitch) bool { return true }, err: false, }, } for _, tt := range test { t.Run(fmt.Sprintf("conditionFromFunc: %s", tt.name), func(t *testing.T) { cache := apiTestCache(t, nil) apiIface := newAPI(cache, &discardLogger) condition := apiIface.(api).conditionFromFunc(tt.arg) if tt.err { assert.IsType(t, &errorConditional{}, condition) } else { assert.IsType(t, &predicateConditional{}, condition) } }) } } func TestConditionFromModel(t *testing.T) { var testObj testLogicalSwitch test := []struct { name string models []model.Model conds []model.Condition err bool }{ { name: "wrong model must fail", models: []model.Model{ &struct{ a string }{}, }, err: true, }, { name: "wrong condition must fail", models: []model.Model{ &struct { a string `ovsdb:"_uuid"` }{}, }, conds: []model.Condition{{Field: "foo"}}, err: true, }, { name: "correct model must succeed", models: []model.Model{ &testLogicalSwitch{}, }, err: false, }, { name: "correct models must succeed", models: []model.Model{ &testLogicalSwitch{}, &testLogicalSwitch{}, }, err: false, }, { name: "correct model with valid condition must succeed", models: []model.Model{&testObj}, conds: []model.Condition{ { Field: &testObj.Name, Function: ovsdb.ConditionEqual, Value: "foo", }, { Field: &testObj.Ports, Function: ovsdb.ConditionIncludes, Value: []string{"foo"}, }, }, err: false, }, } for _, tt := range test { t.Run(fmt.Sprintf("conditionFromModels: %s", tt.name), func(t *testing.T) { cache := apiTestCache(t, nil) apiIface := newAPI(cache, &discardLogger) var condition Conditional if len(tt.conds) > 0 { condition = apiIface.(api).conditionFromExplicitConditions(true, tt.models[0], tt.conds...) 
} else { condition = apiIface.(api).conditionFromModels(tt.models) } if tt.err { assert.IsType(t, &errorConditional{}, condition) } else { if len(tt.conds) > 0 { assert.IsType(t, &explicitConditional{}, condition) } else { assert.IsType(t, &equalityConditional{}, condition) } } }) } } func TestAPIGet(t *testing.T) { lsCacheList := []model.Model{} lspCacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp0", Type: "foo", ExternalIds: map[string]string{"foo": "bar"}, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "lsp1", Type: "bar", ExternalIds: map[string]string{"foo": "baz"}, }, } lsCache := map[string]model.Model{} lspCache := map[string]model.Model{} for i := range lsCacheList { lsCache[lsCacheList[i].(*testLogicalSwitch).UUID] = lsCacheList[i] } for i := range lspCacheList { lspCache[lspCacheList[i].(*testLogicalSwitchPort).UUID] = lspCacheList[i] } testData := cache.Data{ "Logical_Switch": lsCache, "Logical_Switch_Port": lspCache, } tcache := apiTestCache(t, testData) test := []struct { name string prepare func(model.Model) result model.Model err bool }{ { name: "empty", prepare: func(m model.Model) { }, err: true, }, { name: "non_existing", prepare: func(m model.Model) { m.(*testLogicalSwitchPort).Name = "foo" }, err: true, }, { name: "by UUID", prepare: func(m model.Model) { m.(*testLogicalSwitchPort).UUID = aUUID3 }, result: lspCacheList[1], err: false, }, { name: "by name", prepare: func(m model.Model) { m.(*testLogicalSwitchPort).Name = "lsp0" }, result: lspCacheList[0], err: false, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiGet: %s", tt.name), func(t *testing.T) { var result testLogicalSwitchPort tt.prepare(&result) api := newAPI(tcache, &discardLogger) err := api.Get(context.Background(), &result) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Equalf(t, tt.result, &result, "Result should match") } }) } } func TestAPICreate(t *testing.T) { lsCacheList := []model.Model{} lspCacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp0", Type: "foo", ExternalIds: map[string]string{"foo": "bar"}, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "lsp1", Type: "bar", ExternalIds: map[string]string{"foo": "baz"}, }, } lsCache := map[string]model.Model{} lspCache := map[string]model.Model{} for i := range lsCacheList { lsCache[lsCacheList[i].(*testLogicalSwitch).UUID] = lsCacheList[i] } for i := range lspCacheList { lspCache[lspCacheList[i].(*testLogicalSwitchPort).UUID] = lspCacheList[i] } testData := cache.Data{ "Logical_Switch": lsCache, "Logical_Switch_Port": lspCache, } tcache := apiTestCache(t, testData) rowFoo := ovsdb.Row(map[string]interface{}{"name": "foo"}) rowBar := ovsdb.Row(map[string]interface{}{"name": "bar"}) test := []struct { name string input []model.Model result []ovsdb.Operation err bool }{ { name: "empty", input: []model.Model{&testLogicalSwitch{}}, result: []ovsdb.Operation{{ Op: "insert", Table: "Logical_Switch", Row: ovsdb.Row{}, UUIDName: "", }}, err: false, }, { name: "With some values", input: []model.Model{&testLogicalSwitch{ Name: "foo", }}, result: []ovsdb.Operation{{ Op: "insert", Table: "Logical_Switch", Row: rowFoo, UUIDName: "", }}, err: false, }, { name: "With named UUID ", input: []model.Model{&testLogicalSwitch{ UUID: "foo", }}, result: []ovsdb.Operation{{ Op: "insert", Table: "Logical_Switch", Row: ovsdb.Row{}, UUIDName: "foo", }}, err: false, }, { name: "Multiple", input: []model.Model{ &testLogicalSwitch{ UUID: "fooUUID", Name: "foo", }, &testLogicalSwitch{ UUID: "barUUID", 
Name: "bar", }, }, result: []ovsdb.Operation{{ Op: "insert", Table: "Logical_Switch", Row: rowFoo, UUIDName: "fooUUID", }, { Op: "insert", Table: "Logical_Switch", Row: rowBar, UUIDName: "barUUID", }}, err: false, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiCreate: %s", tt.name), func(t *testing.T) { api := newAPI(tcache, &discardLogger) op, err := api.Create(tt.input...) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Equalf(t, tt.result, op, "ovsdb.Operation should match") } }) } } func TestAPIMutate(t *testing.T) { lspCache := map[string]model.Model{ aUUID0: &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", Type: "someType", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, Tag: &one, }, aUUID1: &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", Type: "someType", ExternalIds: map[string]string{"foo": "baz"}, Tag: &one, }, aUUID2: &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", Type: "someOtherType", ExternalIds: map[string]string{"foo": "baz"}, Tag: &one, }, } testData := cache.Data{ "Logical_Switch_Port": lspCache, } tcache := apiTestCache(t, testData) testObj := testLogicalSwitchPort{} test := []struct { name string condition func(API) ConditionalAPI model model.Model mutations []model.Mutation init map[string]model.Model result []ovsdb.Operation err bool }{ { name: "select by UUID addElement to set", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ UUID: aUUID0, }) }, mutations: []model.Mutation{ { Field: &testObj.Addresses, Mutator: ovsdb.MutateOperationInsert, Value: []string{"1.1.1.1"}, }, }, result: []ovsdb.Operation{ { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "addresses", Mutator: ovsdb.MutateOperationInsert, Value: testOvsSet(t, []string{"1.1.1.1"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, }, err: false, }, { name: "select multiple by UUID addElement to set", condition: func(a API) ConditionalAPI { return a.Where( &testLogicalSwitchPort{UUID: aUUID0}, &testLogicalSwitchPort{UUID: aUUID1}, &testLogicalSwitchPort{UUID: aUUID2}, ) }, mutations: []model.Mutation{ { Field: &testObj.Addresses, Mutator: ovsdb.MutateOperationInsert, Value: []string{"2.2.2.2"}, }, }, result: []ovsdb.Operation{ { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "addresses", Mutator: ovsdb.MutateOperationInsert, Value: testOvsSet(t, []string{"2.2.2.2"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "addresses", Mutator: ovsdb.MutateOperationInsert, Value: testOvsSet(t, []string{"2.2.2.2"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "addresses", Mutator: ovsdb.MutateOperationInsert, Value: testOvsSet(t, []string{"2.2.2.2"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}}}, }, }, err: false, }, { name: "select by name delete element from map with cache", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "lsp2", }) }, mutations: []model.Mutation{ { Field: &testObj.ExternalIds, Mutator: ovsdb.MutateOperationDelete, Value: 
[]string{"foo"}, }, }, result: []ovsdb.Operation{ { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "external_ids", Mutator: ovsdb.MutateOperationDelete, Value: testOvsSet(t, []string{"foo"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}}}, }, }, err: false, }, { name: "select by name delete element from map with no cache", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "foo", }) }, mutations: []model.Mutation{ { Field: &testObj.ExternalIds, Mutator: ovsdb.MutateOperationDelete, Value: []string{"foo"}, }, }, result: []ovsdb.Operation{ { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "external_ids", Mutator: ovsdb.MutateOperationDelete, Value: testOvsSet(t, []string{"foo"})}}, Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}}, }, }, err: false, }, { name: "select single by predicate name insert element in map", condition: func(a API) ConditionalAPI { return a.WhereCache(func(lsp *testLogicalSwitchPort) bool { return lsp.Name == "lsp2" }) }, mutations: []model.Mutation{ { Field: &testObj.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"bar": "baz"}, }, }, result: []ovsdb.Operation{ { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "external_ids", Mutator: ovsdb.MutateOperationInsert, Value: testOvsMap(t, map[string]string{"bar": "baz"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}}}, }, }, err: false, }, { name: "select many by predicate name insert element in map", condition: func(a API) ConditionalAPI { return a.WhereCache(func(lsp *testLogicalSwitchPort) bool { return lsp.Type == "someType" }) }, mutations: []model.Mutation{ { Field: &testObj.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"bar": "baz"}, }, }, result: []ovsdb.Operation{ { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "external_ids", Mutator: ovsdb.MutateOperationInsert, Value: testOvsMap(t, map[string]string{"bar": "baz"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, { Op: ovsdb.OperationMutate, Table: "Logical_Switch_Port", Mutations: []ovsdb.Mutation{{Column: "external_ids", Mutator: ovsdb.MutateOperationInsert, Value: testOvsMap(t, map[string]string{"bar": "baz"})}}, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, }, err: false, }, { name: "No mutations should error", condition: func(a API) ConditionalAPI { return a.WhereCache(func(lsp *testLogicalSwitchPort) bool { return lsp.Type == "someType" }) }, mutations: []model.Mutation{}, err: true, }, { name: "multiple different selected models must fail", condition: func(a API) ConditionalAPI { return a.Where( &testLogicalSwitchPort{UUID: aUUID0}, &testLogicalSwitchPort{UUID: aUUID1}, &testLogicalSwitch{UUID: aUUID2}, ) }, err: true, }, { name: "fails if conditional is an error", condition: func(a API) ConditionalAPI { return newConditionalAPI(nil, newErrorConditional(fmt.Errorf("error")), &discardLogger) }, err: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiMutate: %s", tt.name), func(t *testing.T) { api := newAPI(tcache, &discardLogger) cond := tt.condition(api) ops, err := 
cond.Mutate(&testObj, tt.mutations...) if tt.err { require.Error(t, err) } else { require.NoError(t, err) assert.ElementsMatchf(t, tt.result, ops, "ovsdb.Operations should match") } }) } } func TestAPIUpdate(t *testing.T) { lspCache := map[string]model.Model{ aUUID0: &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", Type: "someType", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, Tag: &one, }, aUUID1: &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", Type: "someType", ExternalIds: map[string]string{"foo": "baz"}, Tag: &one, Enabled: &trueVal, }, aUUID2: &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", Type: "someOtherType", ExternalIds: map[string]string{"foo": "baz"}, Tag: &one, }, } testData := cache.Data{ "Logical_Switch_Port": lspCache, } tcache := apiTestCache(t, testData) testObj := testLogicalSwitchPort{} testRow := ovsdb.Row(map[string]interface{}{"type": "somethingElse", "tag": testOvsSet(t, []int{6})}) tagRow := ovsdb.Row(map[string]interface{}{"tag": testOvsSet(t, []int{6})}) var nilInt *int testNilRow := ovsdb.Row(map[string]interface{}{"type": "somethingElse", "tag": testOvsSet(t, nilInt)}) typeRow := ovsdb.Row(map[string]interface{}{"type": "somethingElse"}) fields := []interface{}{&testObj.Tag, &testObj.Type} test := []struct { name string condition func(API) ConditionalAPI prepare func(t *testLogicalSwitchPort) result []ovsdb.Operation fields []interface{} err bool }{ { name: "select by UUID change multiple field", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitch{ UUID: aUUID0, }) }, prepare: func(t *testLogicalSwitchPort) { t.Type = "somethingElse" t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: testRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, }, err: false, }, { name: "select by UUID change multiple field with nil pointer/empty set", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitch{ UUID: aUUID0, }) }, prepare: func(t *testLogicalSwitchPort) { t.Type = "somethingElse" t.Tag = nilInt }, fields: fields, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: testNilRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, }, err: false, }, { name: "select by UUID with no fields does not change multiple field with nil pointer/empty set", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitch{ UUID: aUUID0, }) }, prepare: func(t *testLogicalSwitchPort) { t.Type = "somethingElse" t.Tag = nilInt }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: typeRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, }, err: false, }, { name: "select by index change multiple field with no cache", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "foo", }) }, prepare: func(t *testLogicalSwitchPort) { t.Type = "somethingElse" t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: testRow, Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}}, }, }, err: false, }, { name: "select by index change multiple field with cache", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "lsp1", }) }, prepare: func(t 
*testLogicalSwitchPort) { t.Type = "somethingElse" t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: testRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, }, err: false, }, { name: "select by field change multiple field", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{ Type: "sometype", Enabled: &trueVal, } return a.WhereAny(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionEqual, Value: "sometype", }) }, prepare: func(t *testLogicalSwitchPort) { t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{{Column: "type", Function: ovsdb.ConditionEqual, Value: "sometype"}}, }, }, err: false, }, { name: "multiple select any by field change multiple field with cache hits", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{} return a.WhereAny(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionEqual, Value: "someOtherType", }, model.Condition{ Field: &t.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }) }, prepare: func(t *testLogicalSwitchPort) { t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}}}, }, { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, }, err: false, }, { name: "multiple select all by field change multiple field", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{} return a.WhereAll(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionEqual, Value: "sometype", }, model.Condition{ Field: &t.Enabled, Function: ovsdb.ConditionIncludes, Value: &trueVal, }) }, prepare: func(t *testLogicalSwitchPort) { t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{ {Column: "type", Function: ovsdb.ConditionEqual, Value: "sometype"}, {Column: "enabled", Function: ovsdb.ConditionIncludes, Value: testOvsSet(t, &trueVal)}, }, }, }, err: false, }, { name: "select by field inequality change multiple field with cache", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{ Type: "someType", Enabled: &trueVal, } return a.WhereAny(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionNotEqual, Value: "someType", }) }, prepare: func(t *testLogicalSwitchPort) { t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}}}, }, }, err: false, }, { name: "select by field inequality change multiple field with no cache", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{ Type: "sometype", Enabled: &trueVal, } return a.WhereAny(&t, model.Condition{ Field: &t.Tag, Function: ovsdb.ConditionNotEqual, Value: &one, }) }, prepare: func(t *testLogicalSwitchPort) { t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: 
"Logical_Switch_Port", Row: tagRow, Where: []ovsdb.Condition{{Column: "tag", Function: ovsdb.ConditionNotEqual, Value: testOvsSet(t, &one)}}, }, }, err: false, }, { name: "select multiple by predicate change multiple field", condition: func(a API) ConditionalAPI { return a.WhereCache(func(t *testLogicalSwitchPort) bool { return t.Enabled != nil && *t.Enabled == true }) }, prepare: func(t *testLogicalSwitchPort) { t.Type = "somethingElse" t.Tag = &six }, result: []ovsdb.Operation{ { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: testRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, { Op: ovsdb.OperationUpdate, Table: "Logical_Switch_Port", Row: testRow, Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, }, err: false, }, { name: "multiple different selected models must fail", condition: func(a API) ConditionalAPI { return a.Where( &testLogicalSwitchPort{UUID: aUUID0}, &testLogicalSwitchPort{UUID: aUUID1}, &testLogicalSwitch{UUID: aUUID2}, ) }, err: true, }, { name: "fails if conditional is an error", condition: func(a API) ConditionalAPI { return newConditionalAPI(tcache, newErrorConditional(fmt.Errorf("error")), &discardLogger) }, prepare: func(t *testLogicalSwitchPort) { }, err: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiUpdate: %s", tt.name), func(t *testing.T) { api := newAPI(tcache, &discardLogger) cond := tt.condition(api) // clean test Object testObj = testLogicalSwitchPort{} if tt.prepare != nil { tt.prepare(&testObj) } ops, err := cond.Update(&testObj, tt.fields...) if tt.err { require.Error(t, err) } else { require.NoError(t, err) require.ElementsMatchf(t, tt.result, ops, "ovsdb.Operations should match") } }) } } func TestAPIDelete(t *testing.T) { lspCache := map[string]model.Model{ aUUID0: &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", Type: "someType", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, Tag: &one, }, aUUID1: &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", Type: "someType", ExternalIds: map[string]string{"foo": "baz"}, Tag: &one, Enabled: &trueVal, }, aUUID2: &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", Type: "someOtherType", ExternalIds: map[string]string{"foo": "baz"}, Tag: &one, }, } testData := cache.Data{ "Logical_Switch_Port": lspCache, } tcache := apiTestCache(t, testData) test := []struct { name string condition func(API) ConditionalAPI result []ovsdb.Operation err bool }{ { name: "select by UUID", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitch{ UUID: aUUID0, }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch", Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, }, err: false, }, { name: "select by index with cache", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "lsp1", }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, }, err: false, }, { name: "select by index with no cache", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "foo", }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}}, }, }, 
err: false, }, { name: "select by field equality", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{ Enabled: &trueVal, } return a.WhereAny(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionEqual, Value: "sometype", }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "type", Function: ovsdb.ConditionEqual, Value: "sometype"}}, }, }, err: false, }, { name: "select any by field ", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{ Enabled: &trueVal, } return a.WhereAny(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionEqual, Value: "sometype", }, model.Condition{ Field: &t.Name, Function: ovsdb.ConditionEqual, Value: "foo", }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "type", Function: ovsdb.ConditionEqual, Value: "sometype"}}, }, { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}}, }, }, err: false, }, { name: "select all by field ", condition: func(a API) ConditionalAPI { t := testLogicalSwitchPort{ Enabled: &trueVal, } return a.WhereAll(&t, model.Condition{ Field: &t.Type, Function: ovsdb.ConditionEqual, Value: "sometype", }, model.Condition{ Field: &t.Name, Function: ovsdb.ConditionEqual, Value: "foo", }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{ {Column: "type", Function: ovsdb.ConditionEqual, Value: "sometype"}, {Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}, }, }, }, err: false, }, { name: "select multiple by predicate", condition: func(a API) ConditionalAPI { return a.WhereCache(func(t *testLogicalSwitchPort) bool { return t.Enabled != nil && *t.Enabled == true }) }, result: []ovsdb.Operation{ { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, }, { Op: ovsdb.OperationDelete, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}}}, }, }, err: false, }, { name: "multiple different selected models must fail", condition: func(a API) ConditionalAPI { return a.Where( &testLogicalSwitchPort{UUID: aUUID0}, &testLogicalSwitchPort{UUID: aUUID1}, &testLogicalSwitch{UUID: aUUID2}, ) }, err: true, }, { name: "fails if conditional is an error", condition: func(a API) ConditionalAPI { return newConditionalAPI(nil, newErrorConditional(fmt.Errorf("error")), &discardLogger) }, err: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiDelete: %s", tt.name), func(t *testing.T) { api := newAPI(tcache, &discardLogger) cond := tt.condition(api) ops, err := cond.Delete() if tt.err { assert.Error(t, err) } else { assert.NoError(t, err) assert.ElementsMatchf(t, tt.result, ops, "ovsdb.Operations should match") } }) } } func BenchmarkAPIList(b *testing.B) { const numRows = 10000 lscacheList := make([]*testLogicalSwitchPort, 0, numRows) for i := 0; i < numRows; i++ { lscacheList = append(lscacheList, &testLogicalSwitchPort{ UUID: uuid.New().String(), Name: fmt.Sprintf("ls%d", i), ExternalIds: map[string]string{"foo": "bar"}, }) } lscache := map[string]model.Model{} for i := range lscacheList { lscache[lscacheList[i].UUID] = lscacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lscache, } tcache := 
apiTestCache(b, testData) rand.Seed(int64(b.N)) var index int test := []struct { name string predicate interface{} }{ { name: "predicate returns none", predicate: func(t *testLogicalSwitchPort) bool { return false }, }, { name: "predicate returns all", predicate: func(t *testLogicalSwitchPort) bool { return true }, }, { name: "predicate on an arbitrary condition", predicate: func(t *testLogicalSwitchPort) bool { return strings.HasPrefix(t.Name, "ls1") }, }, { name: "predicate matches name", predicate: func(t *testLogicalSwitchPort) bool { return t.Name == lscacheList[index].Name }, }, { name: "by index, no predicate", }, } for _, tt := range test { b.Run(tt.name, func(b *testing.B) { for i := 0; i < b.N; i++ { index = rand.Intn(numRows) var result []*testLogicalSwitchPort api := newAPI(tcache, &discardLogger) var cond ConditionalAPI if tt.predicate != nil { cond = api.WhereCache(tt.predicate) } else { cond = api.Where(lscacheList[index]) } err := cond.List(context.Background(), &result) assert.NoError(b, err) } }) } } func BenchmarkAPIListMultiple(b *testing.B) { const numRows = 500 lscacheList := make([]*testLogicalSwitchPort, 0, numRows) for i := 0; i < numRows; i++ { lscacheList = append(lscacheList, &testLogicalSwitchPort{ UUID: uuid.New().String(), Name: fmt.Sprintf("ls%d", i), ExternalIds: map[string]string{"foo": "bar"}, }) } lscache := map[string]model.Model{} for i := range lscacheList { lscache[lscacheList[i].UUID] = lscacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lscache, } tcache := apiTestCache(b, testData) models := make([]model.Model, len(lscacheList)) for i := 0; i < len(lscacheList); i++ { models[i] = &testLogicalSwitchPort{UUID: lscacheList[i].UUID} } test := []struct { name string whereAny bool }{ { name: "multiple results one at a time with Get", }, { name: "multiple results in a batch with WhereAny", whereAny: true, }, } for _, tt := range test { b.Run(tt.name, func(b *testing.B) { for i := 0; i < b.N; i++ { var results []*testLogicalSwitchPort api := newAPI(tcache, &discardLogger) if tt.whereAny { // Looking up models with WhereAny() should be fast cond := api.Where(models...) 
err := cond.List(context.Background(), &results) assert.NoError(b, err) } else { // Looking up models one-at-a-time with Get() should be slow for j := 0; j < len(lscacheList); j++ { m := &testLogicalSwitchPort{UUID: lscacheList[j].UUID} err := api.Get(context.Background(), m) assert.NoError(b, err) results = append(results, m) } } assert.Len(b, results, len(models)) } }) } } func TestAPIWait(t *testing.T) { tcache := apiTestCache(t, cache.Data{}) timeout0 := 0 test := []struct { name string condition func(API) ConditionalAPI prepare func() (model.Model, []interface{}) until ovsdb.WaitCondition timeout *int result []ovsdb.Operation err bool }{ { name: "timeout 0, no columns", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "lsp0", }) }, until: "==", timeout: &timeout0, prepare: func() (model.Model, []interface{}) { testLSP := testLogicalSwitchPort{ Name: "lsp0", } return &testLSP, nil }, result: []ovsdb.Operation{ { Op: ovsdb.OperationWait, Table: "Logical_Switch_Port", Timeout: &timeout0, Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "lsp0"}}, Until: string(ovsdb.WaitConditionEqual), Columns: nil, Rows: []ovsdb.Row{{"name": "lsp0"}}, }, }, err: false, }, { name: "no timeout", condition: func(a API) ConditionalAPI { return a.Where(&testLogicalSwitchPort{ Name: "lsp0", }) }, until: "!=", prepare: func() (model.Model, []interface{}) { testLSP := testLogicalSwitchPort{ Name: "lsp0", Type: "someType", } return &testLSP, []interface{}{&testLSP.Name, &testLSP.Type} }, result: []ovsdb.Operation{ { Op: ovsdb.OperationWait, Timeout: nil, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "lsp0"}}, Until: string(ovsdb.WaitConditionNotEqual), Columns: []string{"name", "type"}, Rows: []ovsdb.Row{{"name": "lsp0", "type": "someType"}}, }, }, err: false, }, { name: "multiple conditions", condition: func(a API) ConditionalAPI { isUp := true lsp := testLogicalSwitchPort{} conditions := []model.Condition{ { Field: &lsp.Up, Function: ovsdb.ConditionNotEqual, Value: &isUp, }, { Field: &lsp.Name, Function: ovsdb.ConditionEqual, Value: "lspNameCondition", }, } return a.WhereAny(&lsp, conditions...) 
}, until: "!=", prepare: func() (model.Model, []interface{}) { testLSP := testLogicalSwitchPort{ Name: "lsp0", Type: "someType", } return &testLSP, []interface{}{&testLSP.Name, &testLSP.Type} }, result: []ovsdb.Operation{ { Op: ovsdb.OperationWait, Timeout: nil, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{ { Column: "up", Function: ovsdb.ConditionNotEqual, Value: ovsdb.OvsSet{GoSet: []interface{}{true}}, }, }, Until: string(ovsdb.WaitConditionNotEqual), Columns: []string{"name", "type"}, Rows: []ovsdb.Row{{"name": "lsp0", "type": "someType"}}, }, { Op: ovsdb.OperationWait, Timeout: nil, Table: "Logical_Switch_Port", Where: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "lspNameCondition"}}, Until: string(ovsdb.WaitConditionNotEqual), Columns: []string{"name", "type"}, Rows: []ovsdb.Row{{"name": "lsp0", "type": "someType"}}, }, }, err: false, }, { name: "non-indexed condition error", condition: func(a API) ConditionalAPI { isUp := false return a.Where(&testLogicalSwitchPort{Up: &isUp}) }, until: "==", prepare: func() (model.Model, []interface{}) { testLSP := testLogicalSwitchPort{Name: "lsp0"} return &testLSP, nil }, err: true, }, { name: "no operation", condition: func(a API) ConditionalAPI { return a.WhereCache(func(t *testLogicalSwitchPort) bool { return false }) }, until: "==", prepare: func() (model.Model, []interface{}) { testLSP := testLogicalSwitchPort{Name: "lsp0"} return &testLSP, nil }, result: []ovsdb.Operation{}, err: false, }, { name: "fails if conditional is an error", condition: func(a API) ConditionalAPI { return newConditionalAPI(nil, newErrorConditional(fmt.Errorf("error")), &discardLogger) }, prepare: func() (model.Model, []interface{}) { return nil, nil }, err: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("ApiWait: %s", tt.name), func(t *testing.T) { api := newAPI(tcache, &discardLogger) cond := tt.condition(api) model, fields := tt.prepare() ops, err := cond.Wait(tt.until, tt.timeout, model, fields...) 
if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.ElementsMatchf(t, tt.result, ops, "ovsdb.Operations should match") } }) } } golang-github-ovn-org-libovsdb-0.7.0/client/api_test_model.go000066400000000000000000000170651464501522100242420ustar00rootroot00000000000000package client import ( "encoding/json" "testing" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) var apiTestSchema = []byte(`{ "name": "OVN_Northbound", "version": "5.31.0", "cksum": "2352750632 28701", "tables": { "Logical_Switch": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Switch_Port", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "acls": {"type": {"key": {"type": "uuid", "refTable": "ACL", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "qos_rules": {"type": {"key": {"type": "uuid", "refTable": "QoS", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "load_balancer": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "dns_records": {"type": {"key": {"type": "uuid", "refTable": "DNS", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "other_config": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "forwarding_groups": { "type": {"key": {"type": "uuid", "refTable": "Forwarding_Group", "refType": "strong"}, "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_Switch_Port": { "columns": { "name": {"type": "string"}, "type": {"type": "string"}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, "tag_request": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4095}, "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "dynamic_addresses": {"type": {"key": "string", "min": 0, "max": 1}}, "port_security": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "dhcpv4_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "dhcpv6_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false} } }`) type testLogicalSwitch struct { UUID string `ovsdb:"_uuid"` Ports []string `ovsdb:"ports"` ExternalIds map[string]string `ovsdb:"external_ids"` Name string `ovsdb:"name"` QosRules []string `ovsdb:"qos_rules"` LoadBalancer []string `ovsdb:"load_balancer"` DNSRecords []string `ovsdb:"dns_records"` OtherConfig map[string]string `ovsdb:"other_config"` ForwardingGroups []string `ovsdb:"forwarding_groups"` Acls []string `ovsdb:"acls"` } // Table returns the table name. 
It's part of the Model interface func (*testLogicalSwitch) Table() string { return "Logical_Switch" } //LogicalSwitchPort struct defines an object in Logical_Switch_Port table type testLogicalSwitchPort struct { UUID string `ovsdb:"_uuid"` Up *bool `ovsdb:"up"` Dhcpv4Options *string `ovsdb:"dhcpv4_options"` Name string `ovsdb:"name"` DynamicAddresses *string `ovsdb:"dynamic_addresses"` HaChassisGroup *string `ovsdb:"ha_chassis_group"` Options map[string]string `ovsdb:"options"` Enabled *bool `ovsdb:"enabled"` Addresses []string `ovsdb:"addresses"` Dhcpv6Options *string `ovsdb:"dhcpv6_options"` TagRequest *int `ovsdb:"tag_request"` Tag *int `ovsdb:"tag"` PortSecurity []string `ovsdb:"port_security"` ExternalIds map[string]string `ovsdb:"external_ids"` Type string `ovsdb:"type"` ParentName *string `ovsdb:"parent_name"` } // Table returns the table name. It's part of the Model interface func (*testLogicalSwitchPort) Table() string { return "Logical_Switch_Port" } func apiTestCache(t testing.TB, data map[string]map[string]model.Model) *cache.TableCache { var schema ovsdb.DatabaseSchema err := json.Unmarshal(apiTestSchema, &schema) assert.Nil(t, err) db, err := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{"Logical_Switch": &testLogicalSwitch{}, "Logical_Switch_Port": &testLogicalSwitchPort{}}) assert.Nil(t, err) dbModel, errs := model.NewDatabaseModel(schema, db) assert.Empty(t, errs) cache, err := cache.NewTableCache(dbModel, data, nil) assert.Nil(t, err) return cache } golang-github-ovn-org-libovsdb-0.7.0/client/client.go000066400000000000000000001240271464501522100225250ustar00rootroot00000000000000package client import ( "context" "crypto/tls" "encoding/json" "errors" "fmt" "log" "net" "net/url" "os" "reflect" "strings" "sync" "time" "github.com/cenkalti/backoff/v4" "github.com/cenkalti/rpc2" "github.com/cenkalti/rpc2/jsonrpc" "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/ovsdb/serverdb" ) // Constants defined for libovsdb const ( SSL = "ssl" TCP = "tcp" UNIX = "unix" ) const serverDB = "_Server" // ErrNotConnected is an error returned when the client is not connected var ErrNotConnected = errors.New("not connected") // ErrAlreadyConnected is an error returned when the client is already connected var ErrAlreadyConnected = errors.New("already connected") // ErrUnsupportedRPC is an error returned when an unsupported RPC method is called var ErrUnsupportedRPC = errors.New("unsupported rpc") // Client represents an OVSDB Client Connection // It provides all the necessary functionality to Connect to a server, // perform transactions, and build your own replica of the database with // Monitor or MonitorAll. It also provides a Cache that is populated from OVSDB // update notifications. 
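// A minimal usage sketch of this interface (illustrative only and hedged:
// "Open_vSwitch" and "Bridge" are hypothetical placeholders for a real schema
// name and model struct, the endpoint address is arbitrary, and error handling
// is elided; the calls themselves are the ones declared below):
//
//	dbModel, _ := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Bridge": &Bridge{}})
//	c, _ := NewOVSDBClient(dbModel, WithEndpoint("tcp:127.0.0.1:6640"))
//	_ = c.Connect(context.Background())
//	_, _ = c.MonitorAll(context.Background())
//	// from here on, c.Cache() is kept up to date by update notifications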
type Client interface { Connect(context.Context) error Disconnect() Close() Schema() ovsdb.DatabaseSchema Cache() *cache.TableCache UpdateEndpoints([]string) SetOption(Option) error Connected() bool DisconnectNotify() chan struct{} Echo(context.Context) error Transact(context.Context, ...ovsdb.Operation) ([]ovsdb.OperationResult, error) Monitor(context.Context, *Monitor) (MonitorCookie, error) MonitorAll(context.Context) (MonitorCookie, error) MonitorCancel(ctx context.Context, cookie MonitorCookie) error NewMonitor(...MonitorOption) *Monitor CurrentEndpoint() string API } type bufferedUpdate struct { updates *ovsdb.TableUpdates updates2 *ovsdb.TableUpdates2 lastTxnID string } type epInfo struct { address string serverID string } // ovsdbClient is an OVSDB client type ovsdbClient struct { options *options metrics metrics connected bool rpcClient *rpc2.Client rpcMutex sync.RWMutex // endpoints contains all possible endpoints; the first element is // the active endpoint if connected=true endpoints []*epInfo // The name of the "primary" database - that is to say, the DB // that the user expects to interact with. primaryDBName string databases map[string]*database errorCh chan error stopCh chan struct{} disconnect chan struct{} shutdown bool shutdownMutex sync.Mutex handlerShutdown *sync.WaitGroup trafficSeen chan struct{} logger *logr.Logger } // database is everything needed to map between go types and an ovsdb Database type database struct { // model encapsulates the database schema and model of the database we're connecting to model model.DatabaseModel // modelMutex protects model from being replaced (via reconnect) while in use modelMutex sync.RWMutex // cache is used to store the updates for monitored tables cache *cache.TableCache // cacheMutex protects cache from being replaced (via reconnect) while in use cacheMutex sync.RWMutex api API // any ongoing monitors, so we can re-create them if we disconnect monitors map[string]*Monitor monitorsMutex sync.Mutex // tracks any outstanding updates while waiting for a monitor response deferUpdates bool deferredUpdates []*bufferedUpdate } // NewOVSDBClient creates a new OVSDB Client with the provided // database model. The client can be configured using one or more Option(s), // like WithTLSConfig. If no WithEndpoint option is supplied, the default of // unix:/var/run/openvswitch/ovsdb.sock is used func NewOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (Client, error) { return newOVSDBClient(clientDBModel, opts...) } // newOVSDBClient creates a new ovsdbClient func newOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (*ovsdbClient, error) { ovs := &ovsdbClient{ primaryDBName: clientDBModel.Name(), databases: map[string]*database{ clientDBModel.Name(): { model: model.NewPartialDatabaseModel(clientDBModel), monitors: make(map[string]*Monitor), deferUpdates: true, deferredUpdates: make([]*bufferedUpdate, 0), }, }, errorCh: make(chan error), handlerShutdown: &sync.WaitGroup{}, disconnect: make(chan struct{}), } var err error ovs.options, err = newOptions(opts...) 
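// Note on the assignment above (an assumption about newOptions, which appears
// to follow the usual functional-options pattern): each supplied Option
// mutates a defaults-initialized options struct in order, and the first
// Option to return an error aborts construction via the error check below.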
if err != nil { return nil, err } for _, address := range ovs.options.endpoints { ovs.endpoints = append(ovs.endpoints, &epInfo{address: address}) } if ovs.options.logger == nil { // create a new logger to log to stdout l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("libovsdb").WithValues( "database", ovs.primaryDBName, ) stdr.SetVerbosity(5) ovs.logger = &l } else { // add the "database" value to the structured logger // to make it easier to tell between different DBs (e.g. ovn nbdb vs. sbdb) l := ovs.options.logger.WithValues( "database", ovs.primaryDBName, ) ovs.logger = &l } ovs.metrics.init(clientDBModel.Name(), ovs.options.metricNamespace, ovs.options.metricSubsystem) ovs.registerMetrics() // if we should only connect to the leader, then add the special "_Server" database as well if ovs.options.leaderOnly { sm, err := serverdb.FullDatabaseModel() if err != nil { return nil, fmt.Errorf("could not initialize model _Server: %w", err) } ovs.databases[serverDB] = &database{ model: model.NewPartialDatabaseModel(sm), monitors: make(map[string]*Monitor), } } return ovs, nil } // Connect opens a connection to an OVSDB Server using the // endpoint provided when the Client was created. // The connection can be configured using one or more Option(s), like WithTLSConfig // If no WithEndpoint option is supplied, the default of unix:/var/run/openvswitch/ovsdb.sock is used func (o *ovsdbClient) Connect(ctx context.Context) error { if err := o.connect(ctx, false); err != nil { if err == ErrAlreadyConnected { return nil } return err } if o.options.leaderOnly { if err := o.watchForLeaderChange(); err != nil { return err } } return nil } // moveEndpointFirst makes the endpoint requested by active the first element // in the endpoints slice, indicating it is the active endpoint func (o *ovsdbClient) moveEndpointFirst(i int) { firstEp := o.endpoints[i] othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) o.endpoints = append([]*epInfo{firstEp}, othereps...) } // moveEndpointLast moves the requested endpoint to the end of the list func (o *ovsdbClient) moveEndpointLast(i int) { lastEp := o.endpoints[i] othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) o.endpoints = append(othereps, lastEp) } func (o *ovsdbClient) resetRPCClient() { if o.rpcClient != nil { o.rpcClient.Close() o.rpcClient = nil } } func (o *ovsdbClient) connect(ctx context.Context, reconnect bool) error { o.rpcMutex.Lock() defer o.rpcMutex.Unlock() if o.rpcClient != nil { return ErrAlreadyConnected } connected := false connectErrors := []error{} for i, endpoint := range o.endpoints { u, err := url.Parse(endpoint.address) if err != nil { return err } if sid, err := o.tryEndpoint(ctx, u); err != nil { o.resetRPCClient() connectErrors = append(connectErrors, fmt.Errorf("failed to connect to %s: %w", endpoint.address, err)) continue } else { o.logger.V(3).Info("successfully connected", "endpoint", endpoint.address, "sid", sid) endpoint.serverID = sid o.moveEndpointFirst(i) connected = true break } } if !connected { if len(connectErrors) == 1 { return connectErrors[0] } var combined []string for _, e := range connectErrors { combined = append(combined, e.Error()) } return fmt.Errorf("unable to connect to any endpoints: %s", strings.Join(combined, ". 
")) } // if we're reconnecting, re-start all the monitors if reconnect { o.logger.V(3).Info("reconnected - restarting monitors") for dbName, db := range o.databases { db.monitorsMutex.Lock() defer db.monitorsMutex.Unlock() // Purge entire cache if no monitors exist to update dynamically if len(db.monitors) == 0 { db.cache.Purge(db.model) continue } // Restart all monitors; each monitor will handle purging // the cache if necessary for id, request := range db.monitors { err := o.monitor(ctx, MonitorCookie{DatabaseName: dbName, ID: id}, true, request) if err != nil { o.resetRPCClient() return err } } } } go o.handleDisconnectNotification() if o.options.inactivityTimeout > 0 { o.handlerShutdown.Add(1) go o.handleInactivityProbes() } for _, db := range o.databases { o.handlerShutdown.Add(1) eventStopChan := make(chan struct{}) go o.handleClientErrors(eventStopChan) o.handlerShutdown.Add(1) go func(db *database) { defer o.handlerShutdown.Done() db.cache.Run(o.stopCh) close(eventStopChan) }(db) } o.connected = true return nil } // tryEndpoint connects to a single database endpoint. Returns the // server ID (if clustered) on success, or an error. func (o *ovsdbClient) tryEndpoint(ctx context.Context, u *url.URL) (string, error) { o.logger.V(3).Info("trying to connect", "endpoint", fmt.Sprintf("%v", u)) var dialer net.Dialer var err error var c net.Conn switch u.Scheme { case UNIX: c, err = dialer.DialContext(ctx, u.Scheme, u.Path) case TCP: c, err = dialer.DialContext(ctx, u.Scheme, u.Opaque) case SSL: dialer := tls.Dialer{ Config: o.options.tlsConfig, } c, err = dialer.DialContext(ctx, "tcp", u.Opaque) default: err = fmt.Errorf("unknown network protocol %s", u.Scheme) } if err != nil { return "", fmt.Errorf("failed to open connection: %w", err) } o.createRPC2Client(c) serverDBNames, err := o.listDbs(ctx) if err != nil { return "", err } // for every requested database, ensure the DB exists in the server and // that the schema matches what we expect. for dbName, db := range o.databases { // check the server has what we want found := false for _, name := range serverDBNames { if name == dbName { found = true break } } if !found { return "", fmt.Errorf("target database %s not found", dbName) } // load and validate the schema schema, err := o.getSchema(ctx, dbName) if err != nil { return "", err } db.modelMutex.Lock() var errors []error db.model, errors = model.NewDatabaseModel(schema, db.model.Client()) db.modelMutex.Unlock() if len(errors) > 0 { var combined []string for _, err := range errors { combined = append(combined, err.Error()) } return "", fmt.Errorf("database %s validation error (%d): %s", dbName, len(errors), strings.Join(combined, ". 
")) } db.cacheMutex.Lock() if db.cache == nil { db.cache, err = cache.NewTableCache(db.model, nil, o.logger) if err != nil { db.cacheMutex.Unlock() return "", err } db.api = newAPI(db.cache, o.logger) } db.cacheMutex.Unlock() } // check that this is the leader var sid string if o.options.leaderOnly { var leader bool leader, sid, err = o.isEndpointLeader(ctx) if err != nil { return "", err } if !leader { return "", fmt.Errorf("endpoint is not leader") } } return sid, nil } // createRPC2Client creates an rpcClient using the provided connection // It is also responsible for setting up go routines for client-side event handling // Should only be called when the mutex is held func (o *ovsdbClient) createRPC2Client(conn net.Conn) { o.stopCh = make(chan struct{}) if o.options.inactivityTimeout > 0 { o.trafficSeen = make(chan struct{}) } o.rpcClient = rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn)) o.rpcClient.SetBlocking(true) o.rpcClient.Handle("echo", func(_ *rpc2.Client, args []interface{}, reply *[]interface{}) error { return o.echo(args, reply) }) o.rpcClient.Handle("update", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { return o.update(args, reply) }) o.rpcClient.Handle("update2", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { return o.update2(args, reply) }) o.rpcClient.Handle("update3", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { return o.update3(args, reply) }) go o.rpcClient.Run() } // isEndpointLeader returns true if the currently connected endpoint is leader, // otherwise false or an error. If the currently connected endpoint is the leader // and the database is clustered, also returns the database's Server ID. // Assumes rpcMutex is held. func (o *ovsdbClient) isEndpointLeader(ctx context.Context) (bool, string, error) { op := ovsdb.Operation{ Op: ovsdb.OperationSelect, Table: "Database", Columns: []string{"name", "model", "leader", "sid"}, } results, err := o.transact(ctx, serverDB, true, op) if err != nil { return false, "", fmt.Errorf("could not check if server was leader: %w", err) } // for now, if no rows are returned, just accept this server if len(results) != 1 { return true, "", nil } result := results[0] if len(result.Rows) == 0 { return true, "", nil } for _, row := range result.Rows { dbName, ok := row["name"].(string) if !ok { return false, "", fmt.Errorf("could not parse name") } if dbName != o.primaryDBName { continue } model, ok := row["model"].(string) if !ok { return false, "", fmt.Errorf("could not parse model") } // the database reports whether or not it is part of a cluster via the // "model" column. If it's not clustered, it is by definition leader. if model != serverdb.DatabaseModelClustered { return true, "", nil } // Clustered database must have a Server ID sid, ok := row["sid"].(ovsdb.UUID) if !ok { return false, "", fmt.Errorf("could not parse server id") } leader, ok := row["leader"].(bool) if !ok { return false, "", fmt.Errorf("could not parse leader") } return leader, sid.GoUUID, nil } // Extremely unlikely: there is no _Server row for the desired DB (which we made sure existed) // for now, just continue o.logger.V(3).Info("Couldn't find a row in _Server for our database. 
Continuing without leader detection", "database", o.primaryDBName) return true, "", nil } func (o *ovsdbClient) primaryDB() *database { return o.databases[o.primaryDBName] } // Schema returns the DatabaseSchema that is being used by the client // it will be nil until a connection has been established func (o *ovsdbClient) Schema() ovsdb.DatabaseSchema { db := o.primaryDB() db.modelMutex.RLock() defer db.modelMutex.RUnlock() return db.model.Schema } // Cache returns the TableCache that is populated from // ovsdb update notifications. It will be nil until a connection // has been established, and empty unless you call Monitor func (o *ovsdbClient) Cache() *cache.TableCache { db := o.primaryDB() db.cacheMutex.RLock() defer db.cacheMutex.RUnlock() return db.cache } // UpdateEndpoints sets client endpoints // It is intended to be called at runtime func (o *ovsdbClient) UpdateEndpoints(endpoints []string) { o.logger.V(3).Info("update endpoints", "endpoints", endpoints) o.rpcMutex.Lock() defer o.rpcMutex.Unlock() if len(endpoints) == 0 { endpoints = []string{defaultUnixEndpoint} } o.options.endpoints = endpoints originEps := o.endpoints[:] var newEps []*epInfo activeIdx := -1 for i, address := range o.options.endpoints { var serverID string for j, origin := range originEps { if address == origin.address { if j == 0 { activeIdx = i } serverID = origin.serverID break } } newEps = append(newEps, &epInfo{address: address, serverID: serverID}) } o.endpoints = newEps if activeIdx > 0 { o.moveEndpointFirst(activeIdx) } else if activeIdx == -1 { o._disconnect() } } // SetOption sets a new value for an option. // It may only be called when the client is not connected func (o *ovsdbClient) SetOption(opt Option) error { o.rpcMutex.RLock() defer o.rpcMutex.RUnlock() if o.rpcClient != nil { return fmt.Errorf("cannot set option when client is connected") } return opt(o.options) } // Connected returns whether or not the client is currently connected to the server func (o *ovsdbClient) Connected() bool { o.rpcMutex.RLock() defer o.rpcMutex.RUnlock() return o.connected } func (o *ovsdbClient) CurrentEndpoint() string { o.rpcMutex.RLock() defer o.rpcMutex.RUnlock() if o.rpcClient == nil { return "" } return o.endpoints[0].address } // DisconnectNotify returns a channel which will notify the caller when the // server has disconnected func (o *ovsdbClient) DisconnectNotify() chan struct{} { return o.disconnect } // RFC 7047 : Section 4.1.6 : Echo func (o *ovsdbClient) echo(args []interface{}, reply *[]interface{}) error { *reply = args return nil } // RFC 7047 : Update Notification Section 4.1.6 // params is an array of length 2: [json-value, table-updates] // - json-value: the arbitrary json-value passed when creating the Monitor, i.e. the "cookie" // - table-updates: map of table name to table-update. 
Table-update is a map of uuid to (old, new) row pairs func (o *ovsdbClient) update(params []json.RawMessage, reply *[]interface{}) error { cookie := MonitorCookie{} *reply = []interface{}{} if len(params) > 2 { return fmt.Errorf("update requires exactly 2 args") } err := json.Unmarshal(params[0], &cookie) if err != nil { return err } var updates ovsdb.TableUpdates err = json.Unmarshal(params[1], &updates) if err != nil { return err } db := o.databases[cookie.DatabaseName] if db == nil { return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) } o.metrics.numUpdates.WithLabelValues(cookie.DatabaseName).Inc() for tableName := range updates { o.metrics.numTableUpdates.WithLabelValues(cookie.DatabaseName, tableName).Inc() } db.cacheMutex.Lock() if db.deferUpdates { db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{&updates, nil, ""}) db.cacheMutex.Unlock() return nil } db.cacheMutex.Unlock() // Update the local DB cache with the tableUpdates db.cacheMutex.RLock() err = db.cache.Update(cookie.ID, updates) db.cacheMutex.RUnlock() if err != nil { o.errorCh <- err } return err } // update2 handling from ovsdb-server.7 func (o *ovsdbClient) update2(params []json.RawMessage, reply *[]interface{}) error { cookie := MonitorCookie{} *reply = []interface{}{} if len(params) > 2 { return fmt.Errorf("update2 requires exactly 2 args") } err := json.Unmarshal(params[0], &cookie) if err != nil { return err } var updates ovsdb.TableUpdates2 err = json.Unmarshal(params[1], &updates) if err != nil { return err } db := o.databases[cookie.DatabaseName] if db == nil { return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) } db.cacheMutex.Lock() if db.deferUpdates { db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, ""}) db.cacheMutex.Unlock() return nil } db.cacheMutex.Unlock() // Update the local DB cache with the tableUpdates db.cacheMutex.RLock() err = db.cache.Update2(cookie, updates) db.cacheMutex.RUnlock() if err != nil { o.errorCh <- err } return err } // update3 handling from ovsdb-server.7 func (o *ovsdbClient) update3(params []json.RawMessage, reply *[]interface{}) error { cookie := MonitorCookie{} *reply = []interface{}{} if len(params) > 3 { return fmt.Errorf("update3 requires exactly 3 args") } err := json.Unmarshal(params[0], &cookie) if err != nil { return err } var lastTransactionID string err = json.Unmarshal(params[1], &lastTransactionID) if err != nil { return err } var updates ovsdb.TableUpdates2 err = json.Unmarshal(params[2], &updates) if err != nil { return err } db := o.databases[cookie.DatabaseName] if db == nil { return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) } db.cacheMutex.Lock() if db.deferUpdates { db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, lastTransactionID}) db.cacheMutex.Unlock() return nil } db.cacheMutex.Unlock() // Update the local DB cache with the tableUpdates db.cacheMutex.RLock() err = db.cache.Update2(cookie, updates) db.cacheMutex.RUnlock() if err == nil { db.monitorsMutex.Lock() mon := db.monitors[cookie.ID] mon.LastTransactionID = lastTransactionID db.monitorsMutex.Unlock() } return err } // getSchema returns the schema in use for the provided database name // RFC 7047 : get_schema // Should only be called when mutex is held func (o *ovsdbClient) getSchema(ctx context.Context, dbName string) (ovsdb.DatabaseSchema, error) { args := ovsdb.NewGetSchemaArgs(dbName) var reply ovsdb.DatabaseSchema err := 
o.rpcClient.CallWithContext(ctx, "get_schema", args, &reply) if err != nil { if err == rpc2.ErrShutdown { return ovsdb.DatabaseSchema{}, ErrNotConnected } return ovsdb.DatabaseSchema{}, err } return reply, err } // listDbs returns the list of databases on the server // RFC 7047 : list_dbs // Should only be called when mutex is held func (o *ovsdbClient) listDbs(ctx context.Context) ([]string, error) { var dbs []string err := o.rpcClient.CallWithContext(ctx, "list_dbs", nil, &dbs) if err != nil { if err == rpc2.ErrShutdown { return nil, ErrNotConnected } return nil, fmt.Errorf("listdbs failure - %v", err) } return dbs, err } // logFromContext returns a Logger from ctx or return the default logger func (o *ovsdbClient) logFromContext(ctx context.Context) *logr.Logger { if logger, err := logr.FromContext(ctx); err == nil { return &logger } return o.logger } // Transact performs the provided Operations on the database // RFC 7047 : transact func (o *ovsdbClient) Transact(ctx context.Context, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { logger := o.logFromContext(ctx) o.rpcMutex.RLock() if o.rpcClient == nil || !o.connected { o.rpcMutex.RUnlock() if o.options.reconnect { logger.V(5).Info("blocking transaction until reconnected", "operations", fmt.Sprintf("%+v", operation)) ticker := time.NewTicker(50 * time.Millisecond) defer ticker.Stop() ReconnectWaitLoop: for { select { case <-ctx.Done(): return nil, fmt.Errorf("%w: while awaiting reconnection", ctx.Err()) case <-ticker.C: o.rpcMutex.RLock() if o.rpcClient != nil && o.connected { break ReconnectWaitLoop } o.rpcMutex.RUnlock() } } } else { return nil, ErrNotConnected } } defer o.rpcMutex.RUnlock() return o.transact(ctx, o.primaryDBName, false, operation...) } func (o *ovsdbClient) transact(ctx context.Context, dbName string, skipChWrite bool, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { logger := o.logFromContext(ctx) var reply []ovsdb.OperationResult db := o.databases[dbName] db.modelMutex.RLock() schema := o.databases[dbName].model.Schema db.modelMutex.RUnlock() if reflect.DeepEqual(schema, ovsdb.DatabaseSchema{}) { return nil, fmt.Errorf("cannot transact to database %s: schema unknown", dbName) } if ok := schema.ValidateOperations(operation...); !ok { return nil, fmt.Errorf("validation failed for the operation") } args := ovsdb.NewTransactArgs(dbName, operation...) 
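// Illustrative aside: per RFC 7047 the "transact" params are the database
// name followed by the operations, so a single insert would be expected to
// serialize roughly as (sketch; the names are only the ones this package's
// tests use):
//
//	["OVN_Northbound", {"op": "insert", "table": "Logical_Switch", "row": {"name": "foo"}}]
//
// The args value assembled above is the Go-side form of that positional list.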
if o.rpcClient == nil { return nil, ErrNotConnected } dbgLogger := logger.WithValues("database", dbName).V(4) if dbgLogger.Enabled() { dbgLogger.Info("transacting operations", "operations", fmt.Sprintf("%+v", operation)) } err := o.rpcClient.CallWithContext(ctx, "transact", args, &reply) if err != nil { if err == rpc2.ErrShutdown { return nil, ErrNotConnected } return nil, err } if !skipChWrite && o.trafficSeen != nil { o.trafficSeen <- struct{}{} } return reply, nil } // MonitorAll is a convenience method to monitor every table/column func (o *ovsdbClient) MonitorAll(ctx context.Context) (MonitorCookie, error) { m := newMonitor() for name := range o.primaryDB().model.Types() { m.Tables = append(m.Tables, TableMonitor{Table: name}) } return o.Monitor(ctx, m) } // MonitorCancel will request cancel a previously issued monitor request // RFC 7047 : monitor_cancel func (o *ovsdbClient) MonitorCancel(ctx context.Context, cookie MonitorCookie) error { var reply ovsdb.OperationResult args := ovsdb.NewMonitorCancelArgs(cookie) o.rpcMutex.Lock() defer o.rpcMutex.Unlock() if o.rpcClient == nil { return ErrNotConnected } err := o.rpcClient.CallWithContext(ctx, "monitor_cancel", args, &reply) if err != nil { if err == rpc2.ErrShutdown { return ErrNotConnected } return err } if reply.Error != "" { return fmt.Errorf("error while executing transaction: %s", reply.Error) } o.primaryDB().monitorsMutex.Lock() defer o.primaryDB().monitorsMutex.Unlock() delete(o.primaryDB().monitors, cookie.ID) o.metrics.numMonitors.Dec() return nil } // Monitor will provide updates for a given table/column // and populate the cache with them. Subsequent updates will be processed // by the Update Notifications // RFC 7047 : monitor func (o *ovsdbClient) Monitor(ctx context.Context, monitor *Monitor) (MonitorCookie, error) { cookie := newMonitorCookie(o.primaryDBName) db := o.databases[o.primaryDBName] db.monitorsMutex.Lock() defer db.monitorsMutex.Unlock() return cookie, o.monitor(ctx, cookie, false, monitor) } // If fields is provided, the request will be constrained to the provided columns // If no fields are provided, all columns will be used func newMonitorRequest(data *mapper.Info, fields []string, conditions []ovsdb.Condition) (*ovsdb.MonitorRequest, error) { var columns []string if len(fields) > 0 { columns = append(columns, fields...) } else { for c := range data.Metadata.TableSchema.Columns { columns = append(columns, c) } } return &ovsdb.MonitorRequest{Columns: columns, Where: conditions, Select: ovsdb.NewDefaultMonitorSelect()}, nil } // monitor must only be called with a lock on monitorsMutex // //gocyclo:ignore func (o *ovsdbClient) monitor(ctx context.Context, cookie MonitorCookie, reconnecting bool, monitor *Monitor) error { // if we're reconnecting, we already hold the rpcMutex if !reconnecting { o.rpcMutex.RLock() defer o.rpcMutex.RUnlock() } if o.rpcClient == nil { return ErrNotConnected } if len(monitor.Errors) != 0 { var errString []string for _, err := range monitor.Errors { errString = append(errString, err.Error()) } return fmt.Errorf(strings.Join(errString, ". 
")) } if len(monitor.Tables) == 0 { return fmt.Errorf("at least one table should be monitored") } dbName := cookie.DatabaseName db := o.databases[dbName] db.modelMutex.RLock() typeMap := db.model.Types() requests := make(map[string]ovsdb.MonitorRequest) for _, o := range monitor.Tables { _, ok := typeMap[o.Table] if !ok { return fmt.Errorf("type for table %s does not exist in model", o.Table) } model, err := db.model.NewModel(o.Table) if err != nil { return err } info, err := db.model.NewModelInfo(model) if err != nil { return err } request, err := newMonitorRequest(info, o.Fields, o.Conditions) if err != nil { return err } requests[o.Table] = *request } db.modelMutex.RUnlock() var args []interface{} if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { // If we are reconnecting a CondSince monitor that is the only // monitor, then we can use its LastTransactionID since it is // valid (because we're reconnecting) and we can safely keep // the cache intact (because it's the only monitor). transactionID := emptyUUID if reconnecting && len(db.monitors) == 1 { transactionID = monitor.LastTransactionID } args = ovsdb.NewMonitorCondSinceArgs(dbName, cookie, requests, transactionID) } else { args = ovsdb.NewMonitorArgs(dbName, cookie, requests) } var err error var tableUpdates interface{} var lastTransactionFound bool switch monitor.Method { case ovsdb.MonitorRPC: var reply ovsdb.TableUpdates err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) tableUpdates = reply case ovsdb.ConditionalMonitorRPC: var reply ovsdb.TableUpdates2 err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) tableUpdates = reply case ovsdb.ConditionalMonitorSinceRPC: var reply ovsdb.MonitorCondSinceReply err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) if err == nil && reply.Found { monitor.LastTransactionID = reply.LastTransactionID lastTransactionFound = true } tableUpdates = reply.Updates default: return fmt.Errorf("unsupported monitor method: %v", monitor.Method) } if err != nil { if err == rpc2.ErrShutdown { return ErrNotConnected } if err.Error() == "unknown method" { if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { o.logger.V(3).Error(err, "method monitor_cond_since not supported, falling back to monitor_cond") monitor.Method = ovsdb.ConditionalMonitorRPC return o.monitor(ctx, cookie, reconnecting, monitor) } if monitor.Method == ovsdb.ConditionalMonitorRPC { o.logger.V(3).Error(err, "method monitor_cond not supported, falling back to monitor") monitor.Method = ovsdb.MonitorRPC return o.monitor(ctx, cookie, reconnecting, monitor) } } return err } if !reconnecting { db.monitors[cookie.ID] = monitor o.metrics.numMonitors.Inc() } db.cacheMutex.Lock() defer db.cacheMutex.Unlock() // On reconnect, purge the cache _unless_ the only monitor is a // MonitorCondSince one, whose LastTransactionID was known to the // server. In this case the reply contains only updates to the existing // cache data, while otherwise it includes complete DB data so we must // purge to get rid of old rows. 
if reconnecting && (len(db.monitors) > 1 || !lastTransactionFound) { db.cache.Purge(db.model) } if monitor.Method == ovsdb.MonitorRPC { u := tableUpdates.(ovsdb.TableUpdates) err = db.cache.Populate(u) } else { u := tableUpdates.(ovsdb.TableUpdates2) err = db.cache.Populate2(u) } if err != nil { return err } // populate any deferred updates db.deferUpdates = false for _, update := range db.deferredUpdates { if update.updates != nil { if err = db.cache.Populate(*update.updates); err != nil { return err } } if update.updates2 != nil { if err = db.cache.Populate2(*update.updates2); err != nil { return err } } if len(update.lastTxnID) > 0 { db.monitors[cookie.ID].LastTransactionID = update.lastTxnID } } // clear deferred updates for next time db.deferredUpdates = make([]*bufferedUpdate, 0) return err } // Echo tests the liveness of the OVSDB connection func (o *ovsdbClient) Echo(ctx context.Context) error { args := ovsdb.NewEchoArgs() var reply []interface{} o.rpcMutex.RLock() defer o.rpcMutex.RUnlock() if o.rpcClient == nil { return ErrNotConnected } err := o.rpcClient.CallWithContext(ctx, "echo", args, &reply) if err != nil { if err == rpc2.ErrShutdown { return ErrNotConnected } } if !reflect.DeepEqual(args, reply) { return fmt.Errorf("incorrect server response: %v, %v", args, reply) } return nil } // watchForLeaderChange will trigger a reconnect if the connected endpoint // ever loses leadership func (o *ovsdbClient) watchForLeaderChange() error { updates := make(chan model.Model) o.databases[serverDB].cache.AddEventHandler(&cache.EventHandlerFuncs{ UpdateFunc: func(table string, _, new model.Model) { if table == "Database" { updates <- new } }, }) m := newMonitor() // NOTE: _Server does not support monitor_cond_since m.Method = ovsdb.ConditionalMonitorRPC m.Tables = []TableMonitor{{Table: "Database"}} db := o.databases[serverDB] db.monitorsMutex.Lock() defer db.monitorsMutex.Unlock() err := o.monitor(context.Background(), newMonitorCookie(serverDB), false, m) if err != nil { return err } go func() { for m := range updates { dbInfo, ok := m.(*serverdb.Database) if !ok { continue } // Ignore the dbInfo for _Server if dbInfo.Name != o.primaryDBName { continue } // Only handle leadership changes for clustered databases if dbInfo.Model != serverdb.DatabaseModelClustered { continue } // Clustered database servers must have a valid Server ID var sid string if dbInfo.Sid != nil { sid = *dbInfo.Sid } if sid == "" { o.logger.V(3).Info("clustered database update contained invalid server ID") continue } o.rpcMutex.Lock() if !dbInfo.Leader && o.connected { activeEndpoint := o.endpoints[0] if sid == activeEndpoint.serverID { o.logger.V(3).Info("endpoint lost leader, reconnecting", "endpoint", activeEndpoint.address, "sid", sid) // don't immediately reconnect to the active endpoint since it's no longer leader o.moveEndpointLast(0) o._disconnect() } else { o.logger.V(3).Info("endpoint lost leader but had unexpected server ID", "endpoint", activeEndpoint.address, "expected", activeEndpoint.serverID, "found", sid) } } o.rpcMutex.Unlock() } }() return nil } func (o *ovsdbClient) handleClientErrors(stopCh <-chan struct{}) { defer o.handlerShutdown.Done() var errColumnNotFound *mapper.ErrColumnNotFound var errCacheInconsistent *cache.ErrCacheInconsistent var errIndexExists *cache.ErrIndexExists for { select { case <-stopCh: return case err := <-o.errorCh: if errors.As(err, &errColumnNotFound) { o.logger.V(3).Error(err, "error updating cache, DB schema may be newer than client!") } else if errors.As(err, 
&errCacheInconsistent) || errors.As(err, &errIndexExists) { // trigger a reconnect, which will purge the cache // hopefully a rebuild will fix any inconsistency o.logger.V(3).Error(err, "triggering reconnect to rebuild cache") // for rebuilding cache with mon_cond_since (not yet fully supported in libovsdb) we // need to reset the last txn ID for _, db := range o.databases { db.monitorsMutex.Lock() for _, mon := range db.monitors { mon.LastTransactionID = emptyUUID } db.monitorsMutex.Unlock() } o.Disconnect() } else { o.logger.V(3).Error(err, "error updating cache") } } } } func (o *ovsdbClient) sendEcho(args []interface{}, reply *[]interface{}) *rpc2.Call { o.rpcMutex.RLock() defer o.rpcMutex.RUnlock() if o.rpcClient == nil { return nil } return o.rpcClient.Go("echo", args, reply, make(chan *rpc2.Call, 1)) } func (o *ovsdbClient) handleInactivityProbes() { defer o.handlerShutdown.Done() echoReplied := make(chan string) var lastEcho string stopCh := o.stopCh trafficSeen := o.trafficSeen for { select { case <-stopCh: return case <-trafficSeen: // We got some traffic from the server, restart our timer case ts := <-echoReplied: // Got a response from the server, check it against lastEcho; if same clear lastEcho; if not same Disconnect() if ts != lastEcho { o.Disconnect() return } lastEcho = "" case <-time.After(o.options.inactivityTimeout): // If there's a lastEcho already, then we didn't get a server reply, disconnect if lastEcho != "" { o.Disconnect() return } // Otherwise send an echo thisEcho := fmt.Sprintf("%d", time.Now().UnixMicro()) args := []interface{}{"libovsdb echo", thisEcho} var reply []interface{} // Can't use o.Echo() because it blocks; we need the Call object direct from o.rpcClient.Go() call := o.sendEcho(args, &reply) if call == nil { o.Disconnect() return } lastEcho = thisEcho go func() { // Wait for the echo reply select { case <-stopCh: return case <-call.Done: if call.Error != nil { // RPC timeout; disconnect o.logger.V(3).Error(call.Error, "server echo reply error") o.Disconnect() } else if !reflect.DeepEqual(args, reply) { o.logger.V(3).Info("warning: incorrect server echo reply", "expected", args, "reply", reply) o.Disconnect() } else { // Otherwise stuff thisEcho into the echoReplied channel echoReplied <- thisEcho } } }() } } } func (o *ovsdbClient) handleDisconnectNotification() { <-o.rpcClient.DisconnectNotify() // close the stopCh, which will stop the cache event processor close(o.stopCh) if o.trafficSeen != nil { close(o.trafficSeen) } o.metrics.numDisconnects.Inc() // wait for client related handlers to shutdown o.handlerShutdown.Wait() o.rpcMutex.Lock() if o.options.reconnect && !o.shutdown { o.rpcClient = nil o.rpcMutex.Unlock() suppressionCounter := 1 connect := func() error { // need to ensure deferredUpdates is cleared on every reconnect attempt for _, db := range o.databases { db.cacheMutex.Lock() db.deferredUpdates = make([]*bufferedUpdate, 0) db.deferUpdates = true db.cacheMutex.Unlock() } ctx, cancel := context.WithTimeout(context.Background(), o.options.timeout) defer cancel() err := o.connect(ctx, true) if err != nil { if suppressionCounter < 5 { o.logger.V(2).Error(err, "failed to reconnect") } else if suppressionCounter == 5 { o.logger.V(2).Error(err, "reconnect has failed 5 times, suppressing logging "+ "for future attempts") } } suppressionCounter++ return err } o.logger.V(3).Info("connection lost, reconnecting", "endpoint", o.endpoints[0].address) err := backoff.Retry(connect, o.options.backoff) if err != nil { // TODO: We should look at 
passing this back to the // caller to handle panic(err) } // this goroutine finishes, and is replaced with a new one (from Connect) return } // clear connection state o.rpcClient = nil o.rpcMutex.Unlock() for _, db := range o.databases { db.cacheMutex.Lock() defer db.cacheMutex.Unlock() db.cache = nil // need to defer updates if/when we reconnect and clear any stale updates db.deferUpdates = true db.deferredUpdates = make([]*bufferedUpdate, 0) db.modelMutex.Lock() defer db.modelMutex.Unlock() db.model = model.NewPartialDatabaseModel(db.model.Client()) db.monitorsMutex.Lock() defer db.monitorsMutex.Unlock() db.monitors = make(map[string]*Monitor) } o.metrics.numMonitors.Set(0) o.shutdownMutex.Lock() defer o.shutdownMutex.Unlock() o.shutdown = false select { case o.disconnect <- struct{}{}: // sent disconnect notification to client default: // client is not listening to the channel } } // _disconnect will close the connection to the OVSDB server // If the client was created with WithReconnect then the client // will reconnect afterwards. Assumes rpcMutex is held. func (o *ovsdbClient) _disconnect() { o.connected = false if o.rpcClient == nil { return } o.rpcClient.Close() } // Disconnect will close the connection to the OVSDB server // If the client was created with WithReconnect then the client // will reconnect afterwards func (o *ovsdbClient) Disconnect() { o.rpcMutex.Lock() defer o.rpcMutex.Unlock() o._disconnect() } // Close will close the connection to the OVSDB server // It will remove all stored state ready for the next connection // Even If the client was created with WithReconnect it will not reconnect afterwards func (o *ovsdbClient) Close() { o.rpcMutex.Lock() defer o.rpcMutex.Unlock() o.connected = false if o.rpcClient == nil { return } o.shutdownMutex.Lock() defer o.shutdownMutex.Unlock() o.shutdown = true o.rpcClient.Close() } // Ensures the cache is consistent by evaluating that the client is connected // and the monitor is fully setup, with the cache populated. Caller must hold // the database's cache mutex for reading. func isCacheConsistent(db *database) bool { // This works because when a client is disconnected the deferUpdates variable // will be set to true. deferUpdates is also protected by the db.cacheMutex. // When the client reconnects and then re-establishes the monitor; the final step // is to process all deferred updates, set deferUpdates back to false, and unlock cacheMutex return !db.deferUpdates } // best effort to ensure cache is in a good state for reading. RLocks the // database's cache before returning; caller must always unlock. 
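// A sketch of the expected caller pattern, mirroring how Get and List use it
// below:
//
//	waitForCacheConsistent(ctx, db, logger, dbName)
//	defer db.cacheMutex.RUnlock()
//	// ... read from the cache via db.api ...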
func waitForCacheConsistent(ctx context.Context, db *database, logger *logr.Logger, dbName string) { if !hasMonitors(db) { db.cacheMutex.RLock() return } // Check immediately as a fastpath db.cacheMutex.RLock() if isCacheConsistent(db) { return } db.cacheMutex.RUnlock() ticker := time.NewTicker(50 * time.Millisecond) defer ticker.Stop() for { select { case <-ctx.Done(): logger.V(3).Info("warning: unable to ensure cache consistency for reading", "database", dbName) db.cacheMutex.RLock() return case <-ticker.C: db.cacheMutex.RLock() if isCacheConsistent(db) { return } db.cacheMutex.RUnlock() } } } func hasMonitors(db *database) bool { db.monitorsMutex.Lock() defer db.monitorsMutex.Unlock() return len(db.monitors) > 0 } // Client API interface wrapper functions // We add this wrapper to allow users to access the API directly on the // client object // Get implements the API interface's Get function func (o *ovsdbClient) Get(ctx context.Context, model model.Model) error { primaryDB := o.primaryDB() waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName) defer primaryDB.cacheMutex.RUnlock() return primaryDB.api.Get(ctx, model) } // Create implements the API interface's Create function func (o *ovsdbClient) Create(models ...model.Model) ([]ovsdb.Operation, error) { return o.primaryDB().api.Create(models...) } // List implements the API interface's List function func (o *ovsdbClient) List(ctx context.Context, result interface{}) error { primaryDB := o.primaryDB() waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName) defer primaryDB.cacheMutex.RUnlock() return primaryDB.api.List(ctx, result) } // Where implements the API interface's Where function func (o *ovsdbClient) Where(models ...model.Model) ConditionalAPI { return o.primaryDB().api.Where(models...) } // WhereAny implements the API interface's WhereAny function func (o *ovsdbClient) WhereAny(m model.Model, conditions ...model.Condition) ConditionalAPI { return o.primaryDB().api.WhereAny(m, conditions...) } // WhereAll implements the API interface's WhereAll function func (o *ovsdbClient) WhereAll(m model.Model, conditions ...model.Condition) ConditionalAPI { return o.primaryDB().api.WhereAll(m, conditions...) 
} // WhereCache implements the API interface's WhereCache function func (o *ovsdbClient) WhereCache(predicate interface{}) ConditionalAPI { return o.primaryDB().api.WhereCache(predicate) } golang-github-ovn-org-libovsdb-0.7.0/client/client_test.go000066400000000000000000001013351464501522100235610ustar00rootroot00000000000000package client import ( "bytes" "context" "encoding/json" "fmt" "log" "math/rand" "os" "reflect" "strings" "sync/atomic" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/cenkalti/rpc2" "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/database/inmemory" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/ovsdb/serverdb" "github.com/ovn-org/libovsdb/server" "github.com/ovn-org/libovsdb/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( aUUID0 = "2f77b348-9768-4866-b761-89d5177ecda0" aUUID1 = "2f77b348-9768-4866-b761-89d5177ecda1" aUUID2 = "2f77b348-9768-4866-b761-89d5177ecda2" aUUID3 = "2f77b348-9768-4866-b761-89d5177ecda3" ) type ( BridgeFailMode = string BridgeProtocols = string ) const ( BridgeFailModeStandalone BridgeFailMode = "standalone" BridgeFailModeSecure BridgeFailMode = "secure" BridgeProtocolsOpenflow10 BridgeProtocols = "OpenFlow10" BridgeProtocolsOpenflow11 BridgeProtocols = "OpenFlow11" BridgeProtocolsOpenflow12 BridgeProtocols = "OpenFlow12" BridgeProtocolsOpenflow13 BridgeProtocols = "OpenFlow13" BridgeProtocolsOpenflow14 BridgeProtocols = "OpenFlow14" BridgeProtocolsOpenflow15 BridgeProtocols = "OpenFlow15" ) // Bridge defines an object in Bridge table type Bridge struct { UUID string `ovsdb:"_uuid"` AutoAttach *string `ovsdb:"auto_attach"` Controller []string `ovsdb:"controller"` DatapathID *string `ovsdb:"datapath_id"` DatapathType string `ovsdb:"datapath_type"` DatapathVersion string `ovsdb:"datapath_version"` ExternalIDs map[string]string `ovsdb:"external_ids"` FailMode *BridgeFailMode `ovsdb:"fail_mode"` FloodVLANs []int `ovsdb:"flood_vlans"` FlowTables map[int]string `ovsdb:"flow_tables"` IPFIX *string `ovsdb:"ipfix"` McastSnoopingEnable bool `ovsdb:"mcast_snooping_enable"` Mirrors []string `ovsdb:"mirrors"` Name string `ovsdb:"name"` Netflow *string `ovsdb:"netflow"` OtherConfig map[string]string `ovsdb:"other_config"` Ports []string `ovsdb:"ports"` Protocols []BridgeProtocols `ovsdb:"protocols"` RSTPEnable bool `ovsdb:"rstp_enable"` RSTPStatus map[string]string `ovsdb:"rstp_status"` Sflow *string `ovsdb:"sflow"` Status map[string]string `ovsdb:"status"` STPEnable bool `ovsdb:"stp_enable"` } // OpenvSwitch defines an object in Open_vSwitch table type OpenvSwitch struct { UUID string `ovsdb:"_uuid"` Bridges []string `ovsdb:"bridges"` CurCfg int `ovsdb:"cur_cfg"` DatapathTypes []string `ovsdb:"datapath_types"` Datapaths map[string]string `ovsdb:"datapaths"` DbVersion *string `ovsdb:"db_version"` DpdkInitialized bool `ovsdb:"dpdk_initialized"` DpdkVersion *string `ovsdb:"dpdk_version"` ExternalIDs map[string]string `ovsdb:"external_ids"` IfaceTypes []string `ovsdb:"iface_types"` ManagerOptions []string `ovsdb:"manager_options"` NextCfg int `ovsdb:"next_cfg"` OtherConfig map[string]string `ovsdb:"other_config"` OVSVersion *string `ovsdb:"ovs_version"` SSL *string `ovsdb:"ssl"` Statistics map[string]string `ovsdb:"statistics"` SystemType *string `ovsdb:"system_type"` SystemVersion *string `ovsdb:"system_version"` } var 
defDB, _ = model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Open_vSwitch": &OpenvSwitch{}, "Bridge": &Bridge{}, }, ) var schema = `{ "name": "Open_vSwitch", "version": "8.2.0", "tables": { "Bridge": { "columns": { "auto_attach": { "type": { "key": { "type": "uuid", "refTable": "AutoAttach" }, "min": 0, "max": 1 } }, "controller": { "type": { "key": { "type": "uuid", "refTable": "Controller" }, "min": 0, "max": "unlimited" } }, "datapath_id": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 }, "ephemeral": true }, "datapath_type": { "type": "string" }, "datapath_version": { "type": "string" }, "external_ids": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" } }, "fail_mode": { "type": { "key": { "type": "string", "enum": [ "set", [ "standalone", "secure" ] ] }, "min": 0, "max": 1 } }, "flood_vlans": { "type": { "key": { "type": "integer", "minInteger": 0, "maxInteger": 4095 }, "min": 0, "max": 4096 } }, "flow_tables": { "type": { "key": { "type": "integer", "minInteger": 0, "maxInteger": 254 }, "value": { "type": "uuid", "refTable": "Flow_Table" }, "min": 0, "max": "unlimited" } }, "ipfix": { "type": { "key": { "type": "uuid", "refTable": "IPFIX" }, "min": 0, "max": 1 } }, "mcast_snooping_enable": { "type": "boolean" }, "mirrors": { "type": { "key": { "type": "uuid", "refTable": "Mirror" }, "min": 0, "max": "unlimited" } }, "name": { "type": "string", "mutable": false }, "netflow": { "type": { "key": { "type": "uuid", "refTable": "NetFlow" }, "min": 0, "max": 1 } }, "other_config": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" } }, "ports": { "type": { "key": { "type": "uuid", "refTable": "Port" }, "min": 0, "max": "unlimited" } }, "protocols": { "type": { "key": { "type": "string", "enum": [ "set", [ "OpenFlow10", "OpenFlow11", "OpenFlow12", "OpenFlow13", "OpenFlow14", "OpenFlow15" ] ] }, "min": 0, "max": "unlimited" } }, "rstp_enable": { "type": "boolean" }, "rstp_status": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" }, "ephemeral": true }, "sflow": { "type": { "key": { "type": "uuid", "refTable": "sFlow" }, "min": 0, "max": 1 } }, "status": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" }, "ephemeral": true }, "stp_enable": { "type": "boolean" } }, "indexes": [ [ "name" ] ] }, "Open_vSwitch": { "columns": { "bridges": { "type": { "key": { "type": "uuid", "refTable": "Bridge" }, "min": 0, "max": "unlimited" } }, "cur_cfg": { "type": "integer" }, "datapath_types": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "datapaths": { "type": { "key": { "type": "string" }, "value": { "type": "uuid", "refTable": "Datapath" }, "min": 0, "max": "unlimited" } }, "db_version": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } }, "dpdk_initialized": { "type": "boolean" }, "dpdk_version": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } }, "external_ids": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" } }, "iface_types": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "manager_options": { "type": { "key": { "type": "uuid", "refTable": "Manager" }, "min": 0, "max": "unlimited" } }, "next_cfg": { "type": "integer" }, "other_config": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" } }, 
"ovs_version": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } }, "ssl": { "type": { "key": { "type": "uuid", "refTable": "SSL" }, "min": 0, "max": 1 } }, "statistics": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" }, "ephemeral": true }, "system_type": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } }, "system_version": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } } } } } }` func testOvsSet(t *testing.T, set interface{}) ovsdb.OvsSet { oSet, err := ovsdb.NewOvsSet(set) assert.Nil(t, err) return oSet } func testOvsMap(t *testing.T, set interface{}) ovsdb.OvsMap { oMap, err := ovsdb.NewOvsMap(set) assert.Nil(t, err) return oMap } func updateBenchmark(ovs *ovsdbClient, updates []byte, b *testing.B) { for n := 0; n < b.N; n++ { params := []json.RawMessage{[]byte(`{"databaseName":"Open_vSwitch","id":"v1"}`), updates} var reply []interface{} err := ovs.update(params, &reply) if err != nil { b.Fatal(err) } } } func newBridgeRow(name string) string { return `{ "connection_mode": [ "set", [] ], "controller": [ "set", [] ], "datapath_id": "blablabla", "datapath_type": "", "datapath_version": "", "external_ids": [ "map", [["foo","bar"]]], "fail_mode": [ "set", [] ], "flood_vlans": [ "set", [] ], "flow_tables": [ "map", [] ], "ipfix": [ "set", [] ], "mcast_snooping_enable": false, "mirrors": [ "set", [] ], "name": "` + name + `", "netflow": [ "set", [] ], "other_config": [ "map", [["bar","quux"]]], "ports": [ "set", [] ], "protocols": [ "set", [] ], "rstp_enable": false, "rstp_status": [ "map", [] ], "sflow": [ "set", [] ], "status": [ "map", [] ], "stp_enable": false }` } func newOvsRow(bridges ...string) string { bridgeUUIDs := []string{} for _, b := range bridges { bridgeUUIDs = append(bridgeUUIDs, `[ "uuid", "`+b+`" ]`) } return `{ "bridges": [ "set", [` + strings.Join(bridgeUUIDs, `,`) + `]], "cur_cfg": 0, "datapath_types": [ "set", [] ], "datapaths": [ "map", [] ], "db_version": "8.2.0", "dpdk_initialized": false, "dpdk_version": [ "set", [] ], "external_ids": [ "map", [["system-id","829f8534-94a8-468e-9176-132738cf260a"]]], "iface_types": [ "set", [] ], "manager_options": ["uuid", "6e4cd5fc-f51a-462a-b3d6-a696af6d7a84"], "next_cfg": 0, "other_config": [ "map", [] ], "ovs_version": "2.15.90", "ssl": [ "set", [] ], "statistics": [ "map", [] ], "system_type": "docker-ovs", "system_version": "0.1" }` } func BenchmarkUpdate1(b *testing.B) { ovs, err := newOVSDBClient(defDB) require.NoError(b, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(b, err) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) require.NoError(b, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) require.Empty(b, errs) ovs.primaryDB().cache, err = cache.NewTableCache(dbModel, nil, nil) require.NoError(b, err) update := []byte(`{ "Open_vSwitch": { "ovs": {"new": ` + newOvsRow("foo") + `} }, "Bridge": { "foo": {"new": ` + newBridgeRow("foo") + `} } }`) updateBenchmark(ovs, update, b) } func BenchmarkUpdate2(b *testing.B) { ovs, err := newOVSDBClient(defDB) require.NoError(b, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(b, err) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) require.NoError(b, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) 
require.Empty(b, errs) ovs.primaryDB().cache, err = cache.NewTableCache(dbModel, nil, nil) require.NoError(b, err) update := []byte(`{ "Open_vSwitch": { "ovs": {"new": ` + newOvsRow("foo", "bar") + `} }, "Bridge": { "foo": {"new": ` + newBridgeRow("foo") + `}, "bar": {"new": ` + newBridgeRow("bar") + `} } }`) updateBenchmark(ovs, update, b) } func BenchmarkUpdate3(b *testing.B) { ovs, err := newOVSDBClient(defDB) require.NoError(b, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(b, err) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) require.NoError(b, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) require.Empty(b, errs) ovs.primaryDB().cache, err = cache.NewTableCache(dbModel, nil, nil) require.NoError(b, err) update := []byte(`{ "Open_vSwitch": { "ovs": {"new": ` + newOvsRow("foo", "bar", "baz") + `} }, "Bridge": { "foo": {"new": ` + newBridgeRow("foo") + `}, "bar": {"new": ` + newBridgeRow("bar") + `}, "baz": {"new": ` + newBridgeRow("baz") + `} } }`) updateBenchmark(ovs, update, b) } func BenchmarkUpdate5(b *testing.B) { ovs, err := newOVSDBClient(defDB) require.NoError(b, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(b, err) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) require.NoError(b, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) require.Empty(b, errs) ovs.primaryDB().cache, err = cache.NewTableCache(dbModel, nil, nil) require.NoError(b, err) update := []byte(`{ "Open_vSwitch": { "ovs": {"new": ` + newOvsRow("foo", "bar", "baz", "quux", "foofoo") + `} }, "Bridge": { "foo": {"new": ` + newBridgeRow("foo") + `}, "bar": {"new": ` + newBridgeRow("bar") + `}, "baz": {"new": ` + newBridgeRow("baz") + `}, "quux": {"new": ` + newBridgeRow("quux") + `}, "foofoo": {"new": ` + newBridgeRow("foofoo") + `} } }`) updateBenchmark(ovs, update, b) } func BenchmarkUpdate8(b *testing.B) { ovs, err := newOVSDBClient(defDB) require.NoError(b, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(b, err) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) require.NoError(b, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) require.Empty(b, errs) ovs.primaryDB().cache, err = cache.NewTableCache(dbModel, nil, nil) require.NoError(b, err) update := []byte(`{ "Open_vSwitch": { "ovs": {"new": ` + newOvsRow("foo", "bar", "baz", "quux", "foofoo", "foobar", "foobaz", "fooquux") + `} }, "Bridge": { "foo": {"new": ` + newBridgeRow("foo") + `}, "bar": {"new": ` + newBridgeRow("bar") + `}, "baz": {"new": ` + newBridgeRow("baz") + `}, "quux": {"new": ` + newBridgeRow("quux") + `}, "foofoo": {"new": ` + newBridgeRow("foofoo") + `}, "foobar": {"new": ` + newBridgeRow("foobar") + `}, "foobaz": {"new": ` + newBridgeRow("foobaz") + `}, "fooquux": {"new": ` + newBridgeRow("fooquux") + `} } }`) updateBenchmark(ovs, update, b) } func TestEcho(t *testing.T) { req := []interface{}{"hi"} var reply []interface{} ovs, err := newOVSDBClient(defDB) require.NoError(t, err) err = ovs.echo(req, &reply) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(req, reply) { t.Error("Expected: ", req, " Got: ", reply) } } func TestUpdate(t *testing.T) { ovs, err := newOVSDBClient(defDB) require.NoError(t, 
err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(t, err) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) require.NoError(t, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) require.Empty(t, errs) ovs.primaryDB().cache, err = cache.NewTableCache(dbModel, nil, nil) require.NoError(t, err) var reply []interface{} update := []byte(`{ "Open_vSwitch": { "ovs": {"new": ` + newOvsRow("foo") + `} }, "Bridge": { "foo": {"new": ` + newBridgeRow("foo") + `} } }`) params := []json.RawMessage{[]byte(`{"databaseName":"Open_vSwitch","id":"v1"}`), update} err = ovs.update(params, &reply) if err != nil { t.Error(err) } } func TestOperationWhenNeverConnected(t *testing.T) { ovs, err := newOVSDBClient(defDB) require.NoError(t, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(t, err) tests := []struct { name string fn func() error }{ { "echo", func() error { return ovs.Echo(context.TODO()) }, }, { "transact", func() error { comment := "this is only a test" _, err := ovs.Transact(context.TODO(), ovsdb.Operation{Op: ovsdb.OperationComment, Comment: &comment}) return err }, }, { "monitor/monitor all", func() error { _, err := ovs.MonitorAll(context.TODO()) return err }, }, { "monitor cancel", func() error { return ovs.MonitorCancel(context.TODO(), newMonitorCookie(s.Name)) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.fn() assert.EqualError(t, err, ErrNotConnected.Error()) }) } } func TestTransactionLogger(t *testing.T) { stdr.SetVerbosity(5) var defSchema ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &defSchema) require.NoError(t, err) _, sock := newOVSDBServer(t, defDB, defSchema) // Create client for this server's Server database endpoint := fmt.Sprintf("unix:%s", sock) var defaultBuf bytes.Buffer defaultL := stdr.New(log.New(&defaultBuf, "", log.LstdFlags)).WithName("default") // Create client to test transaction logger ovs, err := newOVSDBClient(defDB, WithEndpoint(endpoint), WithLogger(&defaultL)) require.NoError(t, err) err = ovs.Connect(context.Background()) require.NoError(t, err) t.Cleanup(ovs.Close) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(t, err) dbModel, err := test.GetModel() require.NoError(t, err) m := mapper.NewMapper(dbModel.Schema) bridge1 := test.BridgeType{ Name: "foo", ExternalIds: map[string]string{ "foo": "bar", "baz": "quux", "waldo": "fred", }, } bridgeInfo1, err := dbModel.NewModelInfo(&bridge1) require.NoError(t, err) bridgeRow1, err := m.NewRow(bridgeInfo1) require.Nil(t, err) bridgeUUID1 := uuid.NewString() operation1 := ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID1, Row: bridgeRow1, } _, _ = ovs.Transact(context.TODO(), operation1) assert.Contains(t, defaultBuf.String(), "default") bridge2 := test.BridgeType{ Name: "bar", ExternalIds: map[string]string{ "foo": "bar", "baz": "quux", "waldo": "fred", }, } bridgeInfo2, err := dbModel.NewModelInfo(&bridge2) require.NoError(t, err) bridgeRow2, err := m.NewRow(bridgeInfo2) require.Nil(t, err) bridgeUUID2 := uuid.NewString() operation2 := ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID2, Row: bridgeRow2, } var customBuf bytes.Buffer customL := stdr.New(log.New(&customBuf, "", log.LstdFlags)).WithName("custom") ctx := logr.NewContext(context.TODO(), customL) _, _ = ovs.Transact(ctx, operation2) 
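// The second transaction carried a logger in its context, so its debug output
// should land in customBuf (asserted below) rather than in the client's
// default logger.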
assert.Contains(t, customBuf.String(), "custom") } func TestOperationWhenNotConnected(t *testing.T) { ovs, err := newOVSDBClient(defDB) require.NoError(t, err) var s ovsdb.DatabaseSchema err = json.Unmarshal([]byte(schema), &s) require.NoError(t, err) var errs []error fullModel, errs := model.NewDatabaseModel(s, ovs.primaryDB().model.Client()) require.Equalf(t, len(errs), 0, "expected no error but some occurred: %+v", errs) ovs.primaryDB().model = fullModel tests := []struct { name string fn func() error }{ { "echo", func() error { return ovs.Echo(context.TODO()) }, }, { "transact", func() error { comment := "this is only a test" _, err := ovs.Transact(context.TODO(), ovsdb.Operation{Op: ovsdb.OperationComment, Comment: &comment}) return err }, }, { "monitor/monitor all", func() error { _, err := ovs.MonitorAll(context.TODO()) return err }, }, { "monitor cancel", func() error { return ovs.MonitorCancel(context.TODO(), newMonitorCookie(s.Name)) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.fn() assert.EqualError(t, err, ErrNotConnected.Error()) }) } } func TestSetOption(t *testing.T) { o, err := newOVSDBClient(defDB) require.NoError(t, err) o.options, err = newOptions() require.NoError(t, err) err = o.SetOption(WithEndpoint("tcp::6640")) require.NoError(t, err) o.rpcClient = &rpc2.Client{} err = o.SetOption(WithEndpoint("tcp::6641")) assert.EqualError(t, err, "cannot set option when client is connected") } func newOVSDBServer(t *testing.T, dbModel model.ClientDBModel, schema ovsdb.DatabaseSchema) (*server.OvsdbServer, string) { serverDBModel, err := serverdb.FullDatabaseModel() require.NoError(t, err) serverSchema := serverdb.Schema() db := inmemory.NewDatabase(map[string]model.ClientDBModel{ schema.Name: dbModel, serverSchema.Name: serverDBModel, }) dbMod, errs := model.NewDatabaseModel(schema, dbModel) require.Empty(t, errs) servMod, errs := model.NewDatabaseModel(serverSchema, serverDBModel) require.Empty(t, errs) server, err := server.NewOvsdbServer(db, dbMod, servMod) require.NoError(t, err) tmpfile := fmt.Sprintf("/tmp/ovsdb-%d.sock", rand.Intn(10000)) t.Cleanup(func() { os.Remove(tmpfile) }) go func() { if err := server.Serve("unix", tmpfile); err != nil { t.Error(err) } }() t.Cleanup(server.Close) require.Eventually(t, func() bool { return server.Ready() }, 1*time.Second, 10*time.Millisecond) return server, tmpfile } func newClientServerPair(t *testing.T, connectCounter, disConnectCounter *int32, isLeader bool) (Client, *serverdb.Database, string) { var defSchema ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &defSchema) require.NoError(t, err) serverDBModel, err := serverdb.FullDatabaseModel() require.NoError(t, err) // Create server s, sock := newOVSDBServer(t, defDB, defSchema) s.OnConnect(func(_ *rpc2.Client) { atomic.AddInt32(connectCounter, 1) }) s.OnDisConnect(func(_ *rpc2.Client) { atomic.AddInt32(disConnectCounter, 1) }) // Create client for this server's Server database endpoint := fmt.Sprintf("unix:%s", sock) cli, err := newOVSDBClient(serverDBModel, WithEndpoint(endpoint)) require.NoError(t, err) err = cli.Connect(context.Background()) require.NoError(t, err) t.Cleanup(cli.Close) // Populate the _Server database table sid := fmt.Sprintf("%04x", rand.Uint32()) row := &serverdb.Database{ UUID: uuid.NewString(), Name: defDB.Name(), Connected: true, Leader: isLeader, Model: serverdb.DatabaseModelClustered, Sid: &sid, } ops, err := cli.Create(row) require.Nil(t, err) reply, err := cli.Transact(context.Background(), ops...) 
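// A nil transact error only covers the RPC itself; CheckOperationResults below
// surfaces per-operation failures reported inside the reply.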
assert.Nil(t, err) opErr, err := ovsdb.CheckOperationResults(reply, ops) assert.NoErrorf(t, err, "%+v", opErr) row.UUID = reply[0].UUID.GoUUID return cli, row, endpoint } func setLeader(t *testing.T, cli Client, row *serverdb.Database, isLeader bool) { row.Leader = isLeader ops, err := cli.Where(row).Update(row, &row.Leader) require.Nil(t, err) reply, err := cli.Transact(context.Background(), ops...) require.Nil(t, err) opErr, err := ovsdb.CheckOperationResults(reply, ops) assert.NoErrorf(t, err, "%+v", opErr) } func TestClientInactiveCheck(t *testing.T) { var defSchema ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &defSchema) require.NoError(t, err) serverDBModel, err := serverdb.FullDatabaseModel() require.NoError(t, err) // Create server server, sock := newOVSDBServer(t, defDB, defSchema) // Create client to test inactivity check. endpoint := fmt.Sprintf("unix:%s", sock) ovs, err := newOVSDBClient(serverDBModel, WithInactivityCheck(2*time.Second, 1*time.Second, &backoff.ZeroBackOff{}), WithEndpoint(endpoint)) require.NoError(t, err) err = ovs.Connect(context.Background()) require.NoError(t, err) t.Cleanup(ovs.Close) // Make server to do echo off and then on for two times. // Ensure this is detected by client's inactivity probe // each time and then reconnects to the server when it // is started responding to echo requests. // 1st test for client with making server not to respond for echo requests. notified := make(chan struct{}) ready := make(chan struct{}) disconnectNotify := ovs.rpcClient.DisconnectNotify() go func() { ready <- struct{}{} <-disconnectNotify notified <- struct{}{} }() <-ready server.DoEcho(false) select { case <-notified: // got notification case <-time.After(5 * time.Second): assert.Fail(t, "client doesn't detect the echo failure") } // 2nd test for client with making server to respond for echo requests. server.DoEcho(true) loop: for timeout := time.After(5 * time.Second); ; { select { case <-timeout: assert.Fail(t, "reconnect is not successful") default: if ovs.Connected() { break loop } } } // 3rd test for client with making server not to respond for echo requests. notified = make(chan struct{}) ready = make(chan struct{}) disconnectNotify = ovs.rpcClient.DisconnectNotify() go func() { ready <- struct{}{} <-disconnectNotify notified <- struct{}{} }() <-ready server.DoEcho(false) select { case <-notified: // got notification case <-time.After(5 * time.Second): assert.Fail(t, "client doesn't detect the echo failure") } // 4th test for client with making server to respond for echo requests. 
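// Repeating the echo off/on cycle confirms the inactivity probe and reconnect
// logic keep working after the first recovery, not just on the initial failure.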
server.DoEcho(true) loop1: for timeout := time.After(5 * time.Second); ; { select { case <-timeout: assert.Fail(t, "reconnect is not successful") default: if ovs.Connected() { break loop1 } } } } func TestClientReconnectLeaderOnly(t *testing.T) { rand.Seed(time.Now().UnixNano()) var connected1, connected2, disConnected1, disConnected2 int32 cli1, row1, endpoint1 := newClientServerPair(t, &connected1, &disConnected1, true) cli2, row2, endpoint2 := newClientServerPair(t, &connected2, &disConnected2, false) // Create client to test reconnection for ovs, err := newOVSDBClient(defDB, WithLeaderOnly(true), WithReconnect(5*time.Second, &backoff.ZeroBackOff{}), WithEndpoint(endpoint1), WithEndpoint(endpoint2)) require.NoError(t, err) err = ovs.Connect(context.Background()) require.NoError(t, err) t.Cleanup(ovs.Close) // Server1 should have 2 connections: cli1 and ovs require.Eventually(t, func() bool { return atomic.LoadInt32(&connected1) == 2 }, 2*time.Second, 10*time.Millisecond) // Server2 should have 1 connection: cli2 require.Never(t, func() bool { return atomic.LoadInt32(&connected2) > 1 }, 2*time.Second, 10*time.Millisecond) // First leadership change setLeader(t, cli2, row2, true) setLeader(t, cli1, row1, false) // Server2 should have 2 connections: cli2 and ovs require.Eventually(t, func() bool { return atomic.LoadInt32(&connected2) == 2 }, 2*time.Second, 10*time.Millisecond) // Server1 should still only have 2 total connections; eg the // client under test should not have reconnected require.Never(t, func() bool { return atomic.LoadInt32(&connected1) > 2 }, 2*time.Second, 10*time.Millisecond) // Second leadership change setLeader(t, cli1, row1, true) setLeader(t, cli2, row2, false) // Server1 should now have 3 total connections: cli1, original ovs, // and second ovs require.Eventually(t, func() bool { return atomic.LoadInt32(&connected1) == 3 }, 2*time.Second, 10*time.Millisecond) // Server2 should still only have 2 total connections; eg the // client under test should not have reconnected require.Never(t, func() bool { return atomic.LoadInt32(&connected2) > 2 }, 2*time.Second, 10*time.Millisecond) } func TestNewMonitorRequest(t *testing.T) { var testSchema = []byte(`{ "cksum": "223619766 22548", "name": "TestSchema", "tables": { "TestTable": { "indexes": [["name"],["composed_1","composed_2"]], "columns": { "name": { "type": "string" }, "composed_1": { "type": { "key": "string" } }, "composed_2": { "type": { "key": "string" } }, "int1": { "type": { "key": "integer" } }, "int2": { "type": { "key": "integer" } }, "config": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`) type testType struct { ID string `ovsdb:"_uuid"` MyName string `ovsdb:"name"` Config map[string]string `ovsdb:"config"` Comp1 string `ovsdb:"composed_1"` Comp2 string `ovsdb:"composed_2"` Int1 int `ovsdb:"int1"` Int2 int `ovsdb:"int2"` } var schema ovsdb.DatabaseSchema err := json.Unmarshal(testSchema, &schema) require.NoError(t, err) testTable := &testType{} info, err := mapper.NewInfo("TestTable", schema.Table("TestTable"), testTable) assert.NoError(t, err) mr, err := newMonitorRequest(info, nil, nil) require.NoError(t, err) assert.ElementsMatch(t, mr.Columns, []string{"name", "config", "composed_1", "composed_2", "int1", "int2"}) mr2, err := newMonitorRequest(info, []string{"int1", "name"}, nil) require.NoError(t, err) assert.ElementsMatch(t, mr2.Columns, []string{"int1", "name"}) } func TestUpdateEndpoints(t *testing.T) { rand.Seed(time.Now().UnixNano()) var connected1, 
connected2, connected3, disConnected1, disConnected2, disConnected3 int32 _, _, endpoint1 := newClientServerPair(t, &connected1, &disConnected1, true) _, _, endpoint2 := newClientServerPair(t, &connected2, &disConnected2, false) _, _, endpoint3 := newClientServerPair(t, &connected3, &disConnected3, true) // Create client to test reconnection for ovs, err := newOVSDBClient(defDB, WithLeaderOnly(true), WithReconnect(1*time.Second, &backoff.ZeroBackOff{}), WithEndpoint(endpoint1)) require.NoError(t, err) err = ovs.Connect(context.Background()) require.NoError(t, err) t.Cleanup(ovs.Close) require.Eventually(t, func() bool { return atomic.LoadInt32(&connected1) == 2 }, 2*time.Second, 10*time.Millisecond) require.Equal(t, ovs.CurrentEndpoint(), endpoint1) require.NotEmpty(t, ovs.endpoints[0].serverID) // update with same endpoints should not have a disconnect ovs.UpdateEndpoints([]string{endpoint1}) require.Eventually(t, func() bool { // connect should not increase return atomic.LoadInt32(&connected1) == 2 }, 2*time.Second, 10*time.Millisecond) require.Eventually(t, func() bool { // should not disconnect return atomic.LoadInt32(&disConnected1) == 0 }, 2*time.Second, 10*time.Millisecond) ovs.UpdateEndpoints([]string{endpoint2, endpoint1}) require.Eventually(t, func() bool { return ovs.CurrentEndpoint() == endpoint1 }, 2*time.Second, 10*time.Millisecond) require.Eventually(t, func() bool { return atomic.LoadInt32(&connected2) == 1 }, 2*time.Second, 10*time.Millisecond) require.Eventually(t, func() bool { // server1 should still be the active return atomic.LoadInt32(&disConnected1) == 0 }, 2*time.Second, 10*time.Millisecond) require.Equal(t, ovs.endpoints[0].address, endpoint1) require.Equal(t, ovs.endpoints[1].address, endpoint2) require.NotEmpty(t, ovs.endpoints[0].serverID) // server3 is the new leader ovs.UpdateEndpoints([]string{endpoint2, endpoint3}) require.Eventually(t, func() bool { return ovs.CurrentEndpoint() == endpoint3 }, 2*time.Second, 10*time.Millisecond) require.Eventually(t, func() bool { return atomic.LoadInt32(&disConnected2) == 1 }, 2*time.Second, 10*time.Millisecond) require.Eventually(t, func() bool { return atomic.LoadInt32(&connected3) == 2 }, 2*time.Second, 10*time.Millisecond) require.Equal(t, ovs.endpoints[0].address, endpoint3) require.Equal(t, ovs.endpoints[1].address, endpoint2) require.NotEmpty(t, ovs.endpoints[0].serverID) } golang-github-ovn-org-libovsdb-0.7.0/client/condition.go000066400000000000000000000165541464501522100232420ustar00rootroot00000000000000package client import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // Conditional is the interface used by the ConditionalAPI to match on cache objects // and generate ovsdb conditions type Conditional interface { // Generate returns a list of lists of conditions to be used in Operations // Each element in the (outer) list corresponds to an operation Generate() ([][]ovsdb.Condition, error) // Returns the models that match the conditions Matches() (map[string]model.Model, error) // returns the table that this condition is associated with Table() string } func generateConditionsFromModels(dbModel model.DatabaseModel, models map[string]model.Model) ([][]ovsdb.Condition, error) { anyConditions := make([][]ovsdb.Condition, 0, len(models)) for _, model := range models { info, err := dbModel.NewModelInfo(model) if err != nil { return nil, err } allConditions, err := 
dbModel.Mapper.NewEqualityCondition(info) if err != nil { return nil, err } anyConditions = append(anyConditions, allConditions) } return anyConditions, nil } func generateOvsdbConditionsFromModelConditions(dbModel model.DatabaseModel, info *mapper.Info, conditions []model.Condition, singleOp bool) ([][]ovsdb.Condition, error) { anyConditions := [][]ovsdb.Condition{} if singleOp { anyConditions = append(anyConditions, []ovsdb.Condition{}) } for _, condition := range conditions { ovsdbCond, err := dbModel.Mapper.NewCondition(info, condition.Field, condition.Function, condition.Value) if err != nil { return nil, err } allConditions := []ovsdb.Condition{*ovsdbCond} if singleOp { anyConditions[0] = append(anyConditions[0], allConditions...) } else { anyConditions = append(anyConditions, allConditions) } } return anyConditions, nil } // equalityConditional uses the indexes available in a provided model to find a // matching model in the database. type equalityConditional struct { tableName string models []model.Model cache *cache.TableCache } func (c *equalityConditional) Table() string { return c.tableName } // Returns the models that match the indexes available through the provided // model. func (c *equalityConditional) Matches() (map[string]model.Model, error) { tableCache := c.cache.Table(c.tableName) if tableCache == nil { return nil, ErrNotFound } return tableCache.RowsByModels(c.models) } // Generate conditions based on the equality of the first available index. If // the index can be matched against a model in the cache, the condition will be // based on the UUID of the found model. Otherwise, the conditions will be based // on the index. func (c *equalityConditional) Generate() ([][]ovsdb.Condition, error) { models, err := c.Matches() if err != nil && err != ErrNotFound { return nil, err } if len(models) == 0 { // no cache hits, generate condition from models we were given modelMap := make(map[string]model.Model, len(c.models)) for i, m := range c.models { // generateConditionsFromModels() ignores the map keys // so just use the range index modelMap[fmt.Sprintf("%d", i)] = m } return generateConditionsFromModels(c.cache.DatabaseModel(), modelMap) } return generateConditionsFromModels(c.cache.DatabaseModel(), models) } // NewEqualityCondition creates a new equalityConditional func newEqualityConditional(table string, cache *cache.TableCache, models []model.Model) (Conditional, error) { return &equalityConditional{ tableName: table, models: models, cache: cache, }, nil } // explicitConditional generates conditions based on the provided Condition list type explicitConditional struct { tableName string anyConditions [][]ovsdb.Condition cache *cache.TableCache } func (c *explicitConditional) Table() string { return c.tableName } // Returns the models that match the conditions func (c *explicitConditional) Matches() (map[string]model.Model, error) { tableCache := c.cache.Table(c.tableName) if tableCache == nil { return nil, ErrNotFound } found := map[string]model.Model{} for _, allConditions := range c.anyConditions { models, err := tableCache.RowsByCondition(allConditions) if err != nil { return nil, err } for uuid, model := range models { found[uuid] = model } } return found, nil } // Generate returns conditions based on the provided Condition list func (c *explicitConditional) Generate() ([][]ovsdb.Condition, error) { models, err := c.Matches() if err != nil && err != ErrNotFound { return nil, err } if len(models) == 0 { // no cache hits, return conditions we were given return 
c.anyConditions, nil } return generateConditionsFromModels(c.cache.DatabaseModel(), models) } // newExplicitConditional creates a new explicitConditional func newExplicitConditional(table string, cache *cache.TableCache, matchAll bool, model model.Model, cond ...model.Condition) (Conditional, error) { dbModel := cache.DatabaseModel() info, err := dbModel.NewModelInfo(model) if err != nil { return nil, err } anyConditions, err := generateOvsdbConditionsFromModelConditions(dbModel, info, cond, matchAll) if err != nil { return nil, err } return &explicitConditional{ tableName: table, anyConditions: anyConditions, cache: cache, }, nil } // predicateConditional is a Conditional that calls a provided function pointer // to match on models. type predicateConditional struct { tableName string predicate interface{} cache *cache.TableCache } // matches returns the result of the execution of the predicate // Type verifications are not performed // Returns the models that match the conditions func (c *predicateConditional) Matches() (map[string]model.Model, error) { tableCache := c.cache.Table(c.tableName) if tableCache == nil { return nil, ErrNotFound } found := map[string]model.Model{} // run the predicate on a shallow copy of the models for speed and only // clone the matches for u, m := range tableCache.RowsShallow() { ret := reflect.ValueOf(c.predicate).Call([]reflect.Value{reflect.ValueOf(m)}) if ret[0].Bool() { found[u] = model.Clone(m) } } return found, nil } func (c *predicateConditional) Table() string { return c.tableName } // generate returns a list of conditions that match, by _uuid equality, all the objects that // match the predicate func (c *predicateConditional) Generate() ([][]ovsdb.Condition, error) { models, err := c.Matches() if err != nil { return nil, err } return generateConditionsFromModels(c.cache.DatabaseModel(), models) } // newPredicateConditional creates a new predicateConditional func newPredicateConditional(table string, cache *cache.TableCache, predicate interface{}) (Conditional, error) { return &predicateConditional{ tableName: table, predicate: predicate, cache: cache, }, nil } // errorConditional is a conditional that encapsulates an error // It is used to delay the reporting of errors from conditional creation to API method call type errorConditional struct { err error } func (e *errorConditional) Matches() (map[string]model.Model, error) { return nil, e.err } func (e *errorConditional) Table() string { return "" } func (e *errorConditional) Generate() ([][]ovsdb.Condition, error) { return nil, e.err } func newErrorConditional(err error) Conditional { return &errorConditional{ err: fmt.Errorf("conditionerror: %s", err.Error()), } } golang-github-ovn-org-libovsdb-0.7.0/client/condition_test.go000066400000000000000000000354261464501522100243000ustar00rootroot00000000000000package client import ( "fmt" "testing" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) func TestEqualityConditional(t *testing.T) { lspcacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, }, &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", ExternalIds: map[string]string{"unique": "id"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "lsp3", ExternalIds: 
map[string]string{"foo": "baz"}, Enabled: &trueVal, }, } lspcache := map[string]model.Model{} for i := range lspcacheList { lspcache[lspcacheList[i].(*testLogicalSwitchPort).UUID] = lspcacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lspcache, } tcache := apiTestCache(t, testData) test := []struct { name string models []model.Model condition [][]ovsdb.Condition matches map[string]model.Model err bool }{ { name: "by uuid", models: []model.Model{ &testLogicalSwitchPort{UUID: aUUID0, Name: "different"}, }, condition: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}, }}}, matches: map[string]model.Model{aUUID0: lspcacheList[0]}, }, { name: "by uuids", models: []model.Model{ &testLogicalSwitchPort{UUID: aUUID0, Name: "different"}, &testLogicalSwitchPort{UUID: aUUID1, Name: "different2"}, }, condition: [][]ovsdb.Condition{ {{ Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}, }}, {{ Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }}, }, matches: map[string]model.Model{ aUUID0: lspcacheList[0], aUUID1: lspcacheList[1], }, }, { name: "by index with cache", models: []model.Model{ &testLogicalSwitchPort{Name: "lsp1"}, &testLogicalSwitchPort{Name: "lsp2"}, }, condition: [][]ovsdb.Condition{ {{ Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }}, {{ Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}, }}, }, matches: map[string]model.Model{ aUUID1: lspcacheList[1], aUUID2: lspcacheList[2], }, }, { name: "by index with no cache", models: []model.Model{ &testLogicalSwitchPort{Name: "foo"}, &testLogicalSwitchPort{Name: "123"}, }, condition: [][]ovsdb.Condition{ {{ Column: "name", Function: ovsdb.ConditionEqual, Value: "foo", }}, {{ Column: "name", Function: ovsdb.ConditionEqual, Value: "123", }}, }, }, { name: "by non index", models: []model.Model{ &testLogicalSwitchPort{ExternalIds: map[string]string{"foo": "baz"}}, }, err: true, }, { name: "by non index multiple models", models: []model.Model{ &testLogicalSwitchPort{ExternalIds: map[string]string{"foo": "baz"}}, &testLogicalSwitchPort{ExternalIds: map[string]string{"foo": "123"}}, }, err: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("Equality Conditional: %s", tt.name), func(t *testing.T) { cond, err := newEqualityConditional("Logical_Switch_Port", tcache, tt.models) assert.Nil(t, err) matches, err := cond.Matches() assert.Nil(t, err) assert.Equal(t, tt.matches, matches) generated, err := cond.Generate() if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.ElementsMatch(t, tt.condition, generated) } }) } } func TestPredicateConditional(t *testing.T) { lspcacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, }, &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", ExternalIds: map[string]string{"unique": "id"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "lsp3", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &trueVal, }, } lspcache := map[string]model.Model{} for i := range lspcacheList { lspcache[lspcacheList[i].(*testLogicalSwitchPort).UUID] = lspcacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lspcache, } tcache := apiTestCache(t, testData) test := []struct { name string 
predicate interface{} condition [][]ovsdb.Condition matches map[string]model.Model err bool }{ { name: "simple value comparison", predicate: func(lsp *testLogicalSwitchPort) bool { return lsp.UUID == aUUID0 }, condition: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}, }}}, matches: map[string]model.Model{aUUID0: lspcacheList[0]}, }, { name: "by random field", predicate: func(lsp *testLogicalSwitchPort) bool { return lsp.Enabled != nil && *lsp.Enabled == false }, condition: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }}, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}, }}}, matches: map[string]model.Model{ aUUID1: lspcacheList[1], aUUID2: lspcacheList[2], }, }, } for _, tt := range test { t.Run(fmt.Sprintf("Predicate Conditional: %s", tt.name), func(t *testing.T) { cond, err := newPredicateConditional("Logical_Switch_Port", tcache, tt.predicate) assert.Nil(t, err) matches, err := cond.Matches() assert.Nil(t, err) assert.Equal(t, tt.matches, matches) generated, err := cond.Generate() if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.ElementsMatch(t, tt.condition, generated) } }) } } func TestExplicitConditionalWithNoCache(t *testing.T) { lspcache := map[string]model.Model{} testData := cache.Data{ "Logical_Switch_Port": lspcache, } tcache := apiTestCache(t, testData) testObj := &testLogicalSwitchPort{} test := []struct { name string args []model.Condition result [][]ovsdb.Condition all bool err bool }{ { name: "inequality comparison", args: []model.Condition{ { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "lsp0", }, }, result: [][]ovsdb.Condition{ { { Column: "name", Function: ovsdb.ConditionNotEqual, Value: "lsp0", }}}, }, { name: "inequality comparison all", args: []model.Condition{ { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "lsp0", }, }, result: [][]ovsdb.Condition{ { { Column: "name", Function: ovsdb.ConditionNotEqual, Value: "lsp0", }}}, all: true, }, { name: "map comparison", args: []model.Condition{ { Field: &testObj.ExternalIds, Function: ovsdb.ConditionIncludes, Value: map[string]string{"foo": "baz"}, }, }, result: [][]ovsdb.Condition{ { { Column: "external_ids", Function: ovsdb.ConditionIncludes, Value: testOvsMap(t, map[string]string{"foo": "baz"}), }}}, }, { name: "set comparison", args: []model.Condition{ { Field: &testObj.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }, }, result: [][]ovsdb.Condition{ { { Column: "enabled", Function: ovsdb.ConditionEqual, Value: testOvsSet(t, &trueVal), }}}, }, { name: "multiple conditions", args: []model.Condition{ { Field: &testObj.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }, { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "foo", }, }, result: [][]ovsdb.Condition{ { { Column: "enabled", Function: ovsdb.ConditionEqual, Value: testOvsSet(t, &trueVal), }}, { { Column: "name", Function: ovsdb.ConditionNotEqual, Value: "foo", }}}, }, { name: "multiple conditions all", args: []model.Condition{ { Field: &testObj.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }, { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "foo", }, }, result: [][]ovsdb.Condition{{ { Column: "enabled", Function: ovsdb.ConditionEqual, Value: testOvsSet(t, &trueVal), }, { Column: "name", Function: ovsdb.ConditionNotEqual, Value: "foo", }}}, all: true, }, } for _, tt := range test { 
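// With an empty cache, Generate must fall back to the explicit conditions
// built from the model fields, so the expected results are literal
// column/function/value conditions rather than _uuid equality conditions.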
t.Run(fmt.Sprintf("Explicit Conditional with no cache: %s", tt.name), func(t *testing.T) { cond, err := newExplicitConditional("Logical_Switch_Port", tcache, tt.all, testObj, tt.args...) assert.Nil(t, err) generated, err := cond.Generate() if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.ElementsMatch(t, tt.result, generated) } }) } } func TestExplicitConditionalWithCache(t *testing.T) { lspcacheList := []model.Model{ &testLogicalSwitchPort{ UUID: aUUID0, Name: "lsp0", ExternalIds: map[string]string{"foo": "bar"}, Enabled: &trueVal, }, &testLogicalSwitchPort{ UUID: aUUID1, Name: "lsp1", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID2, Name: "lsp2", ExternalIds: map[string]string{"unique": "id"}, Enabled: &falseVal, }, &testLogicalSwitchPort{ UUID: aUUID3, Name: "lsp3", ExternalIds: map[string]string{"foo": "baz"}, Enabled: &trueVal, }, } lspcache := map[string]model.Model{} for i := range lspcacheList { lspcache[lspcacheList[i].(*testLogicalSwitchPort).UUID] = lspcacheList[i] } testData := cache.Data{ "Logical_Switch_Port": lspcache, } tcache := apiTestCache(t, testData) testObj := &testLogicalSwitchPort{} test := []struct { name string args []model.Condition result [][]ovsdb.Condition all bool err bool }{ { name: "inequality comparison", args: []model.Condition{ { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "lsp0", }, }, result: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID3}, }, }, }, }, { name: "inequality comparison all", args: []model.Condition{ { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "lsp0", }, }, result: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID3}, }, }, }, all: true, }, { name: "map comparison", args: []model.Condition{ { Field: &testObj.ExternalIds, Function: ovsdb.ConditionIncludes, Value: map[string]string{"foo": "baz"}, }, }, result: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID3}, }, }, }, }, { name: "set comparison", args: []model.Condition{ { Field: &testObj.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }, }, result: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID3}, }, }, }, }, { name: "multiple conditions", args: []model.Condition{ { Field: &testObj.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }, { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "foo", }, }, result: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID1}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID2}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, 
Value: ovsdb.UUID{GoUUID: aUUID3}, }, }, }, }, { name: "multiple conditions all", args: []model.Condition{ { Field: &testObj.Enabled, Function: ovsdb.ConditionEqual, Value: &trueVal, }, { Field: &testObj.Name, Function: ovsdb.ConditionNotEqual, Value: "foo", }, }, result: [][]ovsdb.Condition{ { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}, }, }, { { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID3}, }, }, }, all: true, }, } for _, tt := range test { t.Run(fmt.Sprintf("Explicit Conditional with cache: %s", tt.name), func(t *testing.T) { cond, err := newExplicitConditional("Logical_Switch_Port", tcache, tt.all, testObj, tt.args...) assert.Nil(t, err) generated, err := cond.Generate() if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.ElementsMatch(t, tt.result, generated) } }) } } golang-github-ovn-org-libovsdb-0.7.0/client/config.go000066400000000000000000000014041464501522100225050ustar00rootroot00000000000000/** * Copyright (c) 2019 eBay Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **/ package client import ( "crypto/tls" ) // Config is a structure used in provisioning a connection to ovsdb. type Config struct { Addr string TLSConfig *tls.Config } golang-github-ovn-org-libovsdb-0.7.0/client/doc.go000066400000000000000000000135551464501522100220170ustar00rootroot00000000000000/* Package client connects to, monitors and interacts with OVSDB servers (RFC7047). This package uses structs, that contain the 'ovs' field tag to determine which field goes to which column in the database. We refer to pointers to this structs as Models. Example: type MyLogicalSwitch struct { UUID string `ovsdb:"_uuid"` // _uuid tag is mandatory Name string `ovsdb:"name"` Ports []string `ovsdb:"ports"` Config map[string]string `ovsdb:"other_config"` } Based on these Models a Database Model (see ClientDBModel type) is built to represent the entire OVSDB: clientDBModel, _ := client.NewClientDBModel("OVN_Northbound", map[string]client.Model{ "Logical_Switch": &MyLogicalSwitch{}, }) The ClientDBModel represents the entire Database (or the part of it we're interested in). Using it, the libovsdb.client package is able to properly encode and decode OVSDB messages and store them in Model instances. A client instance is created by simply specifying the connection information and the database model: ovs, _ := client.Connect(context.Background(), clientDBModel) Main API After creating a OvsdbClient using the Connect() function, we can use a number of CRUD-like to interact with the database: List(), Get(), Create(), Update(), Mutate(), Delete(). The specific database table that the operation targets is automatically determined based on the type of the parameter. In terms of return values, some of these functions like Create(), Update(), Mutate() and Delete(), interact with the database so they return list of ovsdb.Operation objects that can be grouped together and passed to client.Transact(). 
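For example, operations returned by different API calls can be grouped and applied in a single transaction. A minimal sketch (error handling elided; CheckOperationResults lives in the ovsdb package):

	ops, _ := ovs.Create(&MyLogicalSwitch{Name: "sw0"})
	moreOps, _ := ovs.Create(&MyLogicalSwitch{Name: "sw1"})
	allOps := append(ops, moreOps...)
	reply, _ := ovs.Transact(context.Background(), allOps...)
	_, _ = ovsdb.CheckOperationResults(reply, allOps)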
Others, such as List() and Get(), interact with the client's internal cache and are able to return Model instances (or a list thereof) directly. Conditions Some API functions (Create() and Get()) can be run directly. Others require us to use a ConditionalAPI. The ConditionalAPI injects RFC7047 Conditions into ovsdb Operations as well as uses the Conditions to search the internal cache. The ConditionalAPI is created using the Where(), WhereCache() and WhereAll() functions. Where() accepts a Model (pointer to a struct with ovsdb tags) and a number of Condition instances. Conditions must refer to fields of the provided Model (via pointer to fields). Example: ls = &MyLogicalSwitch {} ovs.Where(ls, client.Condition { Field: &ls.Ports, Function: ovsdb.ConditionIncludes, Value: []string{"portUUID"}, }) If no client.Condition is provided, the client will use any of the fields that correspond to indexes to generate an appropriate condition. Therefore the following two statements are equivalent: ls = &MyLogicalSwitch {UUID:"myUUID"} ovs.Where(ls) ovs.Where(ls, client.Condition { Field: &ls.UUID, Function: ovsdb.ConditionEqual, Value: "myUUID", }) Where() accepts multiple Condition instances (through variadic arguments). If several are provided, the client will generate multiple operations, each matching one condition. For example, the following operation will delete all the Logical Switches named "foo" OR "bar": ops, err := ovs.Where(ls, client.Condition { Field: &ls.Name, Function: ovsdb.ConditionEqual, Value: "foo", }, client.Condition { Field: &ls.Name, Function: ovsdb.ConditionEqual, Value: "bar", }).Delete() To create a Condition that matches all of the conditions simultaneously (i.e. AND semantics), use WhereAll(). Where() or WhereAll() evaluate the provided index values or explicit conditions against the cache and generate conditions based on the UUIDs of matching models. If no matches are found in the cache, the generated conditions will be based on the index or condition fields themselves. A more flexible mechanism to search the cache is available: WhereCache() WhereCache() accepts a function that takes any Model as argument and returns a boolean. It is used to search the cache, so it is commonly used with the List() function. For example: lsList := &[]LogicalSwitch{} err := ovs.WhereCache( func(ls *LogicalSwitch) bool { return strings.HasPrefix(ls.Name, "ext_") }).List(lsList) Server-side operations can be executed using WhereCache() conditions, but it is not recommended. For each matching cache element, an operation will be created matching on the "_uuid" column. The number of operations can be quite large depending on the cache size and the provided function. Most likely there is a way to express the same condition using Where() or WhereAll(), which will be more efficient. Get Get() is a simple operation capable of retrieving one Model based on some of its schema indexes. E.g: ls := &LogicalSwitch{UUID:"myUUID"} err := ovs.Get(ls) fmt.Printf("Name of the switch is: %s", ls.Name) List List() searches the cache and populates a slice of Models. It can be used directly or using WhereCache(): lsList := &[]LogicalSwitch{} err := ovs.List(lsList) // List all elements err := ovs.WhereCache( func(ls *LogicalSwitch) bool { return strings.HasPrefix(ls.Name, "ext_") }).List(lsList) Create Create returns a list of operations to create the models provided.
E.g: ops, err := ovs.Create(&LogicalSwitch{Name: "foo"}, &LogicalSwitch{Name: "bar"}) Update Update returns a list of operations to update the matching rows to match the values of the provided model. E.g: ls := &LogicalSwitch{ExternalIDs: map[string]string {"foo": "bar"}} ops, err := ovs.Where(...).Update(ls, &ls.ExternalIDs) Mutate Mutate returns a list of operations needed to mutate the matching rows as described by the list of Mutation objects. E.g: ls := &LogicalSwitch{} ops, err := ovs.Where(...).Mutate(ls, client.Mutation { Field: &ls.Config, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"foo":"bar"}, }) Delete Delete returns a list of operations needed to delete the matching rows. E.g: ops, err := ovs.Where(...).Delete() */ package client golang-github-ovn-org-libovsdb-0.7.0/client/metrics.go000066400000000000000000000040661464501522100227150ustar00rootroot00000000000000package client import ( "sync" "github.com/prometheus/client_golang/prometheus" ) const libovsdbName = "libovsdb" type metrics struct { numUpdates *prometheus.CounterVec numTableUpdates *prometheus.CounterVec numDisconnects prometheus.Counter numMonitors prometheus.Gauge registerOnce sync.Once } func (m *metrics) init(modelName string, namespace, subsystem string) { // labels that are the same across all metrics constLabels := prometheus.Labels{"primary_model": modelName} if namespace == "" { namespace = libovsdbName subsystem = "" } m.numUpdates = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "update_messages_total", Help: "Count of libovsdb monitor update messages processed, partitioned by database", ConstLabels: constLabels, }, []string{"database"}, ) m.numTableUpdates = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "table_updates_total", Help: "Count of libovsdb monitor update messages per table", ConstLabels: constLabels, }, []string{"database", "table"}, ) m.numDisconnects = prometheus.NewCounter( prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "disconnects_total", Help: "Count of libovsdb disconnects encountered", ConstLabels: constLabels, }, ) m.numMonitors = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "monitors", Help: "Number of running libovsdb ovsdb monitors", ConstLabels: constLabels, }, ) } func (m *metrics) register(r prometheus.Registerer) { m.registerOnce.Do(func() { r.MustRegister( m.numUpdates, m.numTableUpdates, m.numDisconnects, m.numMonitors, ) }) } func (o *ovsdbClient) registerMetrics() { if !o.options.shouldRegisterMetrics || o.options.registry == nil { return } o.metrics.register(o.options.registry) o.options.shouldRegisterMetrics = false } golang-github-ovn-org-libovsdb-0.7.0/client/monitor.go000066400000000000000000000071631464501522100227370ustar00rootroot00000000000000package client import ( "fmt" "reflect" "github.com/google/uuid" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) const emptyUUID = "00000000-0000-0000-0000-000000000000" // Monitor represents a monitor type Monitor struct { Method string Tables []TableMonitor Errors []error LastTransactionID string } // newMonitor creates a new *Monitor with default values func newMonitor() *Monitor { return &Monitor{ Method: ovsdb.ConditionalMonitorSinceRPC, Errors: make([]error, 0), LastTransactionID: emptyUUID, } } // NewMonitor creates a new Monitor with the provided options func (o *ovsdbClient) NewMonitor(opts
...MonitorOption) *Monitor { m := newMonitor() for _, opt := range opts { err := opt(o, m) if err != nil { m.Errors = append(m.Errors, err) } } return m } // MonitorOption adds Tables to a Monitor type MonitorOption func(o *ovsdbClient, m *Monitor) error // MonitorCookie is the struct we pass to correlate from updates back to their // originating Monitor request. type MonitorCookie struct { DatabaseName string `json:"databaseName"` ID string `json:"id"` } func newMonitorCookie(dbName string) MonitorCookie { return MonitorCookie{ DatabaseName: dbName, ID: uuid.NewString(), } } // TableMonitor is a table to be monitored type TableMonitor struct { // Table is the table to be monitored Table string // Conditions are the conditions under which the table should be monitored Conditions []ovsdb.Condition // Fields are the fields in the model to monitor // If none are supplied, all fields will be used Fields []string } func newTableMonitor(o *ovsdbClient, m model.Model, conditions []model.Condition, fields []interface{}) (*TableMonitor, error) { dbModel := o.primaryDB().model tableName := dbModel.FindTable(reflect.TypeOf(m)) if tableName == "" { return nil, fmt.Errorf("object of type %s is not part of the ClientDBModel", reflect.TypeOf(m)) } var columns []string var ovsdbConds []ovsdb.Condition if len(fields) == 0 && len(conditions) == 0 { return &TableMonitor{ Table: tableName, Conditions: ovsdbConds, Fields: columns, }, nil } data, err := dbModel.NewModelInfo(m) if err != nil { return nil, fmt.Errorf("unable to obtain info from model %v: %v", m, err) } for _, f := range fields { column, err := data.ColumnByPtr(f) if err != nil { return nil, fmt.Errorf("unable to obtain column from model %v: %v", data, err) } columns = append(columns, column) } db := o.databases[o.primaryDBName] mmapper := db.model.Mapper for _, modelCond := range conditions { ovsdbCond, err := mmapper.NewCondition(data, modelCond.Field, modelCond.Function, modelCond.Value) if err != nil { return nil, fmt.Errorf("unable to convert condition %v: %v", modelCond, err) } ovsdbConds = append(ovsdbConds, *ovsdbCond) } return &TableMonitor{ Table: tableName, Conditions: ovsdbConds, Fields: columns, }, nil } func WithTable(m model.Model, fields ...interface{}) MonitorOption { return func(o *ovsdbClient, monitor *Monitor) error { tableMonitor, err := newTableMonitor(o, m, []model.Condition{}, fields) if err != nil { return err } monitor.Tables = append(monitor.Tables, *tableMonitor) return nil } } func WithConditionalTable(m model.Model, conditions []model.Condition, fields ...interface{}) MonitorOption { return func(o *ovsdbClient, monitor *Monitor) error { tableMonitor, err := newTableMonitor(o, m, conditions, fields) if err != nil { return err } monitor.Tables = append(monitor.Tables, *tableMonitor) return nil } } golang-github-ovn-org-libovsdb-0.7.0/client/monitor_test.go000066400000000000000000000037251464501522100237760ustar00rootroot00000000000000package client import ( "encoding/json" "testing" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) func TestWithTable(t *testing.T) { client, err := newOVSDBClient(defDB) assert.NoError(t, err) m := newMonitor() opt := WithTable(&OpenvSwitch{}) err = opt(client, m) assert.NoError(t, err) assert.Equal(t, 1, len(m.Tables)) } func populateClientModel(t *testing.T, client *ovsdbClient) { var s ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &s) assert.NoError(t, err) clientDBModel, err := 
model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Bridge": &Bridge{}, "Open_vSwitch": &OpenvSwitch{}, }) assert.NoError(t, err) dbModel, errs := model.NewDatabaseModel(s, clientDBModel) assert.Empty(t, errs) client.primaryDB().model = dbModel assert.NoError(t, err) } func TestWithTableAndFields(t *testing.T) { client, err := newOVSDBClient(defDB) assert.NoError(t, err) populateClientModel(t, client) m := newMonitor() ovs := OpenvSwitch{} opt := WithTable(&ovs, &ovs.Bridges, &ovs.CurCfg) err = opt(client, m) assert.NoError(t, err) assert.Equal(t, 1, len(m.Tables)) assert.ElementsMatch(t, []string{"bridges", "cur_cfg"}, m.Tables[0].Fields) } func TestWithTableAndFieldsAndConditions(t *testing.T) { client, err := newOVSDBClient(defDB) assert.NoError(t, err) populateClientModel(t, client) m := newMonitor() bridge := Bridge{} conditions := []model.Condition{ { Field: &bridge.Name, Function: ovsdb.ConditionEqual, Value: "foo", }, } opt := WithConditionalTable(&bridge, conditions, &bridge.Name, &bridge.DatapathType) err = opt(client, m) assert.NoError(t, err) assert.Equal(t, 1, len(m.Tables)) assert.ElementsMatch(t, []string{"name", "datapath_type"}, m.Tables[0].Fields) assert.ElementsMatch(t, []ovsdb.Condition{ { Column: "name", Function: ovsdb.ConditionEqual, Value: "foo", }, }, m.Tables[0].Conditions) } golang-github-ovn-org-libovsdb-0.7.0/client/options.go000066400000000000000000000116271464501522100227430ustar00rootroot00000000000000package client import ( "crypto/tls" "net/url" "time" "github.com/cenkalti/backoff/v4" "github.com/go-logr/logr" "github.com/prometheus/client_golang/prometheus" ) const ( defaultTCPEndpoint = "tcp:127.0.0.1:6640" defaultSSLEndpoint = "ssl:127.0.0.1:6640" defaultUnixEndpoint = "unix:/var/run/openvswitch/ovsdb.sock" ) type options struct { endpoints []string tlsConfig *tls.Config reconnect bool leaderOnly bool timeout time.Duration backoff backoff.BackOff logger *logr.Logger registry prometheus.Registerer shouldRegisterMetrics bool // in case metrics are changed after-the-fact metricNamespace string // prometheus metric namespace metricSubsystem string // prometheus metric subsystem inactivityTimeout time.Duration } type Option func(o *options) error func newOptions(opts ...Option) (*options, error) { o := &options{} for _, opt := range opts { if err := opt(o); err != nil { return nil, err } } // if no endpoints are supplied, use the default unix socket if len(o.endpoints) == 0 { o.endpoints = []string{defaultUnixEndpoint} } return o, nil } // WithTLSConfig sets the tls.Config for use by the client func WithTLSConfig(cfg *tls.Config) Option { return func(o *options) error { o.tlsConfig = cfg return nil } } // WithEndpoint sets the endpoint to be used by the client // It can be used multiple times, and the first endpoint that // successfully connects will be used. 
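// Typical values mirror the defaults declared above, e.g. "tcp:127.0.0.1:6640",
// "ssl:127.0.0.1:6640" or "unix:/var/run/openvswitch/ovsdb.sock".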
// Endpoints are specified in OVSDB Connection Format // For more details, see the ovsdb(7) man page func WithEndpoint(endpoint string) Option { return func(o *options) error { ep, err := url.Parse(endpoint) if err != nil { return err } switch ep.Scheme { case UNIX: if len(ep.Path) == 0 { o.endpoints = append(o.endpoints, defaultUnixEndpoint) return nil } case TCP: if len(ep.Opaque) == 0 { o.endpoints = append(o.endpoints, defaultTCPEndpoint) return nil } case SSL: if len(ep.Opaque) == 0 { o.endpoints = append(o.endpoints, defaultSSLEndpoint) return nil } } o.endpoints = append(o.endpoints, endpoint) return nil } } // WithLeaderOnly tells the client to treat endpoints that are clustered // and not the leader as down. func WithLeaderOnly(leaderOnly bool) Option { return func(o *options) error { o.leaderOnly = leaderOnly return nil } } // WithReconnect tells the client to automatically reconnect when // disconnected. The timeout is used to construct the context on // each call to Connect, while backoff dictates the backoff // algorithm to use. Using WithReconnect implies that // requested transactions will block until the client has fully reconnected, // rather than immediately returning an error if there is no connection. func WithReconnect(timeout time.Duration, backoff backoff.BackOff) Option { return func(o *options) error { o.reconnect = true o.timeout = timeout o.backoff = backoff return nil } } // WithInactivityCheck tells the client to send Echo request to ovsdb server periodically // upon inactivityTimeout. When Echo request fails, then it attempts to reconnect // with server. The inactivity check is performed as long as the connection is established. // The reconnectTimeout argument is used to construct the context on each call to Connect, // while reconnectBackoff dictates the backoff algorithm to use. func WithInactivityCheck(inactivityTimeout, reconnectTimeout time.Duration, reconnectBackoff backoff.BackOff) Option { return func(o *options) error { o.reconnect = true o.timeout = reconnectTimeout o.backoff = reconnectBackoff o.inactivityTimeout = inactivityTimeout return nil } } // WithLogger allows setting a specific log sink. Otherwise, the default // go log package is used. func WithLogger(l *logr.Logger) Option { return func(o *options) error { o.logger = l return nil } } // WithMetricsRegistry allows the user to specify a Prometheus metrics registry. // If supplied, the metrics as defined in metrics.go will be registered. func WithMetricsRegistry(r prometheus.Registerer) Option { return func(o *options) error { o.registry = r o.shouldRegisterMetrics = (r != nil) return nil } } // WithMetricsRegistryNamespaceSubsystem allows the user to specify a Prometheus metrics registry // and Prometheus metric namespace and subsystem of the component utilizing libovsdb. // If supplied, the metrics as defined in metrics.go will be registered. 
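// Note that, unlike the other options in this file, this function panics right
// away when namespace or subsystem is empty (see the check below).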
func WithMetricsRegistryNamespaceSubsystem(r prometheus.Registerer, namespace, subsystem string) Option { if namespace == "" || subsystem == "" { panic("libovsdb function WithMetricsRegistryNamespaceSubsystem arguments 'namespace' and 'subsystem' must not be empty") } return func(o *options) error { o.registry = r o.shouldRegisterMetrics = (r != nil) o.metricNamespace = namespace o.metricSubsystem = subsystem return nil } } golang-github-ovn-org-libovsdb-0.7.0/client/options_test.go000066400000000000000000000042731464501522100240010ustar00rootroot00000000000000package client import ( "crypto/tls" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestWithTLSConfig(t *testing.T) { config := &tls.Config{ InsecureSkipVerify: true, } opts := &options{} fn := WithTLSConfig(config) err := fn(opts) require.Nil(t, err) assert.Equal(t, config, opts.tlsConfig) } func TestNewOptions(t *testing.T) { tests := []struct { name string opts []Option want *options }{ { "no endpoints", []Option{}, &options{ endpoints: []string{defaultUnixEndpoint}, }, }, { "single endpoints", []Option{WithEndpoint("ssl:192.168.1.1:6443")}, &options{ endpoints: []string{"ssl:192.168.1.1:6443"}, }, }, { "multiple endpoints", []Option{WithEndpoint("ssl:192.168.1.1:6443"), WithEndpoint("ssl:192.168.1.2:6443")}, &options{ endpoints: []string{"ssl:192.168.1.1:6443", "ssl:192.168.1.2:6443"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := newOptions(tt.opts...) got.logger = nil // hack; we don't care require.Nil(t, err) assert.Equal(t, tt.want, got) }) } } func TestWithEndpoint(t *testing.T) { tests := []struct { name string endpoint string want []string wantErr bool }{ { "default unix", "unix:", []string{defaultUnixEndpoint}, false, }, { "default tcp", "tcp:", []string{defaultTCPEndpoint}, false, }, { "default ssl", "ssl:", []string{defaultSSLEndpoint}, false, }, { "invalid", "foo : ", nil, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { opts := &options{} fn := WithEndpoint(tt.endpoint) err := fn(opts) if tt.wantErr { require.Error(t, err) } else { require.Nil(t, err) } assert.Equal(t, tt.want, opts.endpoints) }) } } func TestWithReconnect(t *testing.T) { timeout := 2 * time.Second opts := &options{} fn := WithReconnect(timeout, &backoff.ZeroBackOff{}) err := fn(opts) require.NoError(t, err) assert.Equal(t, timeout, opts.timeout) assert.Equal(t, true, opts.reconnect) assert.Equal(t, &backoff.ZeroBackOff{}, opts.backoff) } golang-github-ovn-org-libovsdb-0.7.0/cmd/000077500000000000000000000000001464501522100201775ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/cmd/modelgen/000077500000000000000000000000001464501522100217715ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/cmd/modelgen/main.go000066400000000000000000000037671464501522100232610ustar00rootroot00000000000000package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "os" "path/filepath" "github.com/ovn-org/libovsdb/modelgen" "github.com/ovn-org/libovsdb/ovsdb" ) func usage() { fmt.Fprintf(os.Stderr, "Usage of modelgen:\n") fmt.Fprintf(os.Stderr, "\tmodelgen [flags] OVS_SCHEMA\n") fmt.Fprintf(os.Stderr, "Flags:\n") flag.PrintDefaults() } var ( outDirP = flag.String("o", ".", "Directory where the generated files shall be stored") pkgNameP = flag.String("p", "ovsmodel", "Package name") dryRun = flag.Bool("d", false, "Dry run") extended = flag.Bool("extended", false, "Generates additional 
code like deep-copy methods, etc.") ) func main() { log.SetFlags(0) log.SetPrefix("modelgen: ") flag.Usage = usage flag.Parse() outDir := *outDirP pkgName := *pkgNameP /*Option handling*/ outDir, err := filepath.Abs(outDir) if err != nil { log.Fatal(err) } if err := os.MkdirAll(outDir, 0755); err != nil { log.Fatal(err) } if len(flag.Args()) != 1 { flag.Usage() os.Exit(2) } schemaFile, err := os.Open(flag.Args()[0]) if err != nil { log.Fatal(err) } defer schemaFile.Close() schemaBytes, err := ioutil.ReadAll(schemaFile) if err != nil { log.Fatal(err) } var dbSchema ovsdb.DatabaseSchema if err := json.Unmarshal(schemaBytes, &dbSchema); err != nil { log.Fatal(err) } genOpts := []modelgen.Option{} if *dryRun { genOpts = append(genOpts, modelgen.WithDryRun()) } gen, err := modelgen.NewGenerator(genOpts...) if err != nil { log.Fatal(err) } for name, table := range dbSchema.Tables { tmpl := modelgen.NewTableTemplate() args := modelgen.GetTableTemplateData(pkgName, name, &table) args.WithExtendedGen(*extended) if err := gen.Generate(filepath.Join(outDir, modelgen.FileName(name)), tmpl, args); err != nil { log.Fatal(err) } } dbTemplate := modelgen.NewDBTemplate() dbArgs := modelgen.GetDBTemplateData(pkgName, dbSchema) if err := gen.Generate(filepath.Join(outDir, "model.go"), dbTemplate, dbArgs); err != nil { log.Fatal(err) } } golang-github-ovn-org-libovsdb-0.7.0/cmd/print_schema/000077500000000000000000000000001464501522100226535ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/cmd/print_schema/print_schema.go000066400000000000000000000032171464501522100256610ustar00rootroot00000000000000package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "os" "runtime" "runtime/pprof" "github.com/ovn-org/libovsdb/ovsdb" ) func usage() { fmt.Fprintf(os.Stderr, "Print schema information:\n") fmt.Fprintf(os.Stderr, "\tprint_schema [flags] OVS_SCHEMA\n") fmt.Fprintf(os.Stderr, "Flag:\n") flag.PrintDefaults() } var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") var memprofile = flag.String("memoryprofile", "", "write memory profile to this file") var ntimes = flag.Int("ntimes", 1, "Parse the schema N times. 
Useful for profiling") var schemas []ovsdb.DatabaseSchema func main() { log.SetFlags(0) flag.Usage = usage flag.Parse() if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } if err := pprof.StartCPUProfile(f); err != nil { log.Fatal(err) } defer pprof.StopCPUProfile() } if len(flag.Args()) != 1 { flag.Usage() os.Exit(2) } schemaFile, err := os.Open(flag.Args()[0]) if err != nil { log.Fatal(err) } defer schemaFile.Close() schemaBytes, err := ioutil.ReadAll(schemaFile) if err != nil { log.Fatal(err) } schemas = make([]ovsdb.DatabaseSchema, *ntimes) for i := 0; i < *ntimes; i++ { if err := json.Unmarshal(schemaBytes, &schemas[i]); err != nil { log.Fatal(err) } } if *memprofile != "" { f, err := os.Create(*memprofile) if err != nil { log.Fatal(err) } defer f.Close() runtime.GC() if err := pprof.WriteHeapProfile(f); err != nil { log.Fatal("could not write memory profile: ", err) } } // It only really makes sense to print 1 time if *ntimes > 0 { schemas[0].Print(os.Stdout) } } golang-github-ovn-org-libovsdb-0.7.0/cmd/stress/000077500000000000000000000000001464501522100215225ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/cmd/stress/stress.go000066400000000000000000000162141464501522100234000ustar00rootroot00000000000000package main import ( "context" "flag" "fmt" "io" "log" "os" "runtime" "runtime/pprof" "sync" "time" "github.com/google/uuid" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // ORMBridge is the simplified ORM model of the Bridge table type bridgeType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` OtherConfig map[string]string `ovsdb:"other_config"` ExternalIds map[string]string `ovsdb:"external_ids"` Ports []string `ovsdb:"ports"` Status map[string]string `ovsdb:"status"` } // ORMovs is the simplified ORM model of the Bridge table type ovsType struct { UUID string `ovsdb:"_uuid"` Bridges []string `ovsdb:"bridges"` } var ( cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") memprofile = flag.String("memoryprofile", "", "write memory profile to this file") nins = flag.Int("inserts", 100, "the number of insertions to make to the database (per client)") nclients = flag.Int("clients", 1, "the number of clients to use") parallel = flag.Bool("parallel", false, "run clients in parallel") verbose = flag.Bool("verbose", false, "Be verbose") connection = flag.String("ovsdb", "unix:/var/run/openvswitch/db.sock", "OVSDB connection string") clientDBModel model.ClientDBModel ) type result struct { insertions int deletions int transactTime []time.Duration cacheTime []time.Duration } func cleanup(ctx context.Context) { ovs, err := client.NewOVSDBClient(clientDBModel, client.WithEndpoint(*connection)) if err != nil { log.Fatal(err) } err = ovs.Connect(ctx) if err != nil { log.Fatal(err) } defer ovs.Disconnect() if _, err := ovs.MonitorAll(ctx); err != nil { log.Fatal(err) } var rootUUID string // Get root UUID for uuid := range ovs.Cache().Table("Open_vSwitch").Rows() { rootUUID = uuid log.Printf("rootUUID is %v", rootUUID) } // Remove all existing bridges var bridges []bridgeType if err := ovs.List(context.Background(), &bridges); err == nil { log.Printf("%d existing bridges found", len(bridges)) for _, bridge := range bridges { deleteBridge(ctx, ovs, rootUUID, &bridge) } } else { if err != client.ErrNotFound { log.Fatal(err) } } } func run(ctx context.Context, resultsChan chan result, wg *sync.WaitGroup) { 
defer wg.Done() result := result{} ready := false var rootUUID string ovs, err := client.NewOVSDBClient(clientDBModel, client.WithEndpoint(*connection)) if err != nil { log.Fatal(err) } err = ovs.Connect(ctx) if err != nil { log.Fatal(err) } defer ovs.Disconnect() var bridges []bridgeType bridgeCh := make(map[string]chan bool) for i := 0; i < *nins; i++ { br := newBridge() bridges = append(bridges, br) bridgeCh[br.Name] = make(chan bool) } ovs.Cache().AddEventHandler( &cache.EventHandlerFuncs{ AddFunc: func(table string, model model.Model) { if ready && table == "Bridge" { br := model.(*bridgeType) var ch chan bool var ok bool if ch, ok = bridgeCh[br.Name]; !ok { return } close(ch) result.insertions++ } }, DeleteFunc: func(table string, model model.Model) { if table == "Bridge" { result.deletions++ } }, }, ) if _, err := ovs.MonitorAll(ctx); err != nil { log.Fatal(err) } // Get root UUID for uuid := range ovs.Cache().Table("Open_vSwitch").Rows() { rootUUID = uuid if *verbose { fmt.Printf("rootUUID is %v\n", rootUUID) } } ready = true cacheWg := sync.WaitGroup{} for i := 0; i < *nins; i++ { br := bridges[i] ch := bridgeCh[br.Name] log.Printf("create bridge: %s", br.Name) cacheWg.Add(1) go func(ctx context.Context, ch chan bool) { defer cacheWg.Done() <-ch }(ctx, ch) createBridge(ctx, ovs, rootUUID, br) } cacheWg.Wait() resultsChan <- result } func transact(ctx context.Context, ovs client.Client, operations []ovsdb.Operation) (bool, string) { reply, err := ovs.Transact(ctx, operations...) if err != nil { return false, "" } if _, err := ovsdb.CheckOperationResults(reply, operations); err != nil { return false, "" } return true, reply[0].UUID.GoUUID } func deleteBridge(ctx context.Context, ovs client.Client, rootUUID string, bridge *bridgeType) { log.Printf("deleting bridge %s", bridge.Name) deleteOp, err := ovs.Where(bridge).Delete() if err != nil { log.Fatal(err) } ovsRow := ovsType{ UUID: rootUUID, } mutateOp, err := ovs.Where(&ovsRow).Mutate(&ovsRow, model.Mutation{ Field: &ovsRow.Bridges, Mutator: ovsdb.MutateOperationDelete, Value: []string{bridge.UUID}, }) if err != nil { log.Fatal(err) } operations := append(deleteOp, mutateOp...) _, _ = transact(ctx, ovs, operations) } func newBridge() bridgeType { return bridgeType{ UUID: "gopher", Name: fmt.Sprintf("br-%s", uuid.NewString()), OtherConfig: map[string]string{ "foo": "bar", "fake": "config", }, ExternalIds: map[string]string{ "key1": "val1", "key2": "val2", }, } } func createBridge(ctx context.Context, ovs client.Client, rootUUID string, bridge bridgeType) { insertOp, err := ovs.Create(&bridge) if err != nil { log.Fatal(err) } ovsRow := ovsType{} mutateOp, err := ovs.Where(&ovsType{UUID: rootUUID}).Mutate(&ovsRow, model.Mutation{ Field: &ovsRow.Bridges, Mutator: ovsdb.MutateOperationInsert, Value: []string{bridge.UUID}, }) if err != nil { log.Fatal(err) } operations := append(insertOp, mutateOp...) 
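// the bridge insert and the mutation of the root row are committed as a single
// transaction; the result is discarded here and success is instead observed via
// the cache events counted in run()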
_, _ = transact(ctx, ovs, operations) } func main() { flag.Parse() ctx := context.Background() if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } if err := pprof.StartCPUProfile(f); err != nil { log.Fatal(err) } defer pprof.StopCPUProfile() } if !*verbose { log.SetOutput(io.Discard) } var err error clientDBModel, err = model.NewClientDBModel("Open_vSwitch", map[string]model.Model{"Open_vSwitch": &ovsType{}, "Bridge": &bridgeType{}}) if err != nil { log.Fatal(err) } cleanup(ctx) var wg sync.WaitGroup resultChan := make(chan result) results := make([]result, *nclients) go func() { for result := range resultChan { results = append(results, result) } }() for i := 0; i < *nclients; i++ { wg.Add(1) go run(ctx, resultChan, &wg) if !*parallel { wg.Wait() } } log.Print("waiting for clients to complete") // wait for all clients wg.Wait() // close the result channel to avoid leaking a goroutine close(resultChan) result := result{} for _, r := range results { result.insertions += r.insertions result.deletions += r.deletions result.transactTime = append(result.transactTime, r.transactTime...) result.cacheTime = append(result.transactTime, r.cacheTime...) } fmt.Printf("\n\n\n") fmt.Printf("Summary:\n") fmt.Printf("\tTotal Insertions: %d\n", result.insertions) fmt.Printf("\tTotal Deletions: %d\n", result.deletions) if *memprofile != "" { f, err := os.Create(*memprofile) if err != nil { log.Fatal(err) } defer f.Close() runtime.GC() if err := pprof.WriteHeapProfile(f); err != nil { log.Fatal("could not write memory profile: ", err) } } } golang-github-ovn-org-libovsdb-0.7.0/database/000077500000000000000000000000001464501522100212005ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/database/database.go000066400000000000000000000024071464501522100232760ustar00rootroot00000000000000package database import ( "github.com/google/uuid" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // Database abstracts a database that a server can use to store and transact data type Database interface { CreateDatabase(database string, model ovsdb.DatabaseSchema) error Exists(database string) bool NewTransaction(database string) Transaction Commit(database string, id uuid.UUID, update Update) error CheckIndexes(database string, table string, m model.Model) error List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error) Get(database, table string, uuid string) (model.Model, error) GetReferences(database, table, row string) (References, error) } // Transaction abstracts a database transaction that can generate database // updates type Transaction interface { Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, Update) } // Update abstracts an update that can be committed to a database type Update interface { GetUpdatedTables() []string ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error ForReferenceUpdates(do func(references References) error) error } golang-github-ovn-org-libovsdb-0.7.0/database/doc.go000066400000000000000000000001511464501522100222710ustar00rootroot00000000000000/* Package database collects database related types, interfaces and implementations. 
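It defines the Database, Transaction and Update interfaces that concrete backends, such as the in-memory implementation in the inmemory subpackage, provide.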
*/ package database golang-github-ovn-org-libovsdb-0.7.0/database/inmemory/000077500000000000000000000000001464501522100230375ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/database/inmemory/doc.go000066400000000000000000000001251464501522100241310ustar00rootroot00000000000000/* Package inmemory provides a in-memory database implementation */ package inmemory golang-github-ovn-org-libovsdb-0.7.0/database/inmemory/inmemory.go000066400000000000000000000075361464501522100252400ustar00rootroot00000000000000package inmemory import ( "fmt" "log" "os" "sync" "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" "github.com/ovn-org/libovsdb/cache" dbase "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/database/transaction" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) type inMemoryDatabase struct { databases map[string]*cache.TableCache models map[string]model.ClientDBModel references map[string]dbase.References logger *logr.Logger mutex sync.RWMutex } func NewDatabase(models map[string]model.ClientDBModel) dbase.Database { logger := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("database") return &inMemoryDatabase{ databases: make(map[string]*cache.TableCache), models: models, references: make(map[string]dbase.References), mutex: sync.RWMutex{}, logger: &logger, } } func (db *inMemoryDatabase) NewTransaction(dbName string) dbase.Transaction { db.mutex.Lock() defer db.mutex.Unlock() var model model.DatabaseModel if database, ok := db.databases[dbName]; ok { model = database.DatabaseModel() } transaction := transaction.NewTransaction(model, dbName, db, db.logger) return &transaction } func (db *inMemoryDatabase) CreateDatabase(name string, schema ovsdb.DatabaseSchema) error { db.mutex.Lock() defer db.mutex.Unlock() var mo model.ClientDBModel var ok bool if mo, ok = db.models[schema.Name]; !ok { return fmt.Errorf("no db model provided for schema with name %s", name) } dbModel, errs := model.NewDatabaseModel(schema, mo) if len(errs) > 0 { return fmt.Errorf("failed to create DatabaseModel: %#+v", errs) } database, err := cache.NewTableCache(dbModel, nil, nil) if err != nil { return err } db.databases[name] = database db.references[name] = make(dbase.References) return nil } func (db *inMemoryDatabase) Exists(name string) bool { db.mutex.RLock() defer db.mutex.RUnlock() _, ok := db.databases[name] return ok } func (db *inMemoryDatabase) Commit(database string, id uuid.UUID, update dbase.Update) error { if !db.Exists(database) { return fmt.Errorf("db does not exist") } db.mutex.RLock() targetDb := db.databases[database] db.mutex.RUnlock() err := targetDb.ApplyCacheUpdate(update) if err != nil { return err } return update.ForReferenceUpdates(func(references dbase.References) error { db.references[database].UpdateReferences(references) return nil }) } func (db *inMemoryDatabase) CheckIndexes(database string, table string, m model.Model) error { if !db.Exists(database) { return nil } db.mutex.RLock() targetDb := db.databases[database] db.mutex.RUnlock() targetTable := targetDb.Table(table) return targetTable.IndexExists(m) } func (db *inMemoryDatabase) List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error) { if !db.Exists(database) { return nil, fmt.Errorf("db does not exist") } db.mutex.RLock() targetDb := db.databases[database] db.mutex.RUnlock() targetTable := targetDb.Table(table) if targetTable == nil { return nil, 
fmt.Errorf("table does not exist") } return targetTable.RowsByCondition(conditions) } func (db *inMemoryDatabase) Get(database, table string, uuid string) (model.Model, error) { if !db.Exists(database) { return nil, fmt.Errorf("db does not exist") } db.mutex.RLock() targetDb := db.databases[database] db.mutex.RUnlock() targetTable := targetDb.Table(table) if targetTable == nil { return nil, fmt.Errorf("table does not exist") } return targetTable.Row(uuid), nil } func (db *inMemoryDatabase) GetReferences(database, table, row string) (dbase.References, error) { if !db.Exists(database) { return nil, fmt.Errorf("db does not exist") } db.mutex.RLock() defer db.mutex.RUnlock() return db.references[database].GetReferences(table, row), nil } golang-github-ovn-org-libovsdb-0.7.0/database/inmemory/inmemory_test.go000066400000000000000000000710611464501522100262710ustar00rootroot00000000000000package inmemory import ( "testing" "time" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" . "github.com/ovn-org/libovsdb/test" ) func TestWaitOpEquals(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) m := mapper.NewMapper(dbModel.Schema) ovsUUID := uuid.NewString() bridgeUUID := uuid.NewString() ovs := OvsType{} info, err := dbModel.NewModelInfo(&ovs) require.NoError(t, err) ovsRow, err := m.NewRow(info) require.Nil(t, err) bridge := BridgeType{ Name: "foo", ExternalIds: map[string]string{ "foo": "bar", "baz": "quux", "waldo": "fred", }, } bridgeInfo, err := dbModel.NewModelInfo(&bridge) require.NoError(t, err) bridgeRow, err := m.NewRow(bridgeInfo) require.Nil(t, err) transaction := db.NewTransaction("Open_vSwitch") operations := []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Open_vSwitch", UUIDName: ovsUUID, Row: ovsRow, }, { Op: ovsdb.OperationInsert, Table: "Bridge", UUIDName: bridgeUUID, Row: bridgeRow, }, } res, updates := transaction.Transact(operations...) _, err = checkOperationResults(res, operations...) 
require.NoError(t, err) err = db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) timeout := 0 // Attempt to wait for row with name foo to appear operation := ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name"}, Until: "==", Rows: []ovsdb.Row{{"name": "foo"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) // Attempt to wait for 2 rows, where one does not exist operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name"}, Until: "==", Rows: []ovsdb.Row{{"name": "foo"}, {"name": "blah"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.Error(t, err) extIDs, err := ovsdb.NewOvsMap(map[string]string{ "foo": "bar", "baz": "quux", "waldo": "fred", }) require.Nil(t, err) // Attempt to wait for a row, with multiple columns specified operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name", "external_ids"}, Until: "==", Rows: []ovsdb.Row{{"name": "foo", "external_ids": extIDs}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) // Attempt to wait for a row, with multiple columns, but not specified in row filtering operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name", "external_ids"}, Until: "==", Rows: []ovsdb.Row{{"name": "foo"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) // Attempt to get something with a non-zero timeout that will fail timeout = 400 operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name", "external_ids"}, Until: "==", Rows: []ovsdb.Row{{"name": "doesNotExist"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.Error(t, err) } func TestWaitOpNotEquals(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) m := mapper.NewMapper(dbModel.Schema) ovsUUID := uuid.NewString() bridgeUUID := uuid.NewString() ovs := OvsType{} info, err := dbModel.NewModelInfo(&ovs) require.NoError(t, err) ovsRow, err := m.NewRow(info) require.Nil(t, err) bridge := BridgeType{ Name: "foo", ExternalIds: map[string]string{ "foo": "bar", "baz": "quux", "waldo": "fred", }, } bridgeInfo, err := dbModel.NewModelInfo(&bridge) require.NoError(t, err) bridgeRow, err := m.NewRow(bridgeInfo) require.Nil(t, err) transaction := db.NewTransaction("Open_vSwitch") operations := []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Open_vSwitch", UUIDName: ovsUUID, Row: ovsRow, }, { Op: ovsdb.OperationInsert, Table: "Bridge", UUIDName: bridgeUUID, Row: bridgeRow, }, } res, updates := transaction.Transact(operations...) 
_, err = checkOperationResults(res, operations...) require.NoError(t, err) err = db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) timeout := 0 // Attempt a wait where no entry with name blah should exist operation := ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name"}, Until: "!=", Rows: []ovsdb.Row{{"name": "blah"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) // Attempt another wait with multiple rows specified, one that would match, and one that doesn't operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name"}, Until: "!=", Rows: []ovsdb.Row{{"name": "blah"}, {"name": "foo"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) // Attempt another wait where name would match, but ext ids would not match NoMatchExtIDs, err := ovsdb.NewOvsMap(map[string]string{ "foo": "bar", "baz": "quux", "waldo": "is_different", }) require.NoError(t, err) // Attempt to wait for a row, with multiple columns specified and one is not a match operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name", "external_ids"}, Until: "!=", Rows: []ovsdb.Row{{"name": "foo", "external_ids": NoMatchExtIDs}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) // Check to see if a non match takes around the timeout start := time.Now() timeout = 200 operation = ovsdb.Operation{ Op: ovsdb.OperationWait, Table: "Bridge", Timeout: &timeout, Where: []ovsdb.Condition{ovsdb.NewCondition("name", ovsdb.ConditionEqual, "foo")}, Columns: []string{"name"}, Until: "!=", Rows: []ovsdb.Row{{"name": "foo"}}, } res, _ = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.Error(t, err) ts := time.Since(start) if ts < time.Duration(timeout)*time.Millisecond { t.Fatalf("Should have taken at least %d milliseconds to return, but it took %d instead", timeout, ts) } require.NotNil(t, err) } func TestMutateOp(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) m := mapper.NewMapper(dbModel.Schema) bridgeUUID := uuid.NewString() ovs := OvsType{} info, err := dbModel.NewModelInfo(&ovs) require.NoError(t, err) ovsRow, err := m.NewRow(info) require.Nil(t, err) bridge := BridgeType{ Name: "foo", ExternalIds: map[string]string{ "foo": "bar", "baz": "quux", "waldo": "fred", }, } bridgeInfo, err := dbModel.NewModelInfo(&bridge) require.NoError(t, err) bridgeRow, err := m.NewRow(bridgeInfo) require.Nil(t, err) transaction := db.NewTransaction("Open_vSwitch") operations := []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Open_vSwitch", Row: ovsRow, }, { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: bridgeRow, }, } res, updates := transaction.Transact(operations...) _, err = checkOperationResults(res, operations...) 
require.NoError(t, err) err = db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) ovsUUID := res[0].UUID.GoUUID operation := ovsdb.Operation{ Op: ovsdb.OperationMutate, Table: "Open_vSwitch", Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: ovsUUID})}, Mutations: []ovsdb.Mutation{*ovsdb.NewMutation("bridges", ovsdb.MutateOperationInsert, ovsdb.UUID{GoUUID: bridgeUUID})}, } res, updates = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) assert.Equal(t, []*ovsdb.OperationResult{{Count: 1}}, res) err = db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) bridgeSet, err := ovsdb.NewOvsSet([]ovsdb.UUID{{GoUUID: bridgeUUID}}) assert.Nil(t, err) assert.Equal(t, ovsdb.TableUpdates2{ "Open_vSwitch": ovsdb.TableUpdate2{ ovsUUID: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "bridges": bridgeSet, }, Old: &ovsdb.Row{ // TODO: _uuid should be filtered "_uuid": ovsdb.UUID{GoUUID: ovsUUID}, }, New: &ovsdb.Row{ // TODO: _uuid should be filtered "_uuid": ovsdb.UUID{GoUUID: ovsUUID}, "bridges": bridgeSet, }, }, }, }, getTableUpdates(updates)) keyDelete, err := ovsdb.NewOvsSet([]string{"foo"}) assert.Nil(t, err) keyValueDelete, err := ovsdb.NewOvsMap(map[string]string{"baz": "quux"}) assert.Nil(t, err) operation = ovsdb.Operation{ Op: ovsdb.OperationMutate, Table: "Bridge", Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: bridgeUUID})}, Mutations: []ovsdb.Mutation{ *ovsdb.NewMutation("external_ids", ovsdb.MutateOperationDelete, keyDelete), *ovsdb.NewMutation("external_ids", ovsdb.MutateOperationDelete, keyValueDelete), }, } res, updates = transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) assert.Equal(t, []*ovsdb.OperationResult{{Count: 1}}, res) oldExternalIds, _ := ovsdb.NewOvsMap(bridge.ExternalIds) newExternalIds, _ := ovsdb.NewOvsMap(map[string]string{"waldo": "fred"}) diffExternalIds, _ := ovsdb.NewOvsMap(map[string]string{"foo": "bar", "baz": "quux"}) assert.Nil(t, err) gotModify := *getTableUpdates(updates)["Bridge"][bridgeUUID].Modify gotOld := *getTableUpdates(updates)["Bridge"][bridgeUUID].Old gotNew := *getTableUpdates(updates)["Bridge"][bridgeUUID].New assert.Equal(t, diffExternalIds, gotModify["external_ids"]) assert.Equal(t, oldExternalIds, gotOld["external_ids"]) assert.Equal(t, newExternalIds, gotNew["external_ids"]) } func TestOvsdbServerInsert(t *testing.T) { t.Skip("need a helper for comparing rows as map elements aren't in same order") dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) m := mapper.NewMapper(dbModel.Schema) gromit := "gromit" bridge := BridgeType{ Name: "foo", DatapathType: "bar", DatapathID: &gromit, ExternalIds: map[string]string{ "foo": "bar", "baz": "qux", "waldo": "fred", }, } bridgeUUID := uuid.NewString() bridgeInfo, err := dbModel.NewModelInfo(&bridge) require.NoError(t, err) bridgeRow, err := m.NewRow(bridgeInfo) require.Nil(t, err) transaction := db.NewTransaction("Open_vSwitch") operation := ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: bridgeRow, } res, updates := transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) err = db.Commit("Open_vSwitch", uuid.New(), updates) assert.NoError(t, err) 
bridge.UUID = bridgeUUID br, err := db.Get("Open_vSwitch", "Bridge", bridgeUUID) assert.NoError(t, err) assert.Equal(t, &bridge, br) assert.Equal(t, ovsdb.TableUpdates2{ "Bridge": { bridgeUUID: &ovsdb.RowUpdate2{ Insert: &bridgeRow, New: &bridgeRow, }, }, }, updates) } func TestOvsdbServerUpdate(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) m := mapper.NewMapper(dbModel.Schema) christmas := "christmas" bridge := BridgeType{ Name: "foo", DatapathID: &christmas, ExternalIds: map[string]string{ "foo": "bar", "baz": "qux", "waldo": "fred", }, } bridgeUUID := uuid.NewString() bridgeInfo, err := dbModel.NewModelInfo(&bridge) require.NoError(t, err) bridgeRow, err := m.NewRow(bridgeInfo) require.Nil(t, err) transaction := db.NewTransaction("Open_vSwitch") operation := ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: bridgeRow, } res, updates := transaction.Transact(operation) _, err = checkOperationResults(res, operation) require.NoError(t, err) err = db.Commit("Open_vSwitch", uuid.New(), updates) assert.NoError(t, err) halloween, _ := ovsdb.NewOvsSet([]string{"halloween"}) emptySet, _ := ovsdb.NewOvsSet([]string{}) tests := []struct { name string row ovsdb.Row expected *ovsdb.RowUpdate2 }{ { "update single field", ovsdb.Row{"datapath_type": "waldo"}, &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_type": "waldo", }, }, }, { "update single optional field, with direct value", ovsdb.Row{"datapath_id": "halloween"}, &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": halloween, }, }, }, { "update single optional field, with set", ovsdb.Row{"datapath_id": halloween}, &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": halloween, }, }, }, { "unset single optional field", ovsdb.Row{"datapath_id": emptySet}, &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": emptySet, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { transaction := db.NewTransaction("Open_vSwitch") op := ovsdb.Operation{ Op: ovsdb.OperationUpdate, Table: "Bridge", Where: []ovsdb.Condition{{ Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: bridgeUUID}, }}, Row: tt.row, } res, updates := transaction.Transact(op) errs, err := checkOperationResults(res, op) require.NoErrorf(t, err, "%+v", errs) bridge.UUID = bridgeUUID row, err := db.Get("Open_vSwitch", "Bridge", bridgeUUID) assert.NoError(t, err) br := row.(*BridgeType) assert.NotEqual(t, br, bridgeRow) assert.Equal(t, tt.expected.Modify, getTableUpdates(updates)["Bridge"][bridgeUUID].Modify) }) } } func TestMultipleOps(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) var ops []ovsdb.Operation var op ovsdb.Operation bridgeUUID := uuid.NewString() op = ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": "a_bridge_to_nowhere", }, } ops = append(ops, op) op = ovsdb.Operation{ Op: ovsdb.OperationUpdate, Table: "Bridge", Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: bridgeUUID}), }, Row: ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "port1"}, ovsdb.UUID{GoUUID: "port10"}}}, "external_ids": ovsdb.OvsMap{GoMap: 
map[interface{}]interface{}{"key1": "value1", "key10": "value10"}}, }, } ops = append(ops, op) transaction := db.NewTransaction("Open_vSwitch") results, _ := transaction.Transact(ops...) assert.Len(t, results, len(ops)) assert.NotNil(t, results[0]) assert.Empty(t, results[0].Error) assert.Equal(t, 0, results[0].Count) assert.Equal(t, bridgeUUID, results[0].UUID.GoUUID) assert.NotNil(t, results[1]) assert.Equal(t, 1, results[1].Count) assert.Empty(t, results[1].Error) ops = ops[:0] op = ovsdb.Operation{ Table: "Bridge", Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: bridgeUUID}), }, Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ *ovsdb.NewMutation("external_ids", ovsdb.MutateOperationInsert, ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"keyA": "valueA"}}), *ovsdb.NewMutation("ports", ovsdb.MutateOperationDelete, ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "port1"}, ovsdb.UUID{GoUUID: "port10"}}}), }, } ops = append(ops, op) op2 := ovsdb.Operation{ Table: "Bridge", Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: bridgeUUID}), }, Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ *ovsdb.NewMutation("external_ids", ovsdb.MutateOperationDelete, ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key10": "value10"}}), *ovsdb.NewMutation("ports", ovsdb.MutateOperationInsert, ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "port1"}}}), }, } ops = append(ops, op2) results, updates := transaction.Transact(ops...) require.Len(t, results, len(ops)) for _, result := range results { assert.Empty(t, result.Error) assert.Equal(t, 1, result.Count) } assert.Equal(t, ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ bridgeUUID: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"keyA": "valueA", "key10": "value10"}}, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "port10"}}}, }, Old: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: bridgeUUID}, "name": "a_bridge_to_nowhere", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1", "key10": "value10"}}, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "port1"}, ovsdb.UUID{GoUUID: "port10"}}}, }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: bridgeUUID}, "name": "a_bridge_to_nowhere", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1", "keyA": "valueA"}}, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "port1"}}}, }, }, }, }, getTableUpdates(updates)) } func TestOvsdbServerDbDoesNotExist(t *testing.T) { defDB, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Open_vSwitch": &OvsType{}, "Bridge": &BridgeType{}}) if err != nil { t.Fatal(err) } schema, err := GetSchema() if err != nil { t.Fatal(err) } db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": defDB}) err = db.CreateDatabase("Open_vSwitch", schema) require.NoError(t, err) ops := []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: uuid.NewString(), Row: ovsdb.Row{ "name": "bridge", }, }, { Op: ovsdb.OperationUpdate, Table: "Bridge", Where: []ovsdb.Condition{ { Column: "name", Function: ovsdb.ConditionEqual, Value: "bridge", }, }, Row: ovsdb.Row{ "datapath_type": "type", }, }, } transaction := db.NewTransaction("nonexsitent_db") res, _ := transaction.Transact(ops...) 
assert.Len(t, res, len(ops)) assert.Equal(t, "database does not exist", res[0].Error) assert.Nil(t, res[1]) } func TestCheckIndexes(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) bridgeUUID := uuid.NewString() fscsUUID := uuid.NewString() fscsUUID2 := uuid.NewString() fscsUUID3 := uuid.NewString() ops := []ovsdb.Operation{ { Table: "Bridge", Op: ovsdb.OperationInsert, UUID: bridgeUUID, Row: ovsdb.Row{ "name": "a_bridge_to_nowhere", }, }, { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationInsert, UUID: fscsUUID, Row: ovsdb.Row{ "id": 1, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, }, { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationInsert, UUID: fscsUUID2, Row: ovsdb.Row{ "id": 2, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, }, } transaction := db.NewTransaction("Open_vSwitch") results, updates := transaction.Transact(ops...) require.Len(t, results, len(ops)) for _, result := range results { assert.Equal(t, "", result.Error) } err = db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) tests := []struct { desc string ops func() []ovsdb.Operation expectedErr string }{ { "Inserting an existing database index should fail", func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationInsert, UUID: fscsUUID3, Row: ovsdb.Row{ "id": 1, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, }, } }, "constraint violation", }, { "Updating an index to an existing database index should fail", func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "id": 2, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: fscsUUID}), }, }, } }, "constraint violation", }, { "Updating an index to an existing transaction index should fail", func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationInsert, UUID: fscsUUID3, Row: ovsdb.Row{ "id": 3, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, }, { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "id": 3, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: fscsUUID}), }, }, } }, "constraint violation", }, { "Updating an index to an old index that is updated in the same transaction should succeed", func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationInsert, UUID: fscsUUID3, Row: ovsdb.Row{ "id": 1, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, }, { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "id": 3, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: fscsUUID}), }, }, } }, "", }, { "Updating an index to a old index that is deleted in the same transaction should succeed", func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationInsert, UUID: fscsUUID3, Row: ovsdb.Row{ "id": 1, "bridge": ovsdb.UUID{GoUUID: bridgeUUID}, }, }, { Table: "Flow_Sample_Collector_Set", Op: ovsdb.OperationDelete, Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, 
ovsdb.UUID{GoUUID: fscsUUID}), }, }, } }, "", }, } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { transaction := db.NewTransaction("Open_vSwitch") ops := tt.ops() results, _ := transaction.Transact(ops...) var err string for _, result := range results { if result.Error != "" { err = result.Error break } } require.Equal(t, tt.expectedErr, err, "got a different error than expected") if tt.expectedErr != "" { require.Len(t, results, len(ops)+1) } else { require.Len(t, results, len(ops)) } }) } } func getTableUpdates(update database.Update) ovsdb.TableUpdates2 { tus := ovsdb.TableUpdates2{} tables := update.GetUpdatedTables() for _, table := range tables { tu := ovsdb.TableUpdate2{} _ = update.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error { tu[uuid] = &row return nil }) tus[table] = tu } return tus } func checkOperationResults(result []*ovsdb.OperationResult, ops ...ovsdb.Operation) ([]ovsdb.OperationError, error) { r := make([]ovsdb.OperationResult, len(result)) for i := range result { r[i] = *result[i] } return ovsdb.CheckOperationResults(r, ops) } func TestCheckIndexesWithReferentialIntegrity(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) db := NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) err = db.CreateDatabase("Open_vSwitch", dbModel.Schema) require.NoError(t, err) ovsUUID := uuid.NewString() managerUUID := uuid.NewString() managerUUID2 := uuid.NewString() ops := []ovsdb.Operation{ { Table: "Open_vSwitch", Op: ovsdb.OperationInsert, UUID: ovsUUID, Row: ovsdb.Row{ "manager_options": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: managerUUID}}}, }, }, { Table: "Manager", Op: ovsdb.OperationInsert, UUID: managerUUID, Row: ovsdb.Row{ "target": "target", }, }, } transaction := db.NewTransaction("Open_vSwitch") results, updates := transaction.Transact(ops...) require.Len(t, results, len(ops)) for _, result := range results { assert.Equal(t, "", result.Error) } err = db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) tests := []struct { desc string ops func() []ovsdb.Operation wantUpdates int }{ { // As a row is deleted due to garbage collection, that row's index // should be available for use by a different row desc: "Replacing a strong reference should garbage collect and account for indexes", ops: func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Open_vSwitch", Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "manager_options": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: managerUUID2}}}, }, Where: []ovsdb.Condition{ ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: ovsUUID}), }, }, { Table: "Manager", Op: ovsdb.OperationInsert, UUID: managerUUID2, Row: ovsdb.Row{ "target": "target", }, }, } }, // the update and insert above plus the delete from the garbage // collection wantUpdates: 3, }, { desc: "A row that is not root and not strongly referenced should not cause index collisions", ops: func() []ovsdb.Operation { return []ovsdb.Operation{ { Table: "Manager", Op: ovsdb.OperationInsert, UUID: managerUUID2, Row: ovsdb.Row{ "target": "target", }, }, } }, // no updates as the row is not strongly referenced wantUpdates: 0, }, } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { transaction := db.NewTransaction("Open_vSwitch") ops := tt.ops() results, update := transaction.Transact(ops...) 
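// Each subtest expects the transaction itself to succeed; the number of row
// updates is then counted to verify whether garbage collection of the replaced
// strongly referenced row produced the additional delete update.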
var err string for _, result := range results { if result.Error != "" { err = result.Error break } } require.Empty(t, err, "got an unexpected error") tables := update.GetUpdatedTables() var gotUpdates int for _, table := range tables { _ = update.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error { gotUpdates++ return nil }) } assert.Equal(t, tt.wantUpdates, gotUpdates, "got a different number of updates than expected") }) } } golang-github-ovn-org-libovsdb-0.7.0/database/references.go000066400000000000000000000037671464501522100236650ustar00rootroot00000000000000package database // References tracks the references to rows from other rows at specific // locations in the schema. type References map[ReferenceSpec]Reference // ReferenceSpec specifies details about where in the schema a reference occurs. type ReferenceSpec struct { // ToTable is the table of the row to which the reference is made ToTable string // FromTable is the table of the row from which the reference is made FromTable string // FromColumn is the column of the row from which the reference is made FromColumn string // FromValue flags if the reference is made on a map key or map value when // the column is a map FromValue bool } // Reference maps the UUIDs of rows to which the reference is made to the // rows it is made from type Reference map[string][]string // GetReferences gets references to a row func (rs References) GetReferences(table, uuid string) References { refs := References{} for spec, values := range rs { if spec.ToTable != table { continue } if _, ok := values[uuid]; ok { refs[spec] = Reference{uuid: values[uuid]} } } return refs } // UpdateReferences updates the references with the provided ones. Dangling // references, that is, the references of rows that are no longer referenced // from anywhere, are cleaned up. func (rs References) UpdateReferences(other References) { for spec, otherRefs := range other { for to, from := range otherRefs { rs.updateReference(spec, to, from) } } } // updateReference updates the references to a row at a specific location in the // schema func (rs References) updateReference(spec ReferenceSpec, to string, from []string) { thisRefs, ok := rs[spec] if !ok && len(from) > 0 { // add references from a previously untracked location rs[spec] = Reference{to: from} return } if len(from) > 0 { // replace references to this row at this specific location thisRefs[to] = from return } // otherwise remove previously tracked references delete(thisRefs, to) if len(thisRefs) == 0 { delete(rs, spec) } } golang-github-ovn-org-libovsdb-0.7.0/database/transaction/000077500000000000000000000000001464501522100235255ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/database/transaction/doc.go000066400000000000000000000001241464501522100246160ustar00rootroot00000000000000/* Package transaction provides a transaction implementation */ package transaction golang-github-ovn-org-libovsdb-0.7.0/database/transaction/errors.go000066400000000000000000000007531464501522100253750ustar00rootroot00000000000000package transaction import ( "fmt" "github.com/ovn-org/libovsdb/cache" ) func newIndexExistsDetails(err cache.ErrIndexExists) string { return fmt.Sprintf("operation would cause rows in the \"%s\" table to have identical values (%v) for index on column \"%s\". First row, with UUID %s, was inserted by this transaction. 
Second row, with UUID %s, existed in the database before this operation and was not modified", err.Table, err.Value, err.Index, err.New, err.Existing, ) } golang-github-ovn-org-libovsdb-0.7.0/database/transaction/transaction.go000066400000000000000000000327721464501522100264140ustar00rootroot00000000000000package transaction import ( "fmt" "reflect" "time" "github.com/go-logr/logr" "github.com/google/uuid" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/updates" ) type Transaction struct { ID uuid.UUID Cache *cache.TableCache DeletedRows map[string]struct{} Model model.DatabaseModel DbName string Database database.Database logger *logr.Logger } func NewTransaction(model model.DatabaseModel, dbName string, database database.Database, logger *logr.Logger) Transaction { if logger != nil { l := logger.WithName("transaction") logger = &l } return Transaction{ ID: uuid.New(), DeletedRows: make(map[string]struct{}), Model: model, DbName: dbName, Database: database, logger: logger, } } func (t *Transaction) Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, database.Update) { results := make([]*ovsdb.OperationResult, len(operations), len(operations)+1) update := updates.ModelUpdates{} if !t.Database.Exists(t.DbName) { r := ovsdb.ResultFromError(fmt.Errorf("database does not exist")) results[0] = &r return results, updates.NewDatabaseUpdate(update, nil) } err := t.initializeCache() if err != nil { r := ovsdb.ResultFromError(err) results[0] = &r return results, updates.NewDatabaseUpdate(update, nil) } // Every Insert operation must have a UUID for i := range operations { op := &operations[i] if op.Op == ovsdb.OperationInsert && op.UUID == "" { op.UUID = uuid.NewString() } } // Ensure Named UUIDs are expanded in all operations operations, err = ovsdb.ExpandNamedUUIDs(operations, &t.Model.Schema) if err != nil { r := ovsdb.ResultFromError(err) results[0] = &r return results, updates.NewDatabaseUpdate(update, nil) } var r ovsdb.OperationResult for i, op := range operations { var u *updates.ModelUpdates switch op.Op { case ovsdb.OperationInsert: r, u = t.Insert(&op) case ovsdb.OperationSelect: r = t.Select(op.Table, op.Where, op.Columns) case ovsdb.OperationUpdate: r, u = t.Update(&op) case ovsdb.OperationMutate: r, u = t.Mutate(&op) case ovsdb.OperationDelete: r, u = t.Delete(&op) case ovsdb.OperationWait: r = t.Wait(op.Table, op.Timeout, op.Where, op.Columns, op.Until, op.Rows) case ovsdb.OperationCommit: durable := op.Durable r = t.Commit(*durable) case ovsdb.OperationAbort: r = t.Abort() case ovsdb.OperationComment: r = t.Comment(*op.Comment) case ovsdb.OperationAssert: r = t.Assert(*op.Lock) default: r = ovsdb.ResultFromError(&ovsdb.NotSupported{}) } if r.Error == "" && u != nil { err := update.Merge(t.Model, *u) if err != nil { r = ovsdb.ResultFromError(err) } if err := t.Cache.ApplyCacheUpdate(*u); err != nil { r = ovsdb.ResultFromError(err) } u = nil } result := r results[i] = &result // if an operation failed, no need to process any further operation if r.Error != "" { break } } // if an operation failed, no need to do any further validation if r.Error != "" { return results, updates.NewDatabaseUpdate(update, nil) } // if there is no updates, no need to do any further validation if len(update.GetUpdatedTables()) == 0 { return results, updates.NewDatabaseUpdate(update, nil) } // check & update references update, refUpdates, refs, err := 
updates.ProcessReferences(t.Model, t.Database, update) if err != nil { r = ovsdb.ResultFromError(err) results = append(results, &r) return results, updates.NewDatabaseUpdate(update, refs) } // apply updates resulting from referential integrity to the transaction // caches so they are accounted for when checking index constraints err = t.applyReferenceUpdates(refUpdates) if err != nil { r = ovsdb.ResultFromError(err) results = append(results, &r) return results, updates.NewDatabaseUpdate(update, refs) } // check index constraints if err := t.checkIndexes(); err != nil { if indexExists, ok := err.(*cache.ErrIndexExists); ok { err = ovsdb.NewConstraintViolation(newIndexExistsDetails(*indexExists)) r := ovsdb.ResultFromError(err) results = append(results, &r) } else { r := ovsdb.ResultFromError(err) results = append(results, &r) } return results, updates.NewDatabaseUpdate(update, refs) } return results, updates.NewDatabaseUpdate(update, refs) } func (t *Transaction) applyReferenceUpdates(update updates.ModelUpdates) error { tables := update.GetUpdatedTables() for _, table := range tables { err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error { // track deleted rows due to reference updates if old != nil && new == nil { t.DeletedRows[uuid] = struct{}{} } // warm the cache with updated and deleted rows due to reference // updates if old != nil && !t.Cache.Table(table).HasRow(uuid) { row, err := t.Database.Get(t.DbName, table, uuid) if err != nil { return err } err = t.Cache.Table(table).Create(uuid, row, false) if err != nil { return err } } return nil }) if err != nil { return err } } // apply reference updates to the cache return t.Cache.ApplyCacheUpdate(update) } func (t *Transaction) initializeCache() error { if t.Cache != nil { return nil } var err error t.Cache, err = cache.NewTableCache(t.Model, nil, t.logger) return err } func (t *Transaction) rowsFromTransactionCacheAndDatabase(table string, where []ovsdb.Condition) (map[string]model.Model, error) { err := t.initializeCache() if err != nil { return nil, err } txnRows, err := t.Cache.Table(table).RowsByCondition(where) if err != nil { return nil, fmt.Errorf("failed getting rows for table %s from transaction cache: %v", table, err) } rows, err := t.Database.List(t.DbName, table, where...) if err != nil { return nil, fmt.Errorf("failed getting rows for table %s from database: %v", table, err) } // prefer rows from transaction cache while copying into cache // rows that are in the db. for rowUUID, row := range rows { if txnRow, found := txnRows[rowUUID]; found { rows[rowUUID] = txnRow // delete txnRows so that only inserted rows remain in txnRows delete(txnRows, rowUUID) } else { // warm the transaction cache with the current contents of the row if err := t.Cache.Table(table).Create(rowUUID, row, false); err != nil { return nil, fmt.Errorf("failed warming transaction cache row %s %v for table %s: %v", rowUUID, row, table, err) } } } // add rows that have been inserted in this transaction for rowUUID, row := range txnRows { rows[rowUUID] = row } // exclude deleted rows for rowUUID := range t.DeletedRows { delete(rows, rowUUID) } return rows, nil } // checkIndexes checks that there are no index conflicts: // - no duplicate indexes among any two rows operated with in the transaction // - no duplicate indexes of any transaction row with any database row func (t *Transaction) checkIndexes() error { // check for index conflicts. 
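// Rows staged in the transaction cache are checked both against each other and
// against the committed database contents. Conflicts with rows that this
// transaction deletes or updates are ignored, since those indexes are being
// released or moved within the same transaction.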
tables := t.Cache.Tables() for _, table := range tables { tc := t.Cache.Table(table) for _, row := range tc.RowsShallow() { err := tc.IndexExists(row) if err != nil { return err } err = t.Database.CheckIndexes(t.DbName, table, row) errIndexExists, isErrIndexExists := err.(*cache.ErrIndexExists) if err == nil { continue } if !isErrIndexExists { return err } for _, existing := range errIndexExists.Existing { if _, isDeleted := t.DeletedRows[existing]; isDeleted { // this model is deleted in the transaction, ignore it continue } if tc.HasRow(existing) { // this model is updated in the transaction and was not // detected as a duplicate, so an index must have been // updated, ignore it continue } return err } } } return nil } func (t *Transaction) Insert(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) { if err := ovsdb.ValidateUUID(op.UUID); err != nil { return ovsdb.ResultFromError(err), nil } update := updates.ModelUpdates{} err := update.AddOperation(t.Model, op.Table, op.UUID, nil, op) if err != nil { return ovsdb.ResultFromError(err), nil } result := ovsdb.OperationResult{ UUID: ovsdb.UUID{GoUUID: op.UUID}, } return result, &update } func (t *Transaction) Select(table string, where []ovsdb.Condition, columns []string) ovsdb.OperationResult { var results []ovsdb.Row dbModel := t.Model rows, err := t.rowsFromTransactionCacheAndDatabase(table, where) if err != nil { return ovsdb.ResultFromError(err) } m := dbModel.Mapper for _, row := range rows { info, err := dbModel.NewModelInfo(row) if err != nil { return ovsdb.ResultFromError(err) } resultRow, err := m.NewRow(info) if err != nil { return ovsdb.ResultFromError(err) } results = append(results, resultRow) } return ovsdb.OperationResult{ Rows: results, } } func (t *Transaction) Update(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) { rows, err := t.rowsFromTransactionCacheAndDatabase(op.Table, op.Where) if err != nil { return ovsdb.ResultFromError(err), nil } update := updates.ModelUpdates{} for uuid, old := range rows { err := update.AddOperation(t.Model, op.Table, uuid, old, op) if err != nil { return ovsdb.ResultFromError(err), nil } } // FIXME: We need to filter the returned columns return ovsdb.OperationResult{Count: len(rows)}, &update } func (t *Transaction) Mutate(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) { rows, err := t.rowsFromTransactionCacheAndDatabase(op.Table, op.Where) if err != nil { return ovsdb.ResultFromError(err), nil } update := updates.ModelUpdates{} for uuid, old := range rows { err := update.AddOperation(t.Model, op.Table, uuid, old, op) if err != nil { return ovsdb.ResultFromError(err), nil } } return ovsdb.OperationResult{Count: len(rows)}, &update } func (t *Transaction) Delete(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) { rows, err := t.rowsFromTransactionCacheAndDatabase(op.Table, op.Where) if err != nil { return ovsdb.ResultFromError(err), nil } update := updates.ModelUpdates{} for uuid, row := range rows { err := update.AddOperation(t.Model, op.Table, uuid, row, op) if err != nil { return ovsdb.ResultFromError(err), nil } // track delete operation in transaction to complement cache t.DeletedRows[uuid] = struct{}{} } return ovsdb.OperationResult{Count: len(rows)}, &update } func (t *Transaction) Wait(table string, timeout *int, where []ovsdb.Condition, columns []string, until string, rows []ovsdb.Row) ovsdb.OperationResult { start := time.Now() if until != "!=" && until != "==" { return 
ovsdb.ResultFromError(&ovsdb.NotSupported{}) } dbModel := t.Model realTable := dbModel.Schema.Table(table) if realTable == nil { return ovsdb.ResultFromError(&ovsdb.NotSupported{}) } model, err := dbModel.NewModel(table) if err != nil { return ovsdb.ResultFromError(err) } Loop: for { var filteredRows []ovsdb.Row foundRowModels, err := t.rowsFromTransactionCacheAndDatabase(table, where) if err != nil { return ovsdb.ResultFromError(err) } m := dbModel.Mapper for _, rowModel := range foundRowModels { info, err := dbModel.NewModelInfo(rowModel) if err != nil { return ovsdb.ResultFromError(err) } foundMatch := true for _, column := range columns { columnSchema := info.Metadata.TableSchema.Column(column) for _, r := range rows { i, err := dbModel.NewModelInfo(model) if err != nil { return ovsdb.ResultFromError(err) } err = dbModel.Mapper.GetRowData(&r, i) if err != nil { return ovsdb.ResultFromError(err) } x, err := i.FieldByColumn(column) if err != nil { return ovsdb.ResultFromError(err) } // check to see if field value is default for given rows // if it is default (not provided) we shouldn't try to compare // for equality if ovsdb.IsDefaultValue(columnSchema, x) { continue } y, err := info.FieldByColumn(column) if err != nil { return ovsdb.ResultFromError(err) } if !reflect.DeepEqual(x, y) { foundMatch = false } } } if foundMatch { resultRow, err := m.NewRow(info) if err != nil { return ovsdb.ResultFromError(err) } filteredRows = append(filteredRows, resultRow) } } if until == "==" && len(filteredRows) == len(rows) { return ovsdb.OperationResult{} } else if until == "!=" && len(filteredRows) != len(rows) { return ovsdb.OperationResult{} } if timeout != nil { // TODO(trozet): this really shouldn't just break and loop on a time interval // Really this client handler should pause, wait for another handler to update the DB // and then try again. However the server is single threaded for now and not capable of // doing something like that. 
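// The wait timeout is interpreted in milliseconds; once it elapses the loop
// breaks and the operation returns a "timed out" error. Between attempts the
// server simply sleeps briefly and polls the rows again.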
if time.Since(start) > time.Duration(*timeout)*time.Millisecond { break Loop } } time.Sleep(200 * time.Millisecond) } return ovsdb.ResultFromError(&ovsdb.TimedOut{}) } func (t *Transaction) Commit(durable bool) ovsdb.OperationResult { return ovsdb.ResultFromError(&ovsdb.NotSupported{}) } func (t *Transaction) Abort() ovsdb.OperationResult { return ovsdb.ResultFromError(&ovsdb.NotSupported{}) } func (t *Transaction) Comment(comment string) ovsdb.OperationResult { return ovsdb.ResultFromError(&ovsdb.NotSupported{}) } func (t *Transaction) Assert(lock string) ovsdb.OperationResult { return ovsdb.ResultFromError(&ovsdb.NotSupported{}) } golang-github-ovn-org-libovsdb-0.7.0/example/000077500000000000000000000000001464501522100210675ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/example/ovsdb-server/000077500000000000000000000000001464501522100235105ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/example/ovsdb-server/main.go000066400000000000000000000050151464501522100247640ustar00rootroot00000000000000package main import ( "context" "flag" "fmt" "log" "os" "os/signal" "path/filepath" "runtime" "runtime/pprof" "time" "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/database/inmemory" "github.com/ovn-org/libovsdb/example/vswitchd" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/server" ) var ( cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") memprofile = flag.String("memoryprofile", "", "write memory profile to this file") port = flag.Int("port", 56640, "tcp port to listen on") ) func main() { flag.Parse() var err error if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } if err := pprof.StartCPUProfile(f); err != nil { log.Fatal(err) } defer pprof.StopCPUProfile() } clientDBModel, err := vswitchd.FullDatabaseModel() if err != nil { log.Fatal(err) } wd, err := os.Getwd() if err != nil { log.Fatal(err) } path := filepath.Join(wd, "vswitchd", "ovs.ovsschema") f, err := os.Open(path) if err != nil { log.Fatal(err) } schema, err := ovsdb.SchemaFromFile(f) if err != nil { log.Fatal(err) } ovsDB := inmemory.NewDatabase(map[string]model.ClientDBModel{ schema.Name: clientDBModel, }) dbModel, errs := model.NewDatabaseModel(schema, clientDBModel) if len(errs) > 0 { log.Fatal(errs) } s, err := server.NewOvsdbServer(ovsDB, dbModel) if err != nil { log.Fatal(err) } defer s.Close() sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt) go func(o *server.OvsdbServer) { if err := o.Serve("tcp", fmt.Sprintf(":%d", *port)); err != nil { log.Fatal(err) } }(s) time.Sleep(1 * time.Second) c, err := client.NewOVSDBClient(clientDBModel, client.WithEndpoint(fmt.Sprintf("tcp::%d", *port))) if err != nil { log.Fatal(err) } err = c.Connect(context.Background()) if err != nil { log.Fatal(err) } ovsRow := &vswitchd.OpenvSwitch{ UUID: "ovs", } ovsOps, err := c.Create(ovsRow) if err != nil { log.Fatal(err) } reply, err := c.Transact(context.Background(), ovsOps...) 
if err != nil { log.Fatal(err) } _, err = ovsdb.CheckOperationResults(reply, ovsOps) if err != nil { log.Fatal(err) } c.Close() log.Printf("listening on tcp::%d", *port) <-sig if *memprofile != "" { f, err := os.Create(*memprofile) if err != nil { log.Fatal(err) } defer f.Close() runtime.GC() if err := pprof.WriteHeapProfile(f); err != nil { log.Fatal("could not write memory profile: ", err) } } } golang-github-ovn-org-libovsdb-0.7.0/example/play_with_ovs/000077500000000000000000000000001464501522100237565ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/example/play_with_ovs/main.go000066400000000000000000000063041464501522100252340ustar00rootroot00000000000000package main import ( "context" "flag" "fmt" "log" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/example/vswitchd" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // Silly game that detects creation of Bridge named "stop" and exits // Just a demonstration of how an app can use libovsdb library to configure and manage OVS const ( bridgeTable = "Bridge" ovsTable = "Open_vSwitch" ) var quit chan bool var update chan model.Model var rootUUID string var connection = flag.String("ovsdb", "unix:/var/run/openvswitch/db.sock", "OVSDB connection string") func play(ovs client.Client) { go processInput(ovs) for model := range update { bridge := model.(*vswitchd.Bridge) if bridge.Name == "stop" { fmt.Printf("Bridge stop detected: %+v\n", *bridge) ovs.Disconnect() quit <- true } else { fmt.Printf("Current list of bridges:\n") var bridges []vswitchd.Bridge if err := ovs.List(context.Background(), &bridges); err != nil { log.Fatal(err) } for _, b := range bridges { fmt.Printf("UUID: %s Name: %s\n", b.UUID, b.Name) } } } } func createBridge(ovs client.Client, bridgeName string) { bridge := vswitchd.Bridge{ UUID: "gopher", Name: bridgeName, } insertOp, err := ovs.Create(&bridge) if err != nil { log.Fatal(err) } ovsRow := vswitchd.OpenvSwitch{ UUID: rootUUID, } mutateOps, err := ovs.Where(&ovsRow).Mutate(&ovsRow, model.Mutation{ Field: &ovsRow.Bridges, Mutator: "insert", Value: []string{bridge.UUID}, }) if err != nil { log.Fatal(err) } operations := append(insertOp, mutateOps...) reply, err := ovs.Transact(context.TODO(), operations...) 
if err != nil { log.Fatal(err) } if _, err := ovsdb.CheckOperationResults(reply, operations); err != nil { log.Fatal(err) } fmt.Println("Bridge Addition Successful : ", reply[0].UUID.GoUUID) } func processInput(ovs client.Client) { for { fmt.Printf("\n Enter a Bridge Name : ") var bridgeName string fmt.Scanf("%s", &bridgeName) if bridgeName == "" { continue } createBridge(ovs, bridgeName) } } func main() { flag.Parse() quit = make(chan bool) update = make(chan model.Model) clientDBModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{bridgeTable: &vswitchd.Bridge{}, ovsTable: &vswitchd.OpenvSwitch{}}) if err != nil { log.Fatal("Unable to create DB model ", err) } ovs, err := client.NewOVSDBClient(clientDBModel, client.WithEndpoint(*connection)) if err != nil { log.Fatal(err) } err = ovs.Connect(context.Background()) if err != nil { log.Fatal(err) } defer ovs.Disconnect() ovs.Cache().AddEventHandler(&cache.EventHandlerFuncs{ AddFunc: func(table string, model model.Model) { if table == bridgeTable { update <- model } }, }) _, err = ovs.Monitor( context.TODO(), ovs.NewMonitor( client.WithTable(&vswitchd.OpenvSwitch{}), client.WithTable(&vswitchd.Bridge{}), ), ) if err != nil { log.Fatal(err) } // Get root UUID for uuid := range ovs.Cache().Table("Open_vSwitch").Rows() { rootUUID = uuid } fmt.Println(`Silly game of stopping this app when a Bridge with name "stop" is monitored !`) go play(ovs) <-quit } golang-github-ovn-org-libovsdb-0.7.0/example/vswitchd/000077500000000000000000000000001464501522100227225ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/example/vswitchd/gen.go000066400000000000000000000001351464501522100240210ustar00rootroot00000000000000package vswitchd //go:generate ../../bin/modelgen --extended -p vswitchd -o . 
ovs.ovsschema golang-github-ovn-org-libovsdb-0.7.0/go.mod000066400000000000000000000046371464501522100205540ustar00rootroot00000000000000module github.com/ovn-org/libovsdb go 1.18 require ( github.com/cenkalti/backoff/v4 v4.1.3 github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 github.com/go-logr/logr v1.2.2 github.com/go-logr/stdr v1.2.2 github.com/google/uuid v1.2.0 github.com/ory/dockertest/v3 v3.9.1 github.com/prometheus/client_golang v1.12.1 github.com/stretchr/testify v1.8.0 golang.org/x/text v0.14.0 ) require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenk/hub v1.0.1 // indirect github.com/cenkalti/hub v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/cli v20.10.14+incompatible // indirect github.com/docker/docker v24.0.9+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/kr/pretty v0.2.1 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.1.12 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) golang-github-ovn-org-libovsdb-0.7.0/go.sum000066400000000000000000001541021464501522100205720ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod 
h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA= github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg= github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs= github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/cli v20.10.14+incompatible h1:dSBKJOVesDgHo7rbxlYjYsXe7gPzrTT+/cKQgpDAazg= github.com/docker/cli v20.10.14+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane 
v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= 
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2 h1:hRGSmZu7j271trc9sneMrpOW7GN5ngLm8YUZIPzf394= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod 
h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= golang-github-ovn-org-libovsdb-0.7.0/mapper/000077500000000000000000000000001464501522100207205ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/mapper/info.go000066400000000000000000000124311464501522100222030ustar00rootroot00000000000000package mapper import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/ovsdb" ) // ErrColumnNotFound is an error that can occur when the column does not exist for a table type ErrColumnNotFound struct { column string table string } // Error implements the error interface func (e *ErrColumnNotFound) Error() string { return fmt.Sprintf("column: %s not found in table: %s", e.column, e.table) } func NewErrColumnNotFound(column, table string) *ErrColumnNotFound { return &ErrColumnNotFound{ column: column, table: table, } } // Info is a struct that wraps an object with its metadata type Info struct { // FieldName indexed by column Obj interface{} Metadata Metadata } // Metadata represents the information needed to know how to map OVSDB columns into an objetss fields type Metadata struct { Fields map[string]string // Map of ColumnName -> FieldName TableSchema *ovsdb.TableSchema // TableSchema associated TableName string // Table name } // FieldByColumn returns the field value that corresponds to a column func (i *Info) FieldByColumn(column string) (interface{}, error) { fieldName, ok := i.Metadata.Fields[column] if !ok { return nil, NewErrColumnNotFound(column, i.Metadata.TableName) } return reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName).Interface(), nil } // FieldByColumn returns the field value that corresponds to a column func (i *Info) hasColumn(column string) bool { _, ok := i.Metadata.Fields[column] return ok } // SetField sets the field in the column to the specified value func (i *Info) SetField(column string, value interface{}) error { fieldName, ok := i.Metadata.Fields[column] if !ok { return fmt.Errorf("SetField: column %s not found in orm info", column) } fieldValue := reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName) if 
!fieldValue.Type().AssignableTo(reflect.TypeOf(value)) { return fmt.Errorf("column %s: native value %v (%s) is not assignable to field %s (%s)", column, value, reflect.TypeOf(value), fieldName, fieldValue.Type()) } fieldValue.Set(reflect.ValueOf(value)) return nil } // ColumnByPtr returns the column name that corresponds to the field by the field's pointer func (i *Info) ColumnByPtr(fieldPtr interface{}) (string, error) { fieldPtrVal := reflect.ValueOf(fieldPtr) if fieldPtrVal.Kind() != reflect.Ptr { return "", ovsdb.NewErrWrongType("ColumnByPointer", "pointer to a field in the struct", fieldPtr) } offset := fieldPtrVal.Pointer() - reflect.ValueOf(i.Obj).Pointer() objType := reflect.TypeOf(i.Obj).Elem() for j := 0; j < objType.NumField(); j++ { if objType.Field(j).Offset == offset { column := objType.Field(j).Tag.Get("ovsdb") if _, ok := i.Metadata.Fields[column]; !ok { return "", fmt.Errorf("field does not have orm column information") } return column, nil } } return "", fmt.Errorf("field pointer does not correspond to orm struct") } // getValidIndexes inspects the object and returns the a list of indexes (set of columns) for witch // the object has non-default values func (i *Info) getValidIndexes() ([][]string, error) { var validIndexes [][]string var possibleIndexes [][]string possibleIndexes = append(possibleIndexes, []string{"_uuid"}) possibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...) // Iterate through indexes and validate them OUTER: for _, idx := range possibleIndexes { for _, col := range idx { if !i.hasColumn(col) { continue OUTER } columnSchema := i.Metadata.TableSchema.Column(col) if columnSchema == nil { continue OUTER } field, err := i.FieldByColumn(col) if err != nil { return nil, err } if !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) { continue OUTER } } validIndexes = append(validIndexes, idx) } return validIndexes, nil } // NewInfo creates a MapperInfo structure around an object based on a given table schema func NewInfo(tableName string, table *ovsdb.TableSchema, obj interface{}) (*Info, error) { objPtrVal := reflect.ValueOf(obj) if objPtrVal.Type().Kind() != reflect.Ptr { return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj) } objVal := reflect.Indirect(objPtrVal) if objVal.Kind() != reflect.Struct { return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj) } objType := objVal.Type() fields := make(map[string]string, objType.NumField()) for i := 0; i < objType.NumField(); i++ { field := objType.Field(i) colName := field.Tag.Get("ovsdb") if colName == "" { // Untagged fields are ignored continue } column := table.Column(colName) if column == nil { return nil, &ErrMapper{ objType: objType.String(), field: field.Name, fieldType: field.Type.String(), fieldTag: colName, reason: "Column does not exist in schema", } } // Perform schema-based type checking expType := ovsdb.NativeType(column) if expType != field.Type { return nil, &ErrMapper{ objType: objType.String(), field: field.Name, fieldType: field.Type.String(), fieldTag: colName, reason: fmt.Sprintf("Wrong type, column expects %s", expType), } } fields[colName] = field.Name } return &Info{ Obj: obj, Metadata: Metadata{ Fields: fields, TableSchema: table, TableName: tableName, }, }, nil } golang-github-ovn-org-libovsdb-0.7.0/mapper/info_test.go000066400000000000000000000163251464501522100232500ustar00rootroot00000000000000package mapper import ( "encoding/json" "fmt" "testing" "github.com/ovn-org/libovsdb/ovsdb" 
"github.com/stretchr/testify/assert" ) var sampleTable = []byte(`{ "columns": { "aString": { "type": "string" }, "aInteger": { "type": "integer" }, "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "value": "string" } } } }`) func TestNewMapperInfo(t *testing.T) { type test struct { name string table []byte obj interface{} expectedCols []string err bool } tests := []test{ { name: "no_orm", table: sampleTable, obj: &struct { foo string bar int }{}, err: false, }, } for _, tt := range tests { t.Run(fmt.Sprintf("NewMapper_%s", tt.name), func(t *testing.T) { var table ovsdb.TableSchema err := json.Unmarshal(tt.table, &table) assert.Nil(t, err) info, err := NewInfo("Test", &table, tt.obj) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) } for _, col := range tt.expectedCols { assert.Truef(t, info.hasColumn(col), "Expected column should be present in Mapper Info") } assert.Equal(t, "Test", info.Metadata.TableName) }) } } func TestMapperInfoSet(t *testing.T) { type obj struct { Ostring string `ovsdb:"aString"` Oint int `ovsdb:"aInteger"` Oset []string `ovsdb:"aSet"` Omap map[string]string `ovsdb:"aMap"` } type test struct { name string table []byte obj interface{} field interface{} column string err bool } tests := []test{ { name: "string", table: sampleTable, obj: &obj{}, field: "foo", column: "aString", err: false, }, { name: "set", table: sampleTable, obj: &obj{}, field: []string{"foo", "bar"}, column: "aSet", err: false, }, { name: "map", table: sampleTable, obj: &obj{}, field: map[string]string{"foo": "bar"}, column: "aMap", err: false, }, { name: "nonempty", table: sampleTable, obj: &obj{ Omap: map[string]string{"original": "stuff"}, Oint: 1, Ostring: "foo", Oset: []string{"foo"}, }, field: map[string]string{"foo": "bar"}, column: "aMap", err: false, }, { name: "un-assignable", table: sampleTable, obj: &obj{}, field: []string{"foo"}, column: "aMap", err: true, }, } for _, tt := range tests { t.Run(fmt.Sprintf("SetField_%s", tt.name), func(t *testing.T) { var table ovsdb.TableSchema err := json.Unmarshal(tt.table, &table) assert.Nil(t, err) info, err := NewInfo("Test", &table, tt.obj) assert.Nil(t, err) err = info.SetField(tt.column, tt.field) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) readBack, err := info.FieldByColumn(tt.column) assert.Nil(t, err) assert.Equalf(t, tt.field, readBack, "Set field should match original") } }) } } func TestMapperInfoColByPtr(t *testing.T) { type obj struct { ostring string `ovsdb:"aString"` oint int `ovsdb:"aInteger"` oset []string `ovsdb:"aSet"` omap map[string]string `ovsdb:"aMap"` } obj1 := obj{} type test struct { name string table []byte obj interface{} field interface{} column string err bool } tests := []test{ { name: "first", table: sampleTable, obj: &obj1, field: &obj1.ostring, column: "aString", err: false, }, { name: "middle", table: sampleTable, obj: &obj1, field: &obj1.oint, column: "aInteger", err: false, }, { name: "middle2", table: sampleTable, obj: &obj1, field: &obj1.oset, column: "aSet", err: false, }, { name: "last", table: sampleTable, obj: &obj1, field: &obj1.omap, column: "aMap", err: false, }, { name: "external", table: sampleTable, obj: &obj1, field: &obj{}, err: true, }, } for _, tt := range tests { t.Run(fmt.Sprintf("GetFieldByPtr_%s", tt.name), func(t *testing.T) { var table ovsdb.TableSchema err := json.Unmarshal(tt.table, &table) assert.Nil(t, err) info, err := NewInfo("Test", &table, tt.obj) assert.Nil(t, err) col, err := 
info.ColumnByPtr(tt.field) if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Equalf(t, tt.column, col, "Column name extracted should match") } }) } } func TestOrmGetIndex(t *testing.T) { tableSchema := []byte(`{ "indexes": [["name"],["composed_1","composed_2"]], "columns": { "name": { "type": "string" }, "composed_1": { "type": { "key": "string" } }, "composed_2": { "type": { "key": "string" } }, "config": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } }`) var table ovsdb.TableSchema err := json.Unmarshal(tableSchema, &table) assert.Nil(t, err) type obj struct { ID string `ovsdb:"_uuid"` MyName string `ovsdb:"name"` Config map[string]string `ovsdb:"config"` Comp1 string `ovsdb:"composed_1"` Comp2 string `ovsdb:"composed_2"` } type test struct { name string obj interface{} expected [][]string err bool } tests := []test{ { name: "empty", obj: &obj{}, expected: [][]string{}, err: false, }, { name: "UUID", obj: &obj{ ID: aUUID0, }, expected: [][]string{{"_uuid"}}, err: false, }, { name: "simple", obj: &obj{ MyName: "foo", }, expected: [][]string{{"name"}}, err: false, }, { name: "additional index", obj: &obj{ ID: aUUID0, MyName: "foo", }, expected: [][]string{{"_uuid"}, {"name"}}, err: false, }, { name: "complex index", obj: &obj{ Comp1: "foo", Comp2: "bar", }, expected: [][]string{{"composed_1", "composed_2"}}, err: false, }, { name: "multiple index", obj: &obj{ MyName: "something", Comp1: "foo", Comp2: "bar", }, expected: [][]string{{"name"}, {"composed_1", "composed_2"}}, err: false, }, { name: "all ", obj: &obj{ ID: aUUID0, MyName: "something", Comp1: "foo", Comp2: "bar", }, expected: [][]string{{"_uuid"}, {"name"}, {"composed_1", "composed_2"}}, err: false, }, { name: "Error: None", obj: &obj{ Config: map[string]string{"foo": "bar"}, }, expected: [][]string{}, err: false, }, } for _, tt := range tests { t.Run(fmt.Sprintf("GetValidIndexes_%s", tt.name), func(t *testing.T) { info, err := NewInfo("Test", &table, tt.obj) assert.Nil(t, err) indexes, err := info.getValidIndexes() if tt.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.ElementsMatchf(t, tt.expected, indexes, "Indexes must match") } }) } } golang-github-ovn-org-libovsdb-0.7.0/mapper/mapper.go000066400000000000000000000227211464501522100225370ustar00rootroot00000000000000package mapper import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/ovsdb" ) // Mapper offers functions to interact with libovsdb through user-provided native structs. // The way to specify what field of the struct goes // to what column in the database id through field a field tag. // The tag used is "ovsdb" and has the following structure // 'ovsdb:"${COLUMN_NAME}"' // where COLUMN_NAME is the name of the column and must match the schema // //Example: // type MyObj struct { // Name string `ovsdb:"name"` // } type Mapper struct { Schema ovsdb.DatabaseSchema } // ErrMapper describes an error in an Mapper type type ErrMapper struct { objType string field string fieldType string fieldTag string reason string } func (e *ErrMapper) Error() string { return fmt.Sprintf("Mapper Error. 
Object type %s contains field %s (%s) ovs tag %s: %s", e.objType, e.field, e.fieldType, e.fieldTag, e.reason) } // NewMapper returns a new mapper func NewMapper(schema ovsdb.DatabaseSchema) Mapper { return Mapper{ Schema: schema, } } // GetRowData transforms a Row to a struct based on its tags // The result object must be given as pointer to an object with the right tags func (m Mapper) GetRowData(row *ovsdb.Row, result *Info) error { if row == nil { return nil } return m.getData(*row, result) } // getData transforms a map[string]interface{} containing OvS types (e.g: a ResultRow // has this format) to orm struct // The result object must be given as pointer to an object with the right tags func (m Mapper) getData(ovsData ovsdb.Row, result *Info) error { for name, column := range result.Metadata.TableSchema.Columns { if !result.hasColumn(name) { // If provided struct does not have a field to hold this value, skip it continue } ovsElem, ok := ovsData[name] if !ok { // Ignore missing columns continue } nativeElem, err := ovsdb.OvsToNative(column, ovsElem) if err != nil { return fmt.Errorf("table %s, column %s: failed to extract native element: %s", result.Metadata.TableName, name, err.Error()) } if err := result.SetField(name, nativeElem); err != nil { return err } } return nil } // NewRow transforms an orm struct to a map[string] interface{} that can be used as libovsdb.Row // By default, default or null values are skipped. This behavior can be modified by specifying // a list of fields (pointers to fields in the struct) to be added to the row func (m Mapper) NewRow(data *Info, fields ...interface{}) (ovsdb.Row, error) { columns := make(map[string]*ovsdb.ColumnSchema) for k, v := range data.Metadata.TableSchema.Columns { columns[k] = v } columns["_uuid"] = &ovsdb.UUIDColumn ovsRow := make(map[string]interface{}, len(columns)) for name, column := range columns { nativeElem, err := data.FieldByColumn(name) if err != nil { // If provided struct does not have a field to hold this value, skip it continue } // add specific fields if len(fields) > 0 { found := false for _, f := range fields { col, err := data.ColumnByPtr(f) if err != nil { return nil, err } if col == name { found = true break } } if !found { continue } } if len(fields) == 0 && ovsdb.IsDefaultValue(column, nativeElem) { continue } ovsElem, err := ovsdb.NativeToOvs(column, nativeElem) if err != nil { return nil, fmt.Errorf("table %s, column %s: failed to generate ovs element. %s", data.Metadata.TableName, name, err.Error()) } ovsRow[name] = ovsElem } return ovsRow, nil } // NewEqualityCondition returns a list of equality conditions that match a given object // A list of valid columns that shall be used as a index can be provided. // If none are provided, we will try to use object's field that matches the '_uuid' ovsdb tag // If it does not exist or is null (""), then we will traverse all of the table indexes and // use the first index (list of simultaneously unique columns) for which the provided mapper // object has valid data. The order in which they are traversed matches the order defined // in the schema. // By `valid data` we mean non-default data. func (m Mapper) NewEqualityCondition(data *Info, fields ...interface{}) ([]ovsdb.Condition, error) { var conditions []ovsdb.Condition var condIndex [][]string // If index is provided, use it. 
If not, obtain the valid indexes from the mapper info if len(fields) > 0 { providedIndex := []string{} for i := range fields { if col, err := data.ColumnByPtr(fields[i]); err == nil { providedIndex = append(providedIndex, col) } else { return nil, err } } condIndex = append(condIndex, providedIndex) } else { var err error condIndex, err = data.getValidIndexes() if err != nil { return nil, err } } if len(condIndex) == 0 { return nil, fmt.Errorf("failed to find a valid index") } // Pick the first valid index for _, col := range condIndex[0] { field, err := data.FieldByColumn(col) if err != nil { return nil, err } column := data.Metadata.TableSchema.Column(col) if column == nil { return nil, fmt.Errorf("column %s not found", col) } ovsVal, err := ovsdb.NativeToOvs(column, field) if err != nil { return nil, err } conditions = append(conditions, ovsdb.NewCondition(col, ovsdb.ConditionEqual, ovsVal)) } return conditions, nil } // EqualFields compares two mapped objects. // The indexes to use for comparison are, the _uuid, the table indexes and the columns that correspond // to the mapped fields pointed to by 'fields'. They must be pointers to fields on the first mapped element (i.e: one) func (m Mapper) EqualFields(one, other *Info, fields ...interface{}) (bool, error) { indexes := []string{} for _, f := range fields { col, err := one.ColumnByPtr(f) if err != nil { return false, err } indexes = append(indexes, col) } return m.equalIndexes(one, other, indexes...) } // NewCondition returns a ovsdb.Condition based on the model func (m Mapper) NewCondition(data *Info, field interface{}, function ovsdb.ConditionFunction, value interface{}) (*ovsdb.Condition, error) { column, err := data.ColumnByPtr(field) if err != nil { return nil, err } // Check that the condition is valid columnSchema := data.Metadata.TableSchema.Column(column) if columnSchema == nil { return nil, fmt.Errorf("column %s not found", column) } if err := ovsdb.ValidateCondition(columnSchema, function, value); err != nil { return nil, err } ovsValue, err := ovsdb.NativeToOvs(columnSchema, value) if err != nil { return nil, err } ovsdbCondition := ovsdb.NewCondition(column, function, ovsValue) return &ovsdbCondition, nil } // NewMutation creates a RFC7047 mutation object based on an ORM object and the mutation fields (in native format) // It takes care of field validation against the column type func (m Mapper) NewMutation(data *Info, column string, mutator ovsdb.Mutator, value interface{}) (*ovsdb.Mutation, error) { // Check the column exists in the object if !data.hasColumn(column) { return nil, fmt.Errorf("mutation contains column %s that does not exist in object %v", column, data) } // Check that the mutation is valid columnSchema := data.Metadata.TableSchema.Column(column) if columnSchema == nil { return nil, fmt.Errorf("column %s not found", column) } if err := ovsdb.ValidateMutation(columnSchema, mutator, value); err != nil { return nil, err } var ovsValue interface{} var err error // Usually a mutation value is of the same type of the value being mutated // except for delete mutation of maps where it can also be a list of same type of // keys (rfc7047 5.1). Handle this special case here. 
if mutator == "delete" && columnSchema.Type == ovsdb.TypeMap && reflect.TypeOf(value).Kind() != reflect.Map { // It's OK to cast the value to a list of elements because validation has passed ovsSet, err := ovsdb.NewOvsSet(value) if err != nil { return nil, err } ovsValue = ovsSet } else { ovsValue, err = ovsdb.NativeToOvs(columnSchema, value) if err != nil { return nil, err } } return &ovsdb.Mutation{Column: column, Mutator: mutator, Value: ovsValue}, nil } // equalIndexes returns whether both models are equal from the DB point of view // Two objects are considered equal if any of the following conditions is true // They have a field tagged with column name '_uuid' and their values match // For any of the indexes defined in the Table Schema, the values all of its columns are simultaneously equal // (as per RFC7047) // The values of all of the optional indexes passed as variadic parameter to this function are equal. func (m Mapper) equalIndexes(one, other *Info, indexes ...string) (bool, error) { match := false oneIndexes, err := one.getValidIndexes() if err != nil { return false, err } otherIndexes, err := other.getValidIndexes() if err != nil { return false, err } oneIndexes = append(oneIndexes, indexes) otherIndexes = append(otherIndexes, indexes) for _, lidx := range oneIndexes { for _, ridx := range otherIndexes { if reflect.DeepEqual(ridx, lidx) { // All columns in an index must be simultaneously equal for _, col := range lidx { if !one.hasColumn(col) || !other.hasColumn(col) { break } lfield, err := one.FieldByColumn(col) if err != nil { return false, err } rfield, err := other.FieldByColumn(col) if err != nil { return false, err } if reflect.DeepEqual(lfield, rfield) { match = true } else { match = false break } } if match { return true, nil } } } } return false, nil } golang-github-ovn-org-libovsdb-0.7.0/mapper/mapper_test.go000066400000000000000000000574331464501522100236060ustar00rootroot00000000000000package mapper import ( "encoding/json" "fmt" "testing" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) var ( aString = "foo" aEnum = "enum1" aSet = []string{"a", "set", "of", "strings"} aUUID0 = "2f77b348-9768-4866-b761-89d5177ecda0" aUUID1 = "2f77b348-9768-4866-b761-89d5177ecda1" aUUID2 = "2f77b348-9768-4866-b761-89d5177ecda2" aUUID3 = "2f77b348-9768-4866-b761-89d5177ecda3" aUUIDSet = []string{ aUUID0, aUUID1, aUUID2, aUUID3, } aIntSet = []int{ 3, 2, 42, } aFloat = 42.00 aFloatSet = []float64{ 3.0, 2.0, 42.0, } aMap = map[string]string{ "key1": "value1", "key2": "value2", "key3": "value3", } ) var testSchema = []byte(`{ "cksum": "223619766 22548", "name": "TestSchema", "tables": { "TestTable": { "columns": { "aString": { "type": "string" }, "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aSingleSet": { "type": { "key": "string", "max": "unlimited", "min": 0, "max": 1 } }, "aUUIDSet": { "type": { "key": { "refTable": "SomeOtherTAble", "refType": "weak", "type": "uuid" }, "min": 0, "max": "unlimited" } }, "aUUID": { "type": { "key": { "refTable": "SomeOtherTAble", "refType": "weak", "type": "uuid" }, "min": 1, "max": 1 } }, "aIntSet": { "type": { "key": { "type": "integer" }, "min": 0, "max": "unlimited" } }, "aFloat": { "type": { "key": { "type": "real" } } }, "aFloatSet": { "type": { "key": { "type": "real" }, "min": 0, "max": 10 } }, "aEmptySet": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "aEnum": { "type": { "key": { "enum": [ "set", [ "enum1", "enum2", "enum3" ] ], "type": "string" } } }, "aMap": 
{ "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`) func getOvsTestRow(t *testing.T) ovsdb.Row { ovsRow := ovsdb.NewRow() ovsRow["aString"] = aString ovsRow["aSet"] = testOvsSet(t, aSet) // Set's can hold the value if they have len == 1 ovsRow["aSingleSet"] = aString us := make([]ovsdb.UUID, 0) for _, u := range aUUIDSet { us = append(us, ovsdb.UUID{GoUUID: u}) } ovsRow["aUUIDSet"] = testOvsSet(t, us) ovsRow["aUUID"] = ovsdb.UUID{GoUUID: aUUID0} ovsRow["aIntSet"] = testOvsSet(t, aIntSet) ovsRow["aFloat"] = aFloat ovsRow["aFloatSet"] = testOvsSet(t, aFloatSet) ovsRow["aEmptySet"] = testOvsSet(t, []string{}) ovsRow["aEnum"] = aEnum ovsRow["aMap"] = testOvsMap(t, aMap) return ovsRow } func TestMapperGetData(t *testing.T) { type ormTestType struct { AString string `ovsdb:"aString"` ASet []string `ovsdb:"aSet"` ASingleSet *string `ovsdb:"aSingleSet"` AUUIDSet []string `ovsdb:"aUUIDSet"` AUUID string `ovsdb:"aUUID"` AIntSet []int `ovsdb:"aIntSet"` AFloat float64 `ovsdb:"aFloat"` AFloatSet []float64 `ovsdb:"aFloatSet"` YetAnotherStringSet []string `ovsdb:"aEmptySet"` AEnum string `ovsdb:"aEnum"` AMap map[string]string `ovsdb:"aMap"` NonTagged string } var expected = ormTestType{ AString: aString, ASet: aSet, ASingleSet: &aString, AUUIDSet: aUUIDSet, AUUID: aUUID0, AIntSet: aIntSet, AFloat: aFloat, AFloatSet: aFloatSet, YetAnotherStringSet: []string{}, AEnum: aEnum, AMap: aMap, NonTagged: "something", } ovsRow := getOvsTestRow(t) /* Code under test */ var schema ovsdb.DatabaseSchema if err := json.Unmarshal(testSchema, &schema); err != nil { t.Error(err) } mapper := NewMapper(schema) test := ormTestType{ NonTagged: "something", } testInfo, err := NewInfo("TestTable", schema.Table("TestTable"), &test) assert.NoError(t, err) err = mapper.GetRowData(&ovsRow, testInfo) assert.NoError(t, err) /*End code under test*/ if err != nil { t.Error(err) } assert.Equal(t, expected, test) } func TestMapperNewRow(t *testing.T) { var schema ovsdb.DatabaseSchema if err := json.Unmarshal(testSchema, &schema); err != nil { t.Error(err) } tests := []struct { name string objInput interface{} expectedRow ovsdb.Row shoulderr bool }{{ name: "string", objInput: &struct { AString string `ovsdb:"aString"` }{ AString: aString, }, expectedRow: ovsdb.Row(map[string]interface{}{"aString": aString}), }, { name: "set", objInput: &struct { SomeSet []string `ovsdb:"aSet"` }{ SomeSet: aSet, }, expectedRow: ovsdb.Row(map[string]interface{}{"aSet": testOvsSet(t, aSet)}), }, { name: "emptySet with no column specification", objInput: &struct { EmptySet []string `ovsdb:"aSet"` }{ EmptySet: []string{}, }, expectedRow: ovsdb.Row(map[string]interface{}{}), }, { name: "UUID", objInput: &struct { MyUUID string `ovsdb:"aUUID"` }{ MyUUID: aUUID0, }, expectedRow: ovsdb.Row(map[string]interface{}{"aUUID": ovsdb.UUID{GoUUID: aUUID0}}), }, { name: "aUUIDSet", objInput: &struct { MyUUIDSet []string `ovsdb:"aUUIDSet"` }{ MyUUIDSet: []string{aUUID0, aUUID1}, }, expectedRow: ovsdb.Row(map[string]interface{}{"aUUIDSet": testOvsSet(t, []ovsdb.UUID{{GoUUID: aUUID0}, {GoUUID: aUUID1}})}), }, { name: "aIntSet", objInput: &struct { MyIntSet []int `ovsdb:"aIntSet"` }{ MyIntSet: []int{0, 42}, }, expectedRow: ovsdb.Row(map[string]interface{}{"aIntSet": testOvsSet(t, []int{0, 42})}), }, { name: "aFloat", objInput: &struct { MyFloat float64 `ovsdb:"aFloat"` }{ MyFloat: 42.42, }, expectedRow: ovsdb.Row(map[string]interface{}{"aFloat": 42.42}), }, { name: "aFloatSet", objInput: &struct { MyFloatSet []float64 `ovsdb:"aFloatSet"` 
}{ MyFloatSet: aFloatSet, }, expectedRow: ovsdb.Row(map[string]interface{}{"aFloatSet": testOvsSet(t, aFloatSet)}), }, { name: "Enum", objInput: &struct { MyEnum string `ovsdb:"aEnum"` }{ MyEnum: aEnum, }, expectedRow: ovsdb.Row(map[string]interface{}{"aEnum": aEnum}), }, { name: "untagged fields should not affect row", objInput: &struct { AString string `ovsdb:"aString"` MyStuff map[string]string }{ AString: aString, MyStuff: map[string]string{"this is": "private"}, }, expectedRow: ovsdb.Row(map[string]interface{}{"aString": aString}), }, { name: "Maps", objInput: &struct { MyMap map[string]string `ovsdb:"aMap"` }{ MyMap: aMap, }, expectedRow: ovsdb.Row(map[string]interface{}{"aMap": testOvsMap(t, aMap)}), }, } for _, test := range tests { t.Run(fmt.Sprintf("NewRow: %s", test.name), func(t *testing.T) { mapper := NewMapper(schema) info, err := NewInfo("TestTable", schema.Table("TestTable"), test.objInput) assert.NoError(t, err) row, err := mapper.NewRow(info) if test.shoulderr { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Equalf(t, test.expectedRow, row, "NewRow should match expected") } }) } } func TestMapperNewRowFields(t *testing.T) { var schema ovsdb.DatabaseSchema if err := json.Unmarshal(testSchema, &schema); err != nil { t.Error(err) } type obj struct { MyMap map[string]string `ovsdb:"aMap"` MySet []string `ovsdb:"aSet"` MyString string `ovsdb:"aString"` MyFloat float64 `ovsdb:"aFloat"` } testObj := obj{} tests := []struct { name string prepare func(*obj) expectedRow ovsdb.Row fields []interface{} err bool }{{ name: "string", prepare: func(o *obj) { o.MyString = aString }, expectedRow: ovsdb.Row(map[string]interface{}{"aString": aString}), }, { name: "empty string with field specification", prepare: func(o *obj) { o.MyString = "" }, fields: []interface{}{&testObj.MyString}, expectedRow: ovsdb.Row(map[string]interface{}{"aString": ""}), }, { name: "empty set without field specification", prepare: func(o *obj) { }, expectedRow: ovsdb.Row(map[string]interface{}{}), }, { name: "empty set without field specification", prepare: func(o *obj) { }, fields: []interface{}{&testObj.MySet}, expectedRow: ovsdb.Row(map[string]interface{}{"aSet": testOvsSet(t, []string{})}), }, { name: "empty maps", prepare: func(o *obj) { o.MyString = "foo" }, expectedRow: ovsdb.Row(map[string]interface{}{"aString": aString}), }, { name: "empty maps with field specification", prepare: func(o *obj) { o.MyString = "foo" }, fields: []interface{}{&testObj.MyMap}, expectedRow: ovsdb.Row(map[string]interface{}{"aMap": testOvsMap(t, map[string]string{})}), }, { name: "Complex object with field selection", prepare: func(o *obj) { o.MyString = aString o.MyMap = aMap o.MySet = aSet o.MyFloat = aFloat }, fields: []interface{}{&testObj.MyMap, &testObj.MySet}, expectedRow: ovsdb.Row(map[string]interface{}{"aMap": testOvsMap(t, aMap), "aSet": testOvsSet(t, aSet)}), }, } for _, test := range tests { t.Run(fmt.Sprintf("NewRow: %s", test.name), func(t *testing.T) { mapper := NewMapper(schema) // Clean the test object testObj.MyString = "" testObj.MyMap = nil testObj.MySet = nil testObj.MyFloat = 0 test.prepare(&testObj) info, err := NewInfo("TestTable", schema.Table("TestTable"), &testObj) assert.NoError(t, err) row, err := mapper.NewRow(info, test.fields...) 
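// The cases above exercise NewRow's field selection: columns named via explicit
// field pointers are included even when they hold zero values (empty string,
// empty set, empty map), whereas without field pointers zero-valued columns are
// omitted from the generated row.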
if test.err { assert.NotNil(t, err) } else { assert.Nil(t, err) assert.Equalf(t, test.expectedRow, row, "NewRow should match expected") } }) } } func TestMapperCondition(t *testing.T) { var testSchema = []byte(`{ "cksum": "223619766 22548", "name": "TestSchema", "tables": { "TestTable": { "indexes": [["name"],["composed_1","composed_2"]], "columns": { "name": { "type": "string" }, "composed_1": { "type": { "key": "string" } }, "composed_2": { "type": { "key": "string" } }, "config": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`) type testType struct { ID string `ovsdb:"_uuid"` MyName string `ovsdb:"name"` Config map[string]string `ovsdb:"config"` Comp1 string `ovsdb:"composed_1"` Comp2 string `ovsdb:"composed_2"` } var schema ovsdb.DatabaseSchema if err := json.Unmarshal(testSchema, &schema); err != nil { t.Fatal(err) } mapper := NewMapper(schema) type Test struct { name string prepare func(*testType) expected []ovsdb.Condition index []interface{} err bool } testObj := testType{} tests := []Test{ { name: "simple index", prepare: func(t *testType) { t.ID = "" t.MyName = "foo" t.Config = nil t.Comp1 = "" t.Comp2 = "" }, index: []interface{}{}, expected: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}}, err: false, }, { name: "UUID", prepare: func(t *testType) { t.ID = aUUID0 t.MyName = "foo" t.Config = nil t.Comp1 = "" t.Comp2 = "" }, index: []interface{}{}, expected: []ovsdb.Condition{{Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: aUUID0}}}, err: false, }, { name: "specify index", prepare: func(t *testType) { t.ID = aUUID0 t.MyName = "foo" t.Config = nil t.Comp1 = "" t.Comp2 = "" }, index: []interface{}{&testObj.MyName}, expected: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "foo"}}, err: false, }, { name: "complex index", prepare: func(t *testType) { t.ID = "" t.MyName = "" t.Config = nil t.Comp1 = "foo" t.Comp2 = "bar" }, expected: []ovsdb.Condition{ {Column: "composed_1", Function: ovsdb.ConditionEqual, Value: "foo"}, {Column: "composed_2", Function: ovsdb.ConditionEqual, Value: "bar"}}, index: []interface{}{}, err: false, }, { name: "first index", prepare: func(t *testType) { t.ID = "" t.MyName = "something" t.Config = nil t.Comp1 = "foo" t.Comp2 = "bar" }, expected: []ovsdb.Condition{{Column: "name", Function: ovsdb.ConditionEqual, Value: "something"}}, index: []interface{}{}, err: false, }, { name: "Error: None", prepare: func(t *testType) { t.ID = "" t.MyName = "" t.Config = map[string]string{"foo": "bar"} t.Comp1 = "" t.Comp2 = "" }, index: []interface{}{}, err: true, }, } for _, tt := range tests { t.Run(fmt.Sprintf("newEqualityCondition_%s", tt.name), func(t *testing.T) { tt.prepare(&testObj) info, err := NewInfo("TestTable", schema.Table("TestTable"), &testObj) assert.NoError(t, err) conds, err := mapper.NewEqualityCondition(info, tt.index...) 
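// Expected precedence, as encoded in the cases above: explicitly passed index
// fields are used first, then a non-empty _uuid, then the first schema index
// whose columns are populated; a row matching no index should yield an error.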
if tt.err { if err == nil { t.Errorf("expected an error but got none") } } else { if err != nil { t.Error(err) } if !assert.ElementsMatch(t, tt.expected, conds, "Condition must match expected") { t.Logf("%v \n", conds) } } }) } } func TestMapperEqualIndexes(t *testing.T) { var testSchema = []byte(`{ "cksum": "223619766 22548", "name": "TestSchema", "tables": { "TestTable": { "indexes": [["name"],["composed_1","composed_2"]], "columns": { "name": { "type": "string" }, "composed_1": { "type": { "key": "string" } }, "composed_2": { "type": { "key": "string" } }, "int1": { "type": { "key": "integer" } }, "int2": { "type": { "key": "integer" } }, "config": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`) type testType struct { ID string `ovsdb:"_uuid"` MyName string `ovsdb:"name"` Config map[string]string `ovsdb:"config"` Comp1 string `ovsdb:"composed_1"` Comp2 string `ovsdb:"composed_2"` Int1 int `ovsdb:"int1"` Int2 int `ovsdb:"int2"` } var schema ovsdb.DatabaseSchema if err := json.Unmarshal(testSchema, &schema); err != nil { t.Fatal(err) } mapper := NewMapper(schema) type Test struct { name string obj1 testType obj2 testType expected bool indexes []string } tests := []Test{ { name: "same simple index", obj1: testType{ MyName: "foo", }, obj2: testType{ MyName: "foo", }, expected: true, indexes: []string{}, }, { name: "diff simple index", obj1: testType{ MyName: "foo", }, obj2: testType{ MyName: "bar", }, expected: false, indexes: []string{}, }, { name: "same uuid", obj1: testType{ ID: aUUID0, MyName: "foo", }, obj2: testType{ ID: aUUID0, MyName: "bar", }, expected: true, indexes: []string{}, }, { name: "diff uuid", obj1: testType{ ID: aUUID0, MyName: "foo", }, obj2: testType{ ID: aUUID1, MyName: "bar", }, expected: false, indexes: []string{}, }, { name: "same complex_index", obj1: testType{ ID: aUUID0, MyName: "foo", Comp1: "foo", Comp2: "bar", }, obj2: testType{ ID: aUUID1, MyName: "bar", Comp1: "foo", Comp2: "bar", }, expected: true, indexes: []string{}, }, { name: "different", obj1: testType{ ID: aUUID0, MyName: "name1", Comp1: "foo", Comp2: "bar", }, obj2: testType{ ID: aUUID1, MyName: "name2", Comp1: "foo", Comp2: "bar2", }, expected: false, indexes: []string{}, }, { name: "same additional index", obj1: testType{ ID: aUUID0, MyName: "name1", Comp1: "foo", Comp2: "bar1", Int1: 42, }, obj2: testType{ ID: aUUID1, MyName: "name2", Comp1: "foo", Comp2: "bar2", Int1: 42, }, expected: true, indexes: []string{"int1"}, }, { name: "diff additional index", obj1: testType{ ID: aUUID0, MyName: "name1", Comp1: "foo", Comp2: "bar1", Int1: 42, }, obj2: testType{ ID: aUUID1, MyName: "name2", Comp1: "foo", Comp2: "bar2", Int1: 420, }, expected: false, indexes: []string{"int1"}, }, { name: "same additional indexes ", obj1: testType{ ID: aUUID0, MyName: "name1", Comp1: "foo", Comp2: "bar1", Int1: 42, Int2: 25, }, obj2: testType{ ID: aUUID1, MyName: "name2", Comp1: "foo", Comp2: "bar2", Int1: 42, Int2: 25, }, expected: true, indexes: []string{"int1", "int2"}, }, { name: "diff additional indexes ", obj1: testType{ ID: aUUID0, MyName: "name1", Comp1: "foo", Comp2: "bar1", Int1: 42, Int2: 50, }, obj2: testType{ ID: aUUID1, MyName: "name2", Comp1: "foo", Comp2: "bar2", Int1: 42, Int2: 25, }, expected: false, indexes: []string{"int1", "int2"}, }, } for _, test := range tests { t.Run(fmt.Sprintf("Equal %s", test.name), func(t *testing.T) { info1, err := NewInfo("TestTable", schema.Table("TestTable"), &test.obj1) assert.NoError(t, err) info2, err := NewInfo("TestTable", 
schema.Table("TestTable"), &test.obj2) assert.NoError(t, err) eq, err := mapper.equalIndexes(info1, info2, test.indexes...) assert.Nil(t, err) assert.Equalf(t, test.expected, eq, "equal value should match expected") }) } // Test we can also use field pointers obj1 := testType{ ID: aUUID0, MyName: "name1", Comp1: "foo", Comp2: "bar1", Int1: 42, Int2: 25, } obj2 := testType{ ID: aUUID1, MyName: "name2", Comp1: "foo", Comp2: "bar2", Int1: 42, Int2: 25, } info1, err := NewInfo("TestTable", schema.Table("TestTable"), &obj1) assert.NoError(t, err) info2, err := NewInfo("TestTable", schema.Table("TestTable"), &obj2) assert.NoError(t, err) eq, err := mapper.EqualFields(info1, info2, &obj1.Int1, &obj1.Int2) assert.Nil(t, err) assert.True(t, eq) // Using pointers to second value is not supported _, err = mapper.EqualFields(info1, info2, &obj2.Int1, &obj2.Int2) assert.NotNil(t, err) } func TestMapperMutation(t *testing.T) { var testSchema = []byte(`{ "cksum": "223619766 22548", "name": "TestSchema", "tables": { "TestTable": { "columns": { "string": { "type": "string" }, "set": { "type": { "key": "string", "min": 0, "max": "unlimited" } }, "map": { "type": { "key": "string", "value": "string" } }, "unmutable": { "mutable": false, "type": { "key": "integer" } }, "int": { "type": { "key": "integer" } } } } } }`) type testType struct { UUID string `ovsdb:"_uuid"` String string `ovsdb:"string"` Set []string `ovsdb:"set"` Map map[string]string `ovsdb:"map"` Int int `ovsdb:"int"` UnMutable int `ovsdb:"unmutable"` } var schema ovsdb.DatabaseSchema if err := json.Unmarshal(testSchema, &schema); err != nil { t.Fatal(err) } mapper := NewMapper(schema) type Test struct { name string column string obj testType expected *ovsdb.Mutation mutator ovsdb.Mutator value interface{} err bool } tests := []Test{ { name: "string", column: "string", obj: testType{}, mutator: ovsdb.MutateOperationAdd, err: true, }, { name: "Increment integer", column: "int", obj: testType{}, mutator: ovsdb.MutateOperationAdd, value: 1, expected: ovsdb.NewMutation("int", ovsdb.MutateOperationAdd, 1), err: false, }, { name: "Increment integer", column: "int", obj: testType{}, mutator: ovsdb.MutateOperationModulo, value: 2, expected: ovsdb.NewMutation("int", ovsdb.MutateOperationModulo, 2), err: false, }, { name: "non-mutable", column: "unmutable", obj: testType{}, mutator: ovsdb.MutateOperationSubtract, value: 2, err: true, }, { name: "Add element to set ", column: "set", obj: testType{}, mutator: ovsdb.MutateOperationInsert, value: []string{"foo"}, expected: ovsdb.NewMutation("set", ovsdb.MutateOperationInsert, testOvsSet(t, []string{"foo"})), err: false, }, { name: "Delete element from set ", column: "set", obj: testType{}, mutator: ovsdb.MutateOperationDelete, value: []string{"foo"}, expected: ovsdb.NewMutation("set", ovsdb.MutateOperationDelete, testOvsSet(t, []string{"foo"})), err: false, }, { name: "Delete keys from map ", column: "map", obj: testType{}, mutator: ovsdb.MutateOperationDelete, value: []string{"foo", "bar"}, expected: ovsdb.NewMutation("map", ovsdb.MutateOperationDelete, testOvsSet(t, []string{"foo", "bar"})), err: false, }, { name: "Delete key value pairs from map ", column: "map", obj: testType{}, mutator: ovsdb.MutateOperationDelete, value: map[string]string{"foo": "bar"}, expected: ovsdb.NewMutation("map", ovsdb.MutateOperationDelete, testOvsMap(t, map[string]string{"foo": "bar"})), err: false, }, { name: "Insert elements in map ", column: "map", obj: testType{}, mutator: ovsdb.MutateOperationInsert, value: 
map[string]string{"foo": "bar"}, expected: ovsdb.NewMutation("map", ovsdb.MutateOperationInsert, testOvsMap(t, map[string]string{"foo": "bar"})), err: false, }, } for _, test := range tests { t.Run(fmt.Sprintf("newMutation%s", test.name), func(t *testing.T) { info, err := NewInfo("TestTable", schema.Table("TestTable"), &test.obj) assert.NoError(t, err) mutation, err := mapper.NewMutation(info, test.column, test.mutator, test.value) if test.err { if err == nil { t.Errorf("expected an error but got none") } } else { if err != nil { t.Error(err) } } assert.Equalf(t, test.expected, mutation, "Mutation must match expected") }) } } func testOvsSet(t *testing.T, set interface{}) ovsdb.OvsSet { oSet, err := ovsdb.NewOvsSet(set) assert.Nil(t, err) return oSet } func testOvsMap(t *testing.T, set interface{}) ovsdb.OvsMap { oMap, err := ovsdb.NewOvsMap(set) assert.Nil(t, err) return oMap } golang-github-ovn-org-libovsdb-0.7.0/model/000077500000000000000000000000001464501522100205345ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/model/client.go000066400000000000000000000120101464501522100223330ustar00rootroot00000000000000package model import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/ovsdb" ) // ColumnKey addresses a column and optionally a key within a column type ColumnKey struct { Column string Key interface{} } // ClientIndex defines a client index by a set of columns type ClientIndex struct { Columns []ColumnKey } // ClientDBModel contains the client information needed to build a DatabaseModel type ClientDBModel struct { name string types map[string]reflect.Type indexes map[string][]ClientIndex } // NewModel returns a new instance of a model from a specific string func (db ClientDBModel) newModel(table string) (Model, error) { mtype, ok := db.types[table] if !ok { return nil, fmt.Errorf("table %s not found in database model", string(table)) } model := reflect.New(mtype.Elem()) return model.Interface().(Model), nil } // Name returns the database name func (db ClientDBModel) Name() string { return db.name } // Indexes returns the client indexes for a model func (db ClientDBModel) Indexes(table string) []ClientIndex { if len(db.indexes) == 0 { return nil } if _, ok := db.indexes[table]; ok { return copyIndexes(db.indexes)[table] } return nil } // SetIndexes sets the client indexes. Client indexes are optional, similar to // schema indexes and are only tracked in the specific client instances that are // provided with this client model. A client index may point to multiple models // as uniqueness is not enforced. They are defined per table and multiple // indexes can be defined for a table. Each index consists of a set of columns. // If the column is a map, specific keys of that map can be addressed for the // index. 
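//
// A minimal sketch of declaring client indexes (the "Bridge" table and its
// "name"/"external_ids" columns are purely illustrative and not defined by this
// package):
//
//	dbModel.SetIndexes(map[string][]ClientIndex{
//		"Bridge": {
//			{Columns: []ColumnKey{{Column: "name"}}},
//			{Columns: []ColumnKey{{Column: "external_ids", Key: "my-id"}}},
//		},
//	})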
func (db *ClientDBModel) SetIndexes(indexes map[string][]ClientIndex) { db.indexes = copyIndexes(indexes) } // Validate validates the DatabaseModel against the input schema // Returns all the errors detected func (db ClientDBModel) validate(schema ovsdb.DatabaseSchema) []error { var errors []error if db.name != schema.Name { errors = append(errors, fmt.Errorf("database model name (%s) does not match schema (%s)", db.name, schema.Name)) } infos := make(map[string]*mapper.Info, len(db.types)) for tableName := range db.types { tableSchema := schema.Table(tableName) if tableSchema == nil { errors = append(errors, fmt.Errorf("database model contains a model for table %s that does not exist in schema", tableName)) continue } model, err := db.newModel(tableName) if err != nil { errors = append(errors, err) continue } info, err := mapper.NewInfo(tableName, tableSchema, model) if err != nil { errors = append(errors, err) continue } infos[tableName] = info } for tableName, indexSets := range db.indexes { info, ok := infos[tableName] if !ok { errors = append(errors, fmt.Errorf("database model contains a client index for table %s that does not exist in schema", tableName)) continue } for _, indexSet := range indexSets { for _, indexColumn := range indexSet.Columns { f, err := info.FieldByColumn(indexColumn.Column) if err != nil { errors = append( errors, fmt.Errorf("database model contains a client index for column %s that does not exist in table %s", indexColumn.Column, tableName)) continue } if indexColumn.Key != nil && reflect.ValueOf(f).Kind() != reflect.Map { errors = append( errors, fmt.Errorf("database model contains a client index for key %s in column %s of table %s that is not a map", indexColumn.Key, indexColumn.Column, tableName)) continue } } } } return errors } // NewClientDBModel constructs a ClientDBModel based on a database name and dictionary of models indexed by table name func NewClientDBModel(name string, models map[string]Model) (ClientDBModel, error) { types := make(map[string]reflect.Type, len(models)) for table, model := range models { modelType := reflect.TypeOf(model) if modelType.Kind() != reflect.Ptr || modelType.Elem().Kind() != reflect.Struct { return ClientDBModel{}, fmt.Errorf("model is expected to be a pointer to struct") } hasUUID := false for i := 0; i < modelType.Elem().NumField(); i++ { if field := modelType.Elem().Field(i); field.Tag.Get("ovsdb") == "_uuid" && field.Type.Kind() == reflect.String { hasUUID = true break } } if !hasUUID { return ClientDBModel{}, fmt.Errorf("model is expected to have a string field called uuid") } types[table] = modelType } return ClientDBModel{ types: types, name: name, }, nil } func copyIndexes(src map[string][]ClientIndex) map[string][]ClientIndex { if len(src) == 0 { return nil } dst := make(map[string][]ClientIndex, len(src)) for table, indexSets := range src { dst[table] = make([]ClientIndex, 0, len(indexSets)) for _, indexSet := range indexSets { indexSetCopy := ClientIndex{ Columns: make([]ColumnKey, len(indexSet.Columns)), } copy(indexSetCopy.Columns, indexSet.Columns) dst[table] = append(dst[table], indexSetCopy) } } return dst } golang-github-ovn-org-libovsdb-0.7.0/model/database.go000066400000000000000000000070541464501522100226350ustar00rootroot00000000000000package model import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/ovsdb" ) // A DatabaseModel represents libovsdb's metadata about the database. 
// It's the result of combining the client's ClientDBModel and the server's Schema type DatabaseModel struct { client ClientDBModel Schema ovsdb.DatabaseSchema Mapper mapper.Mapper metadata map[reflect.Type]mapper.Metadata } // NewDatabaseModel returns a new DatabaseModel func NewDatabaseModel(schema ovsdb.DatabaseSchema, client ClientDBModel) (DatabaseModel, []error) { dbModel := &DatabaseModel{ Schema: schema, client: client, } errs := client.validate(schema) if len(errs) > 0 { return DatabaseModel{}, errs } dbModel.Mapper = mapper.NewMapper(schema) var metadata map[reflect.Type]mapper.Metadata metadata, errs = generateModelInfo(schema, client.types) if len(errs) > 0 { return DatabaseModel{}, errs } dbModel.metadata = metadata return *dbModel, nil } // NewPartialDatabaseModel returns a DatabaseModel what does not have a schema yet func NewPartialDatabaseModel(client ClientDBModel) DatabaseModel { return DatabaseModel{ client: client, } } // Valid returns whether the DatabaseModel is fully functional func (db DatabaseModel) Valid() bool { return !reflect.DeepEqual(db.Schema, ovsdb.DatabaseSchema{}) } // Client returns the DatabaseModel's client dbModel func (db DatabaseModel) Client() ClientDBModel { return db.client } // NewModel returns a new instance of a model from a specific string func (db DatabaseModel) NewModel(table string) (Model, error) { mtype, ok := db.client.types[table] if !ok { return nil, fmt.Errorf("table %s not found in database model", string(table)) } model := reflect.New(mtype.Elem()) return model.Interface().(Model), nil } // Types returns the DatabaseModel Types // the DatabaseModel types is a map of reflect.Types indexed by string // The reflect.Type is a pointer to a struct that contains 'ovs' tags // as described above. Such pointer to struct also implements the Model interface func (db DatabaseModel) Types() map[string]reflect.Type { return db.client.types } // FindTable returns the string associated with a reflect.Type or "" func (db DatabaseModel) FindTable(mType reflect.Type) string { for table, tType := range db.client.types { if tType == mType { return table } } return "" } // generateModelMetadata creates metadata objects from all models included in the // database and caches them for future re-use func generateModelInfo(dbSchema ovsdb.DatabaseSchema, modelTypes map[string]reflect.Type) (map[reflect.Type]mapper.Metadata, []error) { errors := []error{} metadata := make(map[reflect.Type]mapper.Metadata, len(modelTypes)) for tableName, tType := range modelTypes { tableSchema := dbSchema.Table(tableName) if tableSchema == nil { errors = append(errors, fmt.Errorf("database Model contains model for table %s which is not present in schema", tableName)) continue } obj := reflect.New(tType.Elem()).Interface().(Model) info, err := mapper.NewInfo(tableName, tableSchema, obj) if err != nil { errors = append(errors, err) continue } metadata[tType] = info.Metadata } return metadata, errors } // NewModelInfo returns a mapper.Info object based on a provided model func (db DatabaseModel) NewModelInfo(obj interface{}) (*mapper.Info, error) { meta, ok := db.metadata[reflect.TypeOf(obj)] if !ok { return nil, ovsdb.NewErrWrongType("NewModelInfo", "type that is part of the DatabaseModel", obj) } return &mapper.Info{ Obj: obj, Metadata: meta, }, nil } golang-github-ovn-org-libovsdb-0.7.0/model/model.go000066400000000000000000000067441464501522100221760ustar00rootroot00000000000000package model import ( "encoding/json" "fmt" "reflect" "github.com/ovn-org/libovsdb/ovsdb" ) // A 
Model is the base interface used to build Database Models. It is used // to express how data from a specific Database Table shall be translated into structs // A Model is a struct with at least one (most likely more) field tagged with the 'ovs' tag // The value of 'ovs' field must be a valid column name in the OVS Database // A field associated with the "_uuid" column mandatory. The rest of the columns are optional // The struct may also have non-tagged fields (which will be ignored by the API calls) // The Model interface must be implemented by the pointer to such type // Example: //type MyLogicalRouter struct { // UUID string `ovsdb:"_uuid"` // Name string `ovsdb:"name"` // ExternalIDs map[string]string `ovsdb:"external_ids"` // LoadBalancers []string `ovsdb:"load_balancer"` //} type Model interface{} type CloneableModel interface { CloneModel() Model CloneModelInto(Model) } type ComparableModel interface { EqualsModel(Model) bool } // Clone creates a deep copy of a model func Clone(a Model) Model { if cloner, ok := a.(CloneableModel); ok { return cloner.CloneModel() } val := reflect.Indirect(reflect.ValueOf(a)) b := reflect.New(val.Type()).Interface() aBytes, _ := json.Marshal(a) _ = json.Unmarshal(aBytes, b) return b } // CloneInto deep copies a model into another one func CloneInto(src, dst Model) { if cloner, ok := src.(CloneableModel); ok { cloner.CloneModelInto(dst) return } aBytes, _ := json.Marshal(src) _ = json.Unmarshal(aBytes, dst) } func Equal(l, r Model) bool { if comparator, ok := l.(ComparableModel); ok { return comparator.EqualsModel(r) } return reflect.DeepEqual(l, r) } func modelSetUUID(model Model, uuid string) error { modelVal := reflect.ValueOf(model).Elem() for i := 0; i < modelVal.NumField(); i++ { if field := modelVal.Type().Field(i); field.Tag.Get("ovsdb") == "_uuid" && field.Type.Kind() == reflect.String { modelVal.Field(i).Set(reflect.ValueOf(uuid)) return nil } } return fmt.Errorf("model is expected to have a string field mapped to column _uuid") } // Condition is a model-based representation of an OVSDB Condition type Condition struct { // Pointer to the field of the model where the operation applies Field interface{} // Condition function Function ovsdb.ConditionFunction // Value to use in the condition Value interface{} } // Mutation is a model-based representation of an OVSDB Mutation type Mutation struct { // Pointer to the field of the model that shall be mutated Field interface{} // String representing the mutator (as per RFC7047) Mutator ovsdb.Mutator // Value to use in the mutation Value interface{} } // CreateModel creates a new Model instance based on an OVSDB Row information func CreateModel(dbModel DatabaseModel, tableName string, row *ovsdb.Row, uuid string) (Model, error) { if !dbModel.Valid() { return nil, fmt.Errorf("database model not valid") } table := dbModel.Schema.Table(tableName) if table == nil { return nil, fmt.Errorf("table %s not found", tableName) } model, err := dbModel.NewModel(tableName) if err != nil { return nil, err } info, err := dbModel.NewModelInfo(model) if err != nil { return nil, err } err = dbModel.Mapper.GetRowData(row, info) if err != nil { return nil, err } if uuid != "" { if err := info.SetField("_uuid", uuid); err != nil { return nil, err } } return model, nil } golang-github-ovn-org-libovsdb-0.7.0/model/model_test.go000066400000000000000000000250071464501522100232260ustar00rootroot00000000000000package model import ( "encoding/json" "fmt" "reflect" "testing" "github.com/ovn-org/libovsdb/ovsdb" 
"github.com/stretchr/testify/assert" ) type modelA struct { UUID string `ovsdb:"_uuid"` } type modelB struct { UID string `ovsdb:"_uuid"` Foo string `ovsdb:"bar"` Bar string `ovsdb:"baz"` } type modelInvalid struct { Foo string } func TestClientDBModel(t *testing.T) { type Test struct { name string obj map[string]Model valid bool } tests := []Test{ { name: "valid", obj: map[string]Model{"Test_A": &modelA{}}, valid: true, }, { name: "valid_multiple", obj: map[string]Model{"Test_A": &modelA{}, "Test_B": &modelB{}}, valid: true, }, { name: "invalid", obj: map[string]Model{"INVALID": &modelInvalid{}}, valid: false, }, } for _, tt := range tests { t.Run(fmt.Sprintf("TestNewModel_%s", tt.name), func(t *testing.T) { db, err := NewClientDBModel(tt.name, tt.obj) if tt.valid { assert.Nil(t, err) assert.Len(t, db.types, len(tt.obj)) assert.Equal(t, tt.name, db.Name()) } else { assert.NotNil(t, err) } }) } } func TestNewModel(t *testing.T) { db, err := NewClientDBModel("testTable", map[string]Model{"Test_A": &modelA{}, "Test_B": &modelB{}}) assert.Nil(t, err) _, err = db.newModel("Unknown") assert.NotNilf(t, err, "Creating model from unknown table should fail") model, err := db.newModel("Test_A") assert.Nilf(t, err, "Creating model from valid table should succeed") assert.IsTypef(t, model, &modelA{}, "model creation should return the appropriate type") } func TestSetUUID(t *testing.T) { var err error a := modelA{} err = modelSetUUID(&a, "foo") assert.Nilf(t, err, "Setting UUID should succeed") assert.Equal(t, "foo", a.UUID) b := modelB{} err = modelSetUUID(&b, "foo") assert.Nilf(t, err, "Setting UUID should succeed") assert.Equal(t, "foo", b.UID) } func TestValidate(t *testing.T) { model, err := NewClientDBModel("TestDB", map[string]Model{ "TestTable": &struct { aUUID string `ovsdb:"_uuid"` aString string `ovsdb:"aString"` aInt int `ovsdb:"aInt"` aFloat float64 `ovsdb:"aFloat"` aSet []string `ovsdb:"aSet"` aMap map[string]string `ovsdb:"aMap"` }{}, }) assert.Nil(t, err) tests := []struct { name string schema []byte err bool }{ { name: "wrong name", schema: []byte(`{ "name": "Wrong" }`), err: true, }, { name: "correct", schema: []byte(`{ "name": "TestDB", "tables": { "TestTable": { "columns": { "aString": { "type": "string" }, "aInt": { "type": "integer" }, "aFloat": { "type": "real" } , "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`), err: false, }, { name: "extra column should be OK", schema: []byte(`{ "name": "TestDB", "tables": { "TestTable": { "columns": { "ExtraCol": { "type": "real" } , "aString": { "type": "string" }, "aInt": { "type": "integer" }, "aFloat": { "type": "real" } , "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`), err: false, }, { name: "extra table should be OK", schema: []byte(`{ "name": "TestDB", "tables": { "ExtraTable": { "columns": { "foo": { "type": "real" } } }, "TestTable": { "columns": { "ExtraCol": { "type": "real" } , "aString": { "type": "string" }, "aInt": { "type": "integer" }, "aFloat": { "type": "real" } , "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`), err: false, }, { name: "Less columns should fail", schema: []byte(`{ "name": "TestDB", "tables": { "TestTable": { "columns": { "aString": { "type": 
"string" }, "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`), err: true, }, { name: "Wrong simple type should fail", schema: []byte(`{ "name": "TestDB", "tables": { "TestTable": { "columns": { "aString": { "type": "integer" }, "aInt": { "type": "integer" }, "aFloat": { "type": "real" } , "aSet": { "type": { "key": "string", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`), err: true, }, { name: "Wrong set type should fail", schema: []byte(`{ "name": "TestDB", "tables": { "TestTable": { "columns": { "aString": { "type": "string" }, "aInt": { "type": "integer" }, "aFloat": { "type": "real" } , "aSet": { "type": { "key": "integer", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } } } } } }`), err: true, }, { name: "Wrong map type should fail", schema: []byte(`{ "name": "TestDB", "tables": { "TestTable": { "columns": { "aString": { "type": "string" }, "aInt": { "type": "integer" }, "aFloat": { "type": "real" } , "aSet": { "type": { "key": "integer", "max": "unlimited", "min": 0 } }, "aMap": { "type": { "key": "string", "max": "unlimited", "min": 0, "value": "boolean" } } } } } }`), err: true, }, } for _, tt := range tests { t.Run(fmt.Sprintf("TestValidate %s", tt.name), func(t *testing.T) { var schema ovsdb.DatabaseSchema err := json.Unmarshal(tt.schema, &schema) assert.Nil(t, err) errors := model.validate(schema) if tt.err { assert.Greater(t, len(errors), 0) } else { assert.Len(t, errors, 0) } }) } } type modelC struct { modelB NoClone string } func (a *modelC) CloneModel() Model { return &modelC{ modelB: a.modelB, } } func (a *modelC) CloneModelInto(b Model) { c := b.(*modelC) c.modelB = a.modelB } func (a *modelC) EqualsModel(b Model) bool { c := b.(*modelC) return reflect.DeepEqual(a.modelB, c.modelB) } func TestCloneViaMarshalling(t *testing.T) { a := &modelB{UID: "foo", Foo: "bar", Bar: "baz"} b := Clone(a).(*modelB) assert.Equal(t, a, b) a.UID = "baz" assert.NotEqual(t, a, b) b.UID = "quux" assert.NotEqual(t, a, b) } func TestCloneIntoViaMarshalling(t *testing.T) { a := &modelB{UID: "foo", Foo: "bar", Bar: "baz"} b := &modelB{} CloneInto(a, b) assert.Equal(t, a, b) a.UID = "baz" assert.NotEqual(t, a, b) b.UID = "quux" assert.NotEqual(t, a, b) } func TestCloneViaCloneable(t *testing.T) { a := &modelC{modelB: modelB{UID: "foo", Foo: "bar", Bar: "baz"}, NoClone: "noClone"} func(a interface{}) { _, ok := a.(CloneableModel) assert.True(t, ok, "is not cloneable") }(a) // test that Clone() uses the cloneable interface, in which // case modelC.NoClone won't be copied b := Clone(a).(*modelC) assert.NotEqual(t, a, b) b.NoClone = a.NoClone assert.Equal(t, a, b) a.UID = "baz" assert.NotEqual(t, a, b) b.UID = "quux" assert.NotEqual(t, a, b) } func TestCloneIntoViaCloneable(t *testing.T) { a := &modelC{modelB: modelB{UID: "foo", Foo: "bar", Bar: "baz"}, NoClone: "noClone"} func(a interface{}) { _, ok := a.(CloneableModel) assert.True(t, ok, "is not cloneable") }(a) // test that CloneInto() uses the cloneable interface, in which // case modelC.NoClone won't be copied b := &modelC{} CloneInto(a, b) assert.NotEqual(t, a, b) b.NoClone = a.NoClone assert.Equal(t, a, b) a.UID = "baz" assert.NotEqual(t, a, b) b.UID = "quux" assert.NotEqual(t, a, b) } func TestEqualViaDeepEqual(t *testing.T) { a := &modelB{UID: "foo", Foo: "bar", Bar: 
"baz"} b := &modelB{UID: "foo", Foo: "bar", Bar: "baz"} assert.True(t, Equal(a, b)) a.UID = "baz" assert.False(t, Equal(a, b)) } func TestEqualViaComparable(t *testing.T) { a := &modelC{modelB: modelB{UID: "foo", Foo: "bar", Bar: "baz"}, NoClone: "noClone"} func(a interface{}) { _, ok := a.(ComparableModel) assert.True(t, ok, "is not comparable") }(a) b := a.CloneModel().(*modelC) // test that Equal() uses the comparable interface, in which // case the difference on modelC.NoClone won't be noticed assert.True(t, Equal(a, b)) a.UID = "baz" assert.False(t, Equal(a, b)) } golang-github-ovn-org-libovsdb-0.7.0/modelgen/000077500000000000000000000000001464501522100212265ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/modelgen/dbmodel.go000066400000000000000000000052631464501522100231710ustar00rootroot00000000000000package modelgen import ( "encoding/json" "sort" "text/template" "github.com/ovn-org/libovsdb/ovsdb" ) // NewDBTemplate returns a new ClientDBModel template. It includes the following // other templates that can be overridden to customize the generated file: // // - `header`: to include a comment as a header before package definition // - `preDBDefinitions`: to include code after package definition // - `postDBDefinitions`: to include code at the end // // It is designed to be used with a map[string] interface and some defined keys // (see GetDBTemplateData) func NewDBTemplate() *template.Template { return template.Must(template.New("").Funcs( template.FuncMap{ "escape": escape, }, ).Parse(` {{- define "header" }} // Code generated by "libovsdb.modelgen" // DO NOT EDIT. {{- end }} {{- define "preDBDefinitions" }} import ( "encoding/json" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) {{- end }} {{ define "postDBDefinitions" }}{{ end }} {{ template "header" . }} package {{ index . "PackageName" }} {{ template "preDBDefinitions" }} // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb func FullDatabaseModel() (model.ClientDBModel, error) { return model.NewClientDBModel("{{ index . "DatabaseName" }}", map[string]model.Model{ {{ range index . "Tables" }} "{{ .TableName }}" : &{{ .StructName }}{}, {{ end }} }) } var schema = {{ index . "Schema" | escape }} func Schema() ovsdb.DatabaseSchema { var s ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &s) if err != nil { panic(err) } return s } {{ template "postDBDefinitions" . }} `)) } // TableInfo represents the information of a table needed by the Model template type TableInfo struct { TableName string StructName string } // GetDBTemplateData returns the map needed to execute the DBTemplate. 
It has // the following keys: // // - `DatabaseName`: (string) the database name // - `PackageName`: (string) the package name // - `Tables`: []Table list of Tables that form the Model func GetDBTemplateData(pkg string, schema ovsdb.DatabaseSchema) map[string]interface{} { data := map[string]interface{}{} data["DatabaseName"] = schema.Name data["PackageName"] = pkg schemaBytes, _ := json.MarshalIndent(schema, "", " ") data["Schema"] = string(schemaBytes) tables := []TableInfo{} var order sort.StringSlice for tableName := range schema.Tables { order = append(order, tableName) } order.Sort() for _, tableName := range order { tables = append(tables, TableInfo{ TableName: tableName, StructName: StructName(tableName), }) } data["Tables"] = tables return data } func escape(s string) string { return "`" + s + "`" } golang-github-ovn-org-libovsdb-0.7.0/modelgen/dbmodel_test.go000066400000000000000000000056021464501522100242250ustar00rootroot00000000000000package modelgen import ( "encoding/json" "testing" "text/template" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDbModelTemplate(t *testing.T) { rawSchema := []byte(` { "name": "AtomicDB", "version": "0.0.0", "tables": { "atomicTable": { "columns": { "str": { "type": "string" }, "int": { "type": "integer" }, "float": { "type": "real" }, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp", "sctp"]]}, "min": 0, "max": 1}}, "event_type": {"type": {"key": {"type": "string", "enum": ["set", ["empty_lb_backends"]]}}} } } } }`) test := []struct { name string extend func(tmpl *template.Template, data map[string]interface{}) expected string err bool formatErr bool }{ { name: "normal", expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. package test import ( "encoding/json" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb func FullDatabaseModel() (model.ClientDBModel, error) { return model.NewClientDBModel("AtomicDB", map[string]model.Model{ "atomicTable": &AtomicTable{}, }) } ` + ` var schema = ` + "`" + `{ "name": "AtomicDB", "version": "0.0.0", "tables": { "atomicTable": { "columns": { "event_type": { "type": { "key": { "type": "string", "enum": "empty_lb_backends" } } }, "float": { "type": "real" }, "int": { "type": "integer" }, "protocol": { "type": { "key": { "type": "string", "enum": [ "set", [ "tcp", "udp", "sctp" ] ] }, "min": 0, "max": 1 } }, "str": { "type": "string" } } } } }` + "`" + ` func Schema() ovsdb.DatabaseSchema { var s ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &s) if err != nil { panic(err) } return s } `, }, } var schema ovsdb.DatabaseSchema err := json.Unmarshal(rawSchema, &schema) if err != nil { t.Fatal(err) } for _, tt := range test { t.Run(tt.name, func(t *testing.T) { tmpl := NewDBTemplate() data := GetDBTemplateData("test", schema) if tt.err { assert.NotNil(t, err) } else { g, err := NewGenerator() require.NoError(t, err) b, err := g.Format(tmpl, data) if tt.formatErr { assert.NotNil(t, err) } else { require.NoError(t, err) assert.Equal(t, tt.expected, string(b)) } } }) } } golang-github-ovn-org-libovsdb-0.7.0/modelgen/doc.go000066400000000000000000000003551464501522100223250ustar00rootroot00000000000000/* Package modelgen provides core functionality to implement Model code generators based on a schema. 
It allows to create and customize a text/template that can generate the Model types that libovsdb can work with. */ package modelgen golang-github-ovn-org-libovsdb-0.7.0/modelgen/generator.go000066400000000000000000000027301464501522100235450ustar00rootroot00000000000000package modelgen import ( "bytes" "fmt" "go/format" "io/ioutil" "log" "text/template" ) // Generator is an interface that allows to format code from a template and write it to a file type Generator interface { Generate(string, *template.Template, interface{}) error Format(*template.Template, interface{}) ([]byte, error) } type generator struct { dryRun bool } // Format returns a formatted byte slice by executing the template with the given args func (g *generator) Format(tmpl *template.Template, args interface{}) ([]byte, error) { buffer := bytes.Buffer{} err := tmpl.Execute(&buffer, args) if err != nil { return nil, err } src, err := format.Source(buffer.Bytes()) if err != nil { return nil, err } return src, nil } // Generate generates the code and writes it to specified file path func (g *generator) Generate(filename string, tmpl *template.Template, args interface{}) error { src, err := g.Format(tmpl, args) if err != nil { return err } if g.dryRun { log.Printf("---- Content of file %s ----\n", filename) log.Print(string(src)) fmt.Print("\n") return nil } content, err := ioutil.ReadFile(filename) if err == nil && bytes.Equal(content, src) { return nil } return ioutil.WriteFile(filename, src, 0644) } // NewGenerator returns a new Generator func NewGenerator(opts ...Option) (Generator, error) { options, err := newOptions(opts...) if err != nil { return nil, err } return &generator{ dryRun: options.dryRun, }, nil } golang-github-ovn-org-libovsdb-0.7.0/modelgen/options.go000066400000000000000000000005521464501522100232520ustar00rootroot00000000000000package modelgen type options struct { dryRun bool } type Option func(o *options) error func newOptions(opts ...Option) (*options, error) { o := &options{} for _, opt := range opts { if err := opt(o); err != nil { return nil, err } } return o, nil } func WithDryRun() Option { return func(o *options) error { o.dryRun = true return nil } } golang-github-ovn-org-libovsdb-0.7.0/modelgen/options_test.go000066400000000000000000000023331464501522100243100ustar00rootroot00000000000000package modelgen import ( "reflect" "testing" ) func TestWithDryRun(t *testing.T) { tests := []struct { name string call bool dryRun bool }{ { "call", true, true, }, { "not call", false, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { opts := &options{} if tt.call { fn := WithDryRun() _ = fn(opts) } if got := opts.dryRun; !reflect.DeepEqual(got, tt.dryRun) { t.Errorf("WithDryRun() = %v, want %v", got, tt.dryRun) } }) } } func Test_newOptions(t *testing.T) { type args struct { opts []Option } tests := []struct { name string args args want *options wantErr bool }{ { "With DryRun", args{opts: []Option{WithDryRun()}}, &options{dryRun: true}, false, }, { "Without DryRun", args{opts: []Option{}}, &options{dryRun: false}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := newOptions(tt.args.opts...) 
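// newOptions applies each Option in order; the first Option returning an error
// aborts construction, otherwise the accumulated options struct is returned.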
if (err != nil) != tt.wantErr { t.Errorf("newOptions() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("newOptions() got = %v, want %v", got, tt.want) } }) } } golang-github-ovn-org-libovsdb-0.7.0/modelgen/table.go000066400000000000000000000336341464501522100226550ustar00rootroot00000000000000package modelgen import ( "fmt" "sort" "strings" "text/template" "golang.org/x/text/cases" "golang.org/x/text/language" "github.com/ovn-org/libovsdb/ovsdb" ) // extendedGenTemplate include additional code generation that is optional, like // deep copy methods. var extendedGenTemplate = ` {{- define "deepCopyExtraFields" }}{{ end }} {{- define "equalExtraFields" }}{{ end }} {{- define "extendedGenImports" }} {{- if index . "WithExtendedGen" }} import "github.com/ovn-org/libovsdb/model" {{- end }} {{- end }} {{- define "extendedGen" }} {{- if index . "WithExtendedGen" }} {{- $tableName := index . "TableName" }} {{- $structName := index . "StructName" }} {{- range $field := index . "Fields" }} {{- $fieldName := FieldName $field.Column }} {{- $type := "" }} {{- if index $ "WithEnumTypes" }} {{- $type = FieldTypeWithEnums $tableName $field.Column $field.Schema }} {{- else }} {{- $type = FieldType $tableName $field.Column $field.Schema }} {{- end }} func (a *{{ $structName }}) Get{{ $fieldName }}() {{ $type }} { return a.{{ $fieldName }} } {{ if or (eq (index $type 0) '*') (eq (slice $type 0 2) "[]") (eq (slice $type 0 3) "map") }} func copy{{ $structName }}{{ $fieldName }}(a {{ $type }}) {{ $type }} { if a == nil { return nil } {{- if eq (index $type 0) '*' }} b := *a return &b {{- else if eq (slice $type 0 2) "[]" }} b := make({{ $type }}, len(a)) copy(b, a) return b {{- else }} b := make({{ $type }}, len(a)) for k, v := range a { b[k] = v } return b {{- end }} } func equal{{ $structName }}{{ $fieldName }}(a, b {{ $type }}) bool { if (a == nil) != (b == nil) { return false } {{- if eq (index $type 0) '*' }} if a == b { return true } return *a == *b {{- else if eq (slice $type 0 2) "[]" }} if len(a) != len(b) { return false } for i, v := range a { if b[i] != v { return false } } return true {{- else }} if len(a) != len(b) { return false } for k, v := range a { if w, ok := b[k]; !ok || v != w { return false } } return true {{- end }} } {{ end }} {{ end }} func (a *{{ $structName }}) DeepCopyInto(b *{{ $structName }}) { *b = *a {{- range $field := index . "Fields" }} {{- $fieldName := FieldName $field.Column }} {{- $type := "" }} {{- if index $ "WithEnumTypes" }} {{- $type = FieldTypeWithEnums $tableName $field.Column $field.Schema }} {{- else }} {{- $type = FieldType $tableName $field.Column $field.Schema }} {{- end }} {{- if or (eq (index $type 0) '*') (eq (slice $type 0 2) "[]") (eq (slice $type 0 3) "map") }} b.{{ $fieldName }} = copy{{ $structName }}{{ $fieldName }}(a.{{ $fieldName }}) {{- end }} {{- end }} {{- template "deepCopyExtraFields" . }} } func (a *{{ $structName }}) DeepCopy() *{{ $structName }} { b := new({{ $structName }}) a.DeepCopyInto(b) return b } func (a *{{ $structName }}) CloneModelInto(b model.Model) { c := b.(*{{ $structName }}) a.DeepCopyInto(c) } func (a *{{ $structName }}) CloneModel() model.Model { return a.DeepCopy() } func (a *{{ $structName }}) Equals(b *{{ $structName }}) bool { {{- range $i, $field := index . 
"Fields" }} {{- $fieldName := FieldName $field.Column }} {{- $type := "" }} {{- if index $ "WithEnumTypes" }} {{- $type = FieldTypeWithEnums $tableName $field.Column $field.Schema }} {{- else }} {{- $type = FieldType $tableName $field.Column $field.Schema }} {{- end }} {{- if $i }}&& {{ else }}return {{ end }} {{- if or (eq (index $type 0) '*') (eq (slice $type 0 2) "[]") (eq (slice $type 0 3) "map") -}} equal{{ $structName }}{{ $fieldName }}(a.{{ $fieldName }}, b.{{ $fieldName }}) {{- else -}} a.{{ $fieldName }} == b.{{ $fieldName }} {{- end }} {{- end }} {{- template "equalExtraFields" . }} } func (a *{{ $structName }}) EqualsModel(b model.Model) bool { c := b.(*{{ $structName }}) return a.Equals(c) } var _ model.CloneableModel = &{{ $structName }}{} var _ model.ComparableModel = &{{ $structName }}{} {{- end }} {{- end }} ` // NewTableTemplate returns a new table template. It includes the following // other templates that can be overridden to customize the generated file: // // - `header`: override the comment as header before package definition // - `preStructDefinitions`: deprecated in favor of `extraImports` // - `extraImports`: include additional imports // - `structComment`: override the comment generated for the table // - `extraFields`: add extra fields to the table // - `extraTags`: add tags to the extra fields // - `deepCopyExtraFields`: copy extra fields when copying a table // - `equalExtraFields`: compare extra fields when comparing a table // - `postStructDefinitions`: deprecated in favor of `extraDefinitions` // - `extraDefinitions`: include additional definitions like functions etc. // // It is designed to be used with a map[string] interface and some defined keys // (see GetTableTemplateData). In addition, the following functions can be used // within the template: // // - `PrintVal`: prints a field value // - `FieldName`: prints the name of a field based on its column // - `FieldType`: prints the field type based on its column and schema // - `FieldTypeWithEnums`: same as FieldType but with enum type expansion // - `OvsdbTag`: prints the ovsdb tag func NewTableTemplate() *template.Template { return template.Must(template.New("").Funcs( template.FuncMap{ "PrintVal": printVal, "FieldName": FieldName, "FieldType": FieldType, "FieldTypeWithEnums": FieldTypeWithEnums, "OvsdbTag": Tag, }, ).Parse(extendedGenTemplate + ` {{- define "header" }} // Code generated by "libovsdb.modelgen" // DO NOT EDIT. {{- end }} {{ define "extraImports" }}{{ end }} {{ define "preStructDefinitions" }}{{ end }} {{- define "structComment" }} // {{ index . "StructName" }} defines an object in {{ index . "TableName" }} table {{- end }} {{- define "showTableName" }} const {{ index . "StructName" }}Table = "{{ index . "TableName" }}" {{- end }} {{ define "extraTags" }}{{ end }} {{ define "extraFields" }}{{ end }} {{ define "extraDefinitions" }}{{ end }} {{ define "postStructDefinitions" }}{{ end }} {{ template "header" . }} {{ define "enums" }} {{ if index . "WithEnumTypes" }} {{ if index . "Enums" }} type ( {{ range index . "Enums" }} {{ .Alias }} = {{ .Type }} {{- end }} ) var ( {{ range index . "Enums" }} {{- $e := . }} {{- range .Sets }} {{ $e.Alias }}{{ FieldName . }} {{ $e.Alias }} = {{ PrintVal . $e.Type }} {{- end }} {{- end }} ) {{- end }} {{- end }} {{- end }} package {{ index . "PackageName" }} {{ template "extendedGenImports" . }} {{ template "extraImports" . }} {{ template "preStructDefinitions" . }} {{ template "showTableName" . }} {{ template "enums" . }} {{ template "structComment" . 
}} type {{ index . "StructName" }} struct { {{- $tableName := index . "TableName" }} {{ if index . "WithEnumTypes" }} {{ range $field := index . "Fields" }} {{ FieldName $field.Column }} {{ FieldTypeWithEnums $tableName $field.Column $field.Schema }} ` + "`" + `{{ OvsdbTag $field.Column }}{{ template "extraTags" . }}` + "`" + ` {{ end }} {{ else }} {{ range $field := index . "Fields" }} {{ FieldName $field.Column }} {{ FieldType $tableName $field.Column $field.Schema }} ` + "`" + `{{ OvsdbTag $field.Column }}{{ template "extraTags" . }}` + "`" + ` {{ end }} {{ end }} {{ template "extraFields" . }} } {{ template "postStructDefinitions" . }} {{ template "extraDefinitions" . }} {{ template "extendedGen" . }} `)) } // Enum represents the enum schema type type Enum struct { Type string Alias string Sets []interface{} } // Field represents the field information type Field struct { Column string Schema *ovsdb.ColumnSchema } // TableTemplateData represents the data used by the Table Template type TableTemplateData map[string]interface{} // WithEnumTypes configures whether the Template should expand enum types or not // Enum expansion (true by default) makes the template define an type alias for each enum type // and a const for each possible enum value func (t TableTemplateData) WithEnumTypes(val bool) { t["WithEnumTypes"] = val } // WithExtendedGen configures whether the Template should generate code to deep // copy models. func (t TableTemplateData) WithExtendedGen(val bool) { t["WithExtendedGen"] = val } // GetTableTemplateData returns the TableTemplateData map. It has the following // keys: // // - `TableName`: (string) the table name // - `TPackageName`: (string) the package name // - `TStructName`: (string) the struct name // - `TFields`: []Field a list of Fields that the struct has func GetTableTemplateData(pkg, name string, table *ovsdb.TableSchema) TableTemplateData { data := map[string]interface{}{} data["TableName"] = name data["PackageName"] = pkg data["StructName"] = StructName(name) Fields := []Field{} Enums := []Enum{} // Map iteration order is random, so for predictable generation // lets sort fields by name var order sort.StringSlice for columnName := range table.Columns { order = append(order, columnName) } order.Sort() for _, columnName := range append([]string{"_uuid"}, order...) 
{ columnSchema := table.Column(columnName) Fields = append(Fields, Field{ Column: columnName, Schema: columnSchema, }) if enum := FieldEnum(name, columnName, columnSchema); enum != nil { Enums = append(Enums, *enum) } } data["Fields"] = Fields data["Enums"] = Enums data["WithEnumTypes"] = true data["WithExtendedGen"] = false return data } // FieldName returns the name of a column field func FieldName(column string) string { return camelCase(strings.Trim(column, "_")) } // StructName returns the name of the table struct func StructName(tableName string) string { return cases.Title(language.Und, cases.NoLower).String(strings.ReplaceAll(tableName, "_", "")) } func fieldType(tableName, columnName string, column *ovsdb.ColumnSchema, enumTypes bool) string { switch column.Type { case ovsdb.TypeEnum: if enumTypes { return enumName(tableName, columnName) } return AtomicType(column.TypeObj.Key.Type) case ovsdb.TypeMap: return fmt.Sprintf("map[%s]%s", AtomicType(column.TypeObj.Key.Type), AtomicType(column.TypeObj.Value.Type)) case ovsdb.TypeSet: // optional with max 1 element if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 { if enumTypes && FieldEnum(tableName, columnName, column) != nil { return fmt.Sprintf("*%s", enumName(tableName, columnName)) } return fmt.Sprintf("*%s", AtomicType(column.TypeObj.Key.Type)) } // required, max 1 element if column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 { if enumTypes && FieldEnum(tableName, columnName, column) != nil { return enumName(tableName, columnName) } return AtomicType(column.TypeObj.Key.Type) } // use a slice if enumTypes && FieldEnum(tableName, columnName, column) != nil { return fmt.Sprintf("[]%s", enumName(tableName, columnName)) } return fmt.Sprintf("[]%s", AtomicType(column.TypeObj.Key.Type)) default: return AtomicType(column.Type) } } // EnumName returns the name of the enum field func enumName(tableName, columnName string) string { return cases.Title(language.Und, cases.NoLower).String(StructName(tableName)) + camelCase(columnName) } // FieldType returns the string representation of a column type without enum types expansion func FieldType(tableName, columnName string, column *ovsdb.ColumnSchema) string { return fieldType(tableName, columnName, column, false) } // FieldTypeWithEnums returns the string representation of a column type where Enums // are expanded into their own types func FieldTypeWithEnums(tableName, columnName string, column *ovsdb.ColumnSchema) string { return fieldType(tableName, columnName, column, true) } // FieldEnum returns the Enum if the column is an enum type func FieldEnum(tableName, columnName string, column *ovsdb.ColumnSchema) *Enum { if column.TypeObj == nil || column.TypeObj.Key.Enum == nil { return nil } return &Enum{ Type: column.TypeObj.Key.Type, Alias: enumName(tableName, columnName), Sets: column.TypeObj.Key.Enum, } } // AtomicType returns the string type of an AtomicType func AtomicType(atype string) string { switch atype { case ovsdb.TypeInteger: return "int" case ovsdb.TypeReal: return "float64" case ovsdb.TypeBoolean: return "bool" case ovsdb.TypeString: return "string" case ovsdb.TypeUUID: return "string" } return "" } // Tag returns the Tag string of a column func Tag(column string) string { return fmt.Sprintf("ovsdb:\"%s\"", column) } // FileName returns the filename of a table func FileName(table string) string { return fmt.Sprintf("%s.go", strings.ToLower(table)) } // common initialisms used in ovsdb schemas var initialisms = map[string]bool{ "ACL": true, "BFD": true, "CFM": true, 
"CT": true, "CVLAN": true, "DNS": true, "DSCP": true, "ID": true, "IP": true, "IPFIX": true, "LACP": true, "LLDP": true, "MAC": true, "MTU": true, "OVS": true, "QOS": true, "RSTP": true, "SSL": true, "STP": true, "TCP": true, "SCTP": true, "UDP": true, "UUID": true, "VLAN": true, "STT": true, "DNAT": true, "SNAT": true, "ICMP": true, "SLB": true, } func camelCase(field string) string { s := strings.ToLower(field) parts := strings.FieldsFunc(s, func(r rune) bool { return r == '_' || r == '-' }) if len(parts) > 1 { s = "" for _, p := range parts { s += cases.Title(language.Und, cases.NoLower).String(expandInitilaisms(p)) } } else { s = cases.Title(language.Und, cases.NoLower).String(expandInitilaisms(s)) } return s } func expandInitilaisms(s string) string { // check initialisms if u := strings.ToUpper(s); initialisms[u] { return strings.ToUpper(s) } // check for plurals too if strings.HasSuffix(s, "s") { sub := s[:len(s)-1] if u := strings.ToUpper(sub); initialisms[u] { return strings.ToUpper(sub) + "s" } } return s } func printVal(v interface{}, t string) string { switch t { case "int": return fmt.Sprintf(`%d`, v) case "float64": return fmt.Sprintf(`%f`, v) case "bool": return fmt.Sprintf(`%t`, v) case "string": return fmt.Sprintf(`"%s"`, v) } return "" } golang-github-ovn-org-libovsdb-0.7.0/modelgen/table_test.go000066400000000000000000000615701464501522100237140ustar00rootroot00000000000000package modelgen import ( "encoding/json" "fmt" "reflect" "testing" "text/template" "github.com/google/uuid" "github.com/ovn-org/libovsdb/example/vswitchd" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestNewTableTemplate(t *testing.T) { rawSchema := []byte(` { "name": "AtomicDB", "version": "0.0.0", "tables": { "atomicTable": { "columns": { "str": { "type": "string" }, "int": { "type": "integer" }, "float": { "type": "real" }, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp", "sctp"]]}, "min": 0, "max": 1}}, "event_type": {"type": {"key": {"type": "string", "enum": ["set", ["empty_lb_backends"]]}}} } } } }`) test := []struct { name string extend func(tmpl *template.Template, data TableTemplateData) expected string err bool formatErr bool }{ { name: "normal", expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. package test const AtomicTableTable = "atomicTable" type ( AtomicTableEventType = string AtomicTableProtocol = string ) var ( AtomicTableEventTypeEmptyLbBackends AtomicTableEventType = "empty_lb_backends" AtomicTableProtocolTCP AtomicTableProtocol = "tcp" AtomicTableProtocolUDP AtomicTableProtocol = "udp" AtomicTableProtocolSCTP AtomicTableProtocol = "sctp" ) // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType AtomicTableEventType ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *AtomicTableProtocol ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` } `, }, { name: "no enums", extend: func(tmpl *template.Template, data TableTemplateData) { data.WithEnumTypes(false) }, expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. 
package test const AtomicTableTable = "atomicTable" // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType string ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *string ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` } `, }, { name: "add fields using same data", extend: func(tmpl *template.Template, data TableTemplateData) { extra := `{{ define "extraFields" }} {{- $tableName := index . "TableName" }} {{ range $field := index . "Fields" }} Other{{ FieldName $field.Column }} {{ FieldType $tableName $field.Column $field.Schema }} {{ end }} {{- end }}` _, err := tmpl.Parse(extra) if err != nil { panic(err) } }, expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. package test const AtomicTableTable = "atomicTable" type ( AtomicTableEventType = string AtomicTableProtocol = string ) var ( AtomicTableEventTypeEmptyLbBackends AtomicTableEventType = "empty_lb_backends" AtomicTableProtocolTCP AtomicTableProtocol = "tcp" AtomicTableProtocolUDP AtomicTableProtocol = "udp" AtomicTableProtocolSCTP AtomicTableProtocol = "sctp" ) // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType AtomicTableEventType ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *AtomicTableProtocol ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` OtherUUID string OtherEventType string OtherFloat float64 OtherInt int OtherProtocol *string OtherStr string } `, }, { name: "with deep copy code", extend: func(tmpl *template.Template, data TableTemplateData) { data.WithExtendedGen(true) }, expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. 
package test import "github.com/ovn-org/libovsdb/model" const AtomicTableTable = "atomicTable" type ( AtomicTableEventType = string AtomicTableProtocol = string ) var ( AtomicTableEventTypeEmptyLbBackends AtomicTableEventType = "empty_lb_backends" AtomicTableProtocolTCP AtomicTableProtocol = "tcp" AtomicTableProtocolUDP AtomicTableProtocol = "udp" AtomicTableProtocolSCTP AtomicTableProtocol = "sctp" ) // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType AtomicTableEventType ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *AtomicTableProtocol ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` } func (a *AtomicTable) GetUUID() string { return a.UUID } func (a *AtomicTable) GetEventType() AtomicTableEventType { return a.EventType } func (a *AtomicTable) GetFloat() float64 { return a.Float } func (a *AtomicTable) GetInt() int { return a.Int } func (a *AtomicTable) GetProtocol() *AtomicTableProtocol { return a.Protocol } func copyAtomicTableProtocol(a *AtomicTableProtocol) *AtomicTableProtocol { if a == nil { return nil } b := *a return &b } func equalAtomicTableProtocol(a, b *AtomicTableProtocol) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *AtomicTable) GetStr() string { return a.Str } func (a *AtomicTable) DeepCopyInto(b *AtomicTable) { *b = *a b.Protocol = copyAtomicTableProtocol(a.Protocol) } func (a *AtomicTable) DeepCopy() *AtomicTable { b := new(AtomicTable) a.DeepCopyInto(b) return b } func (a *AtomicTable) CloneModelInto(b model.Model) { c := b.(*AtomicTable) a.DeepCopyInto(c) } func (a *AtomicTable) CloneModel() model.Model { return a.DeepCopy() } func (a *AtomicTable) Equals(b *AtomicTable) bool { return a.UUID == b.UUID && a.EventType == b.EventType && a.Float == b.Float && a.Int == b.Int && equalAtomicTableProtocol(a.Protocol, b.Protocol) && a.Str == b.Str } func (a *AtomicTable) EqualsModel(b model.Model) bool { c := b.(*AtomicTable) return a.Equals(c) } var _ model.CloneableModel = &AtomicTable{} var _ model.ComparableModel = &AtomicTable{} `, }, { name: "with deep copy code and extra fields", extend: func(tmpl *template.Template, data TableTemplateData) { data.WithExtendedGen(true) extra := `{{ define "extraFields" }} {{- $tableName := index . "TableName" }} {{ range $field := index . "Fields" }} Other{{ FieldName $field.Column }} {{ FieldType $tableName $field.Column $field.Schema }} {{ end }} {{- end }} {{ define "extraImports" }} import "fmt" {{ end }} {{ define "extraDefinitions" }} func copyAtomicTableOtherProtocol(a *AtomicTableProtocol) *AtomicTableProtocol { if a == nil { return nil } b := *a return &b } func equalAtomicTableOtherProtocol(a, b *AtomicTableProtocol) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *AtomicTable) PrintAtomicTableOtherProtocol() bool { fmt.Printf(a.OtherProtocol) } {{ end }} {{ define "deepCopyExtraFields" }} b.OtherProtocol = copyAtomicTableOtherProtocol(a.OtherProtocol) {{- end }} {{ define "equalExtraFields" }} && equalAtomicTableOtherProtocol(a.OtherProtocol, b.OtherProtocol) {{- end }} ` _, err := tmpl.Parse(extra) if err != nil { panic(err) } }, expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. 
package test import "github.com/ovn-org/libovsdb/model" import "fmt" const AtomicTableTable = "atomicTable" type ( AtomicTableEventType = string AtomicTableProtocol = string ) var ( AtomicTableEventTypeEmptyLbBackends AtomicTableEventType = "empty_lb_backends" AtomicTableProtocolTCP AtomicTableProtocol = "tcp" AtomicTableProtocolUDP AtomicTableProtocol = "udp" AtomicTableProtocolSCTP AtomicTableProtocol = "sctp" ) // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType AtomicTableEventType ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *AtomicTableProtocol ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` OtherUUID string OtherEventType string OtherFloat float64 OtherInt int OtherProtocol *string OtherStr string } func copyAtomicTableOtherProtocol(a *AtomicTableProtocol) *AtomicTableProtocol { if a == nil { return nil } b := *a return &b } func equalAtomicTableOtherProtocol(a, b *AtomicTableProtocol) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *AtomicTable) PrintAtomicTableOtherProtocol() bool { fmt.Printf(a.OtherProtocol) } func (a *AtomicTable) GetUUID() string { return a.UUID } func (a *AtomicTable) GetEventType() AtomicTableEventType { return a.EventType } func (a *AtomicTable) GetFloat() float64 { return a.Float } func (a *AtomicTable) GetInt() int { return a.Int } func (a *AtomicTable) GetProtocol() *AtomicTableProtocol { return a.Protocol } func copyAtomicTableProtocol(a *AtomicTableProtocol) *AtomicTableProtocol { if a == nil { return nil } b := *a return &b } func equalAtomicTableProtocol(a, b *AtomicTableProtocol) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *AtomicTable) GetStr() string { return a.Str } func (a *AtomicTable) DeepCopyInto(b *AtomicTable) { *b = *a b.Protocol = copyAtomicTableProtocol(a.Protocol) b.OtherProtocol = copyAtomicTableOtherProtocol(a.OtherProtocol) } func (a *AtomicTable) DeepCopy() *AtomicTable { b := new(AtomicTable) a.DeepCopyInto(b) return b } func (a *AtomicTable) CloneModelInto(b model.Model) { c := b.(*AtomicTable) a.DeepCopyInto(c) } func (a *AtomicTable) CloneModel() model.Model { return a.DeepCopy() } func (a *AtomicTable) Equals(b *AtomicTable) bool { return a.UUID == b.UUID && a.EventType == b.EventType && a.Float == b.Float && a.Int == b.Int && equalAtomicTableProtocol(a.Protocol, b.Protocol) && a.Str == b.Str && equalAtomicTableOtherProtocol(a.OtherProtocol, b.OtherProtocol) } func (a *AtomicTable) EqualsModel(b model.Model) bool { c := b.(*AtomicTable) return a.Equals(c) } var _ model.CloneableModel = &AtomicTable{} var _ model.ComparableModel = &AtomicTable{} `, }, { name: "with deep copy code but no enums", extend: func(tmpl *template.Template, data TableTemplateData) { data.WithExtendedGen(true) data.WithEnumTypes(false) }, expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. 
package test import "github.com/ovn-org/libovsdb/model" const AtomicTableTable = "atomicTable" // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType string ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *string ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` } func (a *AtomicTable) GetUUID() string { return a.UUID } func (a *AtomicTable) GetEventType() string { return a.EventType } func (a *AtomicTable) GetFloat() float64 { return a.Float } func (a *AtomicTable) GetInt() int { return a.Int } func (a *AtomicTable) GetProtocol() *string { return a.Protocol } func copyAtomicTableProtocol(a *string) *string { if a == nil { return nil } b := *a return &b } func equalAtomicTableProtocol(a, b *string) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *AtomicTable) GetStr() string { return a.Str } func (a *AtomicTable) DeepCopyInto(b *AtomicTable) { *b = *a b.Protocol = copyAtomicTableProtocol(a.Protocol) } func (a *AtomicTable) DeepCopy() *AtomicTable { b := new(AtomicTable) a.DeepCopyInto(b) return b } func (a *AtomicTable) CloneModelInto(b model.Model) { c := b.(*AtomicTable) a.DeepCopyInto(c) } func (a *AtomicTable) CloneModel() model.Model { return a.DeepCopy() } func (a *AtomicTable) Equals(b *AtomicTable) bool { return a.UUID == b.UUID && a.EventType == b.EventType && a.Float == b.Float && a.Int == b.Int && equalAtomicTableProtocol(a.Protocol, b.Protocol) && a.Str == b.Str } func (a *AtomicTable) EqualsModel(b model.Model) bool { c := b.(*AtomicTable) return a.Equals(c) } var _ model.CloneableModel = &AtomicTable{} var _ model.ComparableModel = &AtomicTable{} `, }, { name: "add extra functions using extra data", extend: func(tmpl *template.Template, data TableTemplateData) { extra := `{{ define "postStructDefinitions" }} func {{ index . "TestName" }} () string { return "{{ index . "StructName" }}" } {{ end }} ` _, err := tmpl.Parse(extra) if err != nil { panic(err) } data["TestName"] = "TestFunc" }, expected: `// Code generated by "libovsdb.modelgen" // DO NOT EDIT. 
package test const AtomicTableTable = "atomicTable" type ( AtomicTableEventType = string AtomicTableProtocol = string ) var ( AtomicTableEventTypeEmptyLbBackends AtomicTableEventType = "empty_lb_backends" AtomicTableProtocolTCP AtomicTableProtocol = "tcp" AtomicTableProtocolUDP AtomicTableProtocol = "udp" AtomicTableProtocolSCTP AtomicTableProtocol = "sctp" ) // AtomicTable defines an object in atomicTable table type AtomicTable struct { UUID string ` + "`" + `ovsdb:"_uuid"` + "`" + ` EventType AtomicTableEventType ` + "`" + `ovsdb:"event_type"` + "`" + ` Float float64 ` + "`" + `ovsdb:"float"` + "`" + ` Int int ` + "`" + `ovsdb:"int"` + "`" + ` Protocol *AtomicTableProtocol ` + "`" + `ovsdb:"protocol"` + "`" + ` Str string ` + "`" + `ovsdb:"str"` + "`" + ` } func TestFunc() string { return "AtomicTable" } `, }, { name: "add bad code", formatErr: true, extend: func(tmpl *template.Template, data TableTemplateData) { extra := `{{ define "preStructDefinitions" }} WRONG FORMAT {{ end }} ` _, err := tmpl.Parse(extra) if err != nil { panic(err) } }, }, } var schema ovsdb.DatabaseSchema err := json.Unmarshal(rawSchema, &schema) if err != nil { t.Fatal(err) } for _, tt := range test { t.Run(fmt.Sprintf("Table Test: %s", tt.name), func(t *testing.T) { fakeTable := "atomicTable" tmpl := NewTableTemplate() table := schema.Tables[fakeTable] data := GetTableTemplateData( "test", fakeTable, &table, ) if tt.err { assert.NotNil(t, err) } else { if tt.extend != nil { tt.extend(tmpl, data) } for i := 0; i < 3; i++ { g, err := NewGenerator() require.NoError(t, err) b, err := g.Format(tmpl, data) if tt.formatErr { assert.NotNil(t, err) } else { require.NoError(t, err) assert.Equal(t, tt.expected, string(b)) } } } }) } } func TestFieldName(t *testing.T) { cases := []struct { in string expected string }{ {"foo", "Foo"}, } for _, tt := range cases { if s := FieldName(tt.in); s != tt.expected { t.Fatalf("got %s, wanted %s", s, tt.expected) } } } func TestStructName(t *testing.T) { if s := StructName("Foo_Bar"); s != "FooBar" { t.Fatalf("got %s, wanted FooBar", s) } } func TestFieldType(t *testing.T) { singleValueSet := `{ "type": { "key": { "type": "string" }, "min": 0 } }` singleValueSetSchema := ovsdb.ColumnSchema{} err := json.Unmarshal([]byte(singleValueSet), &singleValueSetSchema) require.NoError(t, err) multipleValueSet := `{ "type": { "key": { "type": "string" }, "min": 0, "max": 2 } }` multipleValueSetSchema := ovsdb.ColumnSchema{} err = json.Unmarshal([]byte(multipleValueSet), &multipleValueSetSchema) require.NoError(t, err) tests := []struct { tableName string columnName string in *ovsdb.ColumnSchema out string }{ {"t1", "c1", &singleValueSetSchema, "*string"}, {"t1", "c2", &multipleValueSetSchema, "[]string"}, } for _, tt := range tests { if got := FieldType(tt.tableName, tt.columnName, tt.in); got != tt.out { t.Errorf("FieldType() = %v, want %v", got, tt.out) } } } func TestAtomicType(t *testing.T) { tests := []struct { name string in string out string }{ {"IntegerToInt", ovsdb.TypeInteger, "int"}, {"RealToFloat", ovsdb.TypeReal, "float64"}, {"BooleanToBool", ovsdb.TypeBoolean, "bool"}, {"StringToString", ovsdb.TypeString, "string"}, {"UUIDToString", ovsdb.TypeUUID, "string"}, {"Invalid", "notAType", ""}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := AtomicType(tt.in); got != tt.out { t.Errorf("got %s, wanted %s", got, tt.out) } }) } } func TestTag(t *testing.T) { if s := Tag("Foo_Bar"); s != "ovsdb:\"Foo_Bar\"" { t.Fatalf("got %s, wanted ovsdb:\"Foo_Bar\"", s) } } func 
TestFileName(t *testing.T) { if s := FileName("foo"); s != "foo.go" { t.Fatalf("got %s, wanted foo.go", s) } } func TestCamelCase(t *testing.T) { cases := []struct { in string expected string }{ {"foo_bar_baz", "FooBarBaz"}, {"foo-bar-baz", "FooBarBaz"}, {"foos-bars-bazs", "FoosBarsBazs"}, {"ip_port_mappings", "IPPortMappings"}, {"external_ids", "ExternalIDs"}, {"ip_prefix", "IPPrefix"}, {"dns_records", "DNSRecords"}, {"logical_ip", "LogicalIP"}, {"ip", "IP"}, } for _, tt := range cases { if s := camelCase(tt.in); s != tt.expected { t.Fatalf("got %s, wanted %s", s, tt.expected) } } } func ExampleNewTableTemplate() { schemaString := []byte(` { "name": "MyDB", "version": "0.0.0", "tables": { "table1": { "columns": { "string_column": { "type": "string" }, "some_integer": { "type": "integer" } } } } }`) var schema ovsdb.DatabaseSchema err := json.Unmarshal(schemaString, &schema) if err != nil { panic(err) } base := NewTableTemplate() data := GetTableTemplateData("mypackage", "table1", schema.Table("table1")) // Add a function at after the struct definition // It can access the default data values plus any extra field that is added to data _, err = base.Parse(`{{define "postStructDefinitions"}} func (t {{ index . "StructName" }}) {{ index . "FuncName"}}() string { return "bar" }{{end}}`) if err != nil { panic(err) } data["FuncName"] = "TestFunc" gen, err := NewGenerator(WithDryRun()) if err != nil { panic(err) } err = gen.Generate("generated.go", base, data) if err != nil { panic(err) } } func TestExtendedGenCloneableModel(t *testing.T) { a := &vswitchd.Bridge{} func(a interface{}) { _, ok := a.(model.CloneableModel) assert.True(t, ok, "is not cloneable") }(a) } func TestExtendedGenComparableModel(t *testing.T) { a := &vswitchd.Bridge{} func(a interface{}) { _, ok := a.(model.ComparableModel) assert.True(t, ok, "is not comparable") }(a) } func doGenDeepCopy(data model.CloneableModel, b *testing.B) { _ = data.CloneModel() } func doJSONDeepCopy(data model.CloneableModel, b *testing.B) { aBytes, err := json.Marshal(data) if err != nil { b.Fatal(err) } err = json.Unmarshal(aBytes, data) if err != nil { b.Fatal(err) } } func buildRandStr() *string { str := uuid.New().String() return &str } func buildTestBridge() *vswitchd.Bridge { return &vswitchd.Bridge{ UUID: *buildRandStr(), AutoAttach: buildRandStr(), Controller: []string{*buildRandStr(), *buildRandStr()}, DatapathID: buildRandStr(), DatapathType: *buildRandStr(), DatapathVersion: *buildRandStr(), ExternalIDs: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, FailMode: &vswitchd.BridgeFailModeSecure, FloodVLANs: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, FlowTables: map[int]string{1: *buildRandStr(), 2: *buildRandStr()}, IPFIX: buildRandStr(), McastSnoopingEnable: false, Mirrors: []string{*buildRandStr(), *buildRandStr()}, Name: *buildRandStr(), Netflow: buildRandStr(), OtherConfig: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, Ports: []string{*buildRandStr(), *buildRandStr()}, Protocols: []string{*buildRandStr(), *buildRandStr()}, RSTPEnable: true, RSTPStatus: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, Sflow: buildRandStr(), Status: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, STPEnable: false, } } func buildTestInterface() *vswitchd.Interface { aBool := false aInt := 0 return &vswitchd.Interface{ UUID: *buildRandStr(), AdminState: buildRandStr(), BFD: map[string]string{*buildRandStr(): 
*buildRandStr(), *buildRandStr(): *buildRandStr()}, BFDStatus: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, CFMFault: &aBool, CFMFaultStatus: []string{*buildRandStr(), *buildRandStr()}, CFMFlapCount: &aInt, CFMHealth: &aInt, CFMMpid: &aInt, CFMRemoteMpids: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, CFMRemoteOpstate: buildRandStr(), Duplex: buildRandStr(), Error: buildRandStr(), ExternalIDs: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, Ifindex: &aInt, IngressPolicingBurst: aInt, IngressPolicingKpktsBurst: aInt, IngressPolicingKpktsRate: aInt, IngressPolicingRate: aInt, LACPCurrent: &aBool, LinkResets: &aInt, LinkSpeed: &aInt, LinkState: buildRandStr(), LLDP: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, MAC: buildRandStr(), MACInUse: buildRandStr(), MTU: &aInt, MTURequest: &aInt, Name: *buildRandStr(), Ofport: &aInt, OfportRequest: &aInt, Options: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, OtherConfig: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, Statistics: map[string]int{*buildRandStr(): 0, *buildRandStr(): 1}, Status: map[string]string{*buildRandStr(): *buildRandStr(), *buildRandStr(): *buildRandStr()}, Type: *buildRandStr(), } } func BenchmarkDeepCopy(b *testing.B) { bridge := buildTestBridge() intf := buildTestInterface() benchmarks := []struct { name string data model.CloneableModel deepCopier func(model.CloneableModel, *testing.B) }{ {"modelgen Bridge", bridge, doGenDeepCopy}, {"json Bridge", bridge, doJSONDeepCopy}, {"modelgen Interface", intf, doGenDeepCopy}, {"json Interface", intf, doJSONDeepCopy}, } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { for i := 0; i < b.N; i++ { bm.deepCopier(bm.data, b) } }) } } func doGenEquals(l, r model.ComparableModel, b *testing.B) { l.EqualsModel(r) } func doDeepEqual(l, r model.ComparableModel, b *testing.B) { reflect.DeepEqual(l, r) } func BenchmarkDeepEqual(b *testing.B) { bridge := buildTestBridge() intf := buildTestInterface() benchmarks := []struct { name string left model.ComparableModel right model.ComparableModel comparator func(model.ComparableModel, model.ComparableModel, *testing.B) }{ {"modelgen Bridge", bridge, bridge.DeepCopy(), doGenEquals}, {"reflect Bridge", bridge, bridge.DeepCopy(), doDeepEqual}, {"modelgen Interface", intf, intf.DeepCopy(), doGenEquals}, {"reflect Interface", intf, intf.DeepCopy(), doDeepEqual}, } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { for i := 0; i < b.N; i++ { bm.comparator(bm.left, bm.right, b) } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovs/000077500000000000000000000000001464501522100202435ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/ovs/Dockerfile000066400000000000000000000016471464501522100222450ustar00rootroot00000000000000FROM gcc:10.3.0 as build ARG OVS_VERSION=v2.15.0 ADD https://github.com/openvswitch/ovs/archive/${OVS_VERSION}.tar.gz /src/ WORKDIR /src RUN mkdir -p /src/ovs RUN tar --strip-components=1 -C ovs -xvf ${OVS_VERSION}.tar.gz WORKDIR /src/ovs RUN ./boot.sh RUN ./configure RUN make && make install FROM debian:bullseye-slim COPY --from=build /usr/local/bin/ovs* /usr/local/bin/ COPY --from=build /usr/local/bin/vtep* /usr/local/bin/ COPY --from=build /usr/local/sbin/* /usr/local/sbin/ COPY --from=build /usr/local/etc/openvswitch /usr/local/etc/openvswitch COPY --from=build /usr/local/share/openvswitch 
/usr/local/share/openvswitch COPY --from=build /usr/local/lib/lib* /usr/local/lib/ RUN apt-get update && apt-get -qy install libatomic1 RUN ovsdb-tool create /usr/local/etc/openvswitch/conf.db /usr/local/share/openvswitch/vswitch.ovsschema RUN mkdir -p /usr/local/var/run/openvswitch ADD start.sh / ENTRYPOINT ["/start.sh"]golang-github-ovn-org-libovsdb-0.7.0/ovs/start.sh000077500000000000000000000016121464501522100217370ustar00rootroot00000000000000#!/bin/sh set -mex ovs_version=$(ovs-vsctl -V | grep ovs-vsctl | awk '{print $4}') ovs_db_version=$(ovsdb-tool schema-version /usr/local/share/openvswitch/vswitch.ovsschema) ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock --remote=ptcp:6640 --pidfile=ovsdb-server.pid & # wait for ovsdb server to start sleep 0.1 # begin configuring ovs-vsctl --no-wait -- init ovs-vsctl --no-wait -- set Open_vSwitch . db-version="${ovs_db_version}" ovs-vsctl --no-wait -- set Open_vSwitch . ovs-version="${ovs_version}" ovs-vsctl --no-wait -- set Open_vSwitch . system-type="docker-ovs" ovs-vsctl --no-wait -- set Open_vSwitch . system-version="0.1" ovs-vsctl --no-wait -- set Open_vSwitch . external-ids:system-id=`cat /proc/sys/kernel/random/uuid` ovs-vsctl --no-wait -- set-manager ptcp:6640 ovs-appctl -t ovsdb-server ovsdb-server/add-remote db:Open_vSwitch,Open_vSwitch,manager_options fg %1golang-github-ovn-org-libovsdb-0.7.0/ovsdb/000077500000000000000000000000001464501522100205515ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/ovsdb/bindings.go000066400000000000000000000317621464501522100227060ustar00rootroot00000000000000package ovsdb import ( "fmt" "reflect" ) var ( intType = reflect.TypeOf(0) realType = reflect.TypeOf(0.0) boolType = reflect.TypeOf(true) strType = reflect.TypeOf("") ) // ErrWrongType describes typing error type ErrWrongType struct { from string expected string got interface{} } func (e *ErrWrongType) Error() string { return fmt.Sprintf("Wrong Type (%s): expected %s but got %+v (%s)", e.from, e.expected, e.got, reflect.TypeOf(e.got)) } // NewErrWrongType creates a new ErrWrongType func NewErrWrongType(from, expected string, got interface{}) error { return &ErrWrongType{ from: from, expected: expected, got: got, } } // NativeTypeFromAtomic returns the native type that can hold a value of an // AtomicType func NativeTypeFromAtomic(basicType string) reflect.Type { switch basicType { case TypeInteger: return intType case TypeReal: return realType case TypeBoolean: return boolType case TypeString: return strType case TypeUUID: return strType default: panic("Unknown basic type %s basicType") } } // NativeType returns the reflect.Type that can hold the value of a column // OVS Type to Native Type convertions: // // OVS sets -> go slices or a go native type depending on the key // OVS uuid -> go strings // OVS map -> go map // OVS enum -> go native type depending on the type of the enum key func NativeType(column *ColumnSchema) reflect.Type { switch column.Type { case TypeInteger, TypeReal, TypeBoolean, TypeUUID, TypeString: return NativeTypeFromAtomic(column.Type) case TypeEnum: return NativeTypeFromAtomic(column.TypeObj.Key.Type) case TypeMap: keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type) valueType := NativeTypeFromAtomic(column.TypeObj.Value.Type) return reflect.MapOf(keyType, valueType) case TypeSet: keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type) // optional type if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 { return reflect.PtrTo(keyType) } // non-optional type with max 1 if 
column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 { return keyType } return reflect.SliceOf(keyType) default: panic(fmt.Errorf("unknown extended type %s", column.Type)) } } // OvsToNativeAtomic returns the native type of the basic ovs type func OvsToNativeAtomic(basicType string, ovsElem interface{}) (interface{}, error) { switch basicType { case TypeReal, TypeString, TypeBoolean: naType := NativeTypeFromAtomic(basicType) if reflect.TypeOf(ovsElem) != naType { return nil, NewErrWrongType("OvsToNativeAtomic", naType.String(), ovsElem) } return ovsElem, nil case TypeInteger: naType := NativeTypeFromAtomic(basicType) // Default decoding of numbers is float64, convert them to int if !reflect.TypeOf(ovsElem).ConvertibleTo(naType) { return nil, NewErrWrongType("OvsToNativeAtomic", fmt.Sprintf("Convertible to %s", naType), ovsElem) } return reflect.ValueOf(ovsElem).Convert(naType).Interface(), nil case TypeUUID: uuid, ok := ovsElem.(UUID) if !ok { return nil, NewErrWrongType("OvsToNativeAtomic", "UUID", ovsElem) } return uuid.GoUUID, nil default: panic(fmt.Errorf("unknown atomic type %s", basicType)) } } func OvsToNativeSlice(baseType string, ovsElem interface{}) (interface{}, error) { naType := NativeTypeFromAtomic(baseType) var nativeSet reflect.Value switch ovsSet := ovsElem.(type) { case OvsSet: nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, len(ovsSet.GoSet)) for _, v := range ovsSet.GoSet { nv, err := OvsToNativeAtomic(baseType, v) if err != nil { return nil, err } nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv)) } default: nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, 1) nv, err := OvsToNativeAtomic(baseType, ovsElem) if err != nil { return nil, err } nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv)) } return nativeSet.Interface(), nil } // OvsToNative transforms an ovs type to native one based on the column type information func OvsToNative(column *ColumnSchema, ovsElem interface{}) (interface{}, error) { switch column.Type { case TypeReal, TypeString, TypeBoolean, TypeInteger, TypeUUID: return OvsToNativeAtomic(column.Type, ovsElem) case TypeEnum: return OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem) case TypeSet: naType := NativeType(column) // The inner slice is []interface{} // We need to convert it to the real type os slice switch naType.Kind() { case reflect.Ptr: switch ovsSet := ovsElem.(type) { case OvsSet: if len(ovsSet.GoSet) > 1 { return nil, fmt.Errorf("expected a slice of len =< 1, but got a slice with %d elements", len(ovsSet.GoSet)) } if len(ovsSet.GoSet) == 0 { return reflect.Zero(naType).Interface(), nil } native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsSet.GoSet[0]) if err != nil { return nil, err } pv := reflect.New(naType.Elem()) pv.Elem().Set(reflect.ValueOf(native)) return pv.Interface(), nil default: native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem) if err != nil { return nil, err } pv := reflect.New(naType.Elem()) pv.Elem().Set(reflect.ValueOf(native)) return pv.Interface(), nil } case reflect.Slice: return OvsToNativeSlice(column.TypeObj.Key.Type, ovsElem) default: return nil, fmt.Errorf("native type was not slice or pointer. 
got %d", naType.Kind()) } case TypeMap: naType := NativeType(column) ovsMap, ok := ovsElem.(OvsMap) if !ok { return nil, NewErrWrongType("OvsToNative", "OvsMap", ovsElem) } // The inner slice is map[interface]interface{} // We need to convert it to the real type os slice nativeMap := reflect.MakeMapWithSize(naType, len(ovsMap.GoMap)) for k, v := range ovsMap.GoMap { nk, err := OvsToNativeAtomic(column.TypeObj.Key.Type, k) if err != nil { return nil, err } nv, err := OvsToNativeAtomic(column.TypeObj.Value.Type, v) if err != nil { return nil, err } nativeMap.SetMapIndex(reflect.ValueOf(nk), reflect.ValueOf(nv)) } return nativeMap.Interface(), nil default: panic(fmt.Sprintf("Unknown Type: %v", column.Type)) } } // NativeToOvsAtomic returns the OVS type of the atomic native value func NativeToOvsAtomic(basicType string, nativeElem interface{}) (interface{}, error) { naType := NativeTypeFromAtomic(basicType) if reflect.TypeOf(nativeElem) != naType { return nil, NewErrWrongType("NativeToOvsAtomic", naType.String(), nativeElem) } switch basicType { case TypeUUID: return UUID{GoUUID: nativeElem.(string)}, nil default: return nativeElem, nil } } // NativeToOvs transforms an native type to a ovs type based on the column type information func NativeToOvs(column *ColumnSchema, rawElem interface{}) (interface{}, error) { naType := NativeType(column) if t := reflect.TypeOf(rawElem); t != naType { return nil, NewErrWrongType("NativeToOvs", naType.String(), rawElem) } switch column.Type { case TypeInteger, TypeReal, TypeString, TypeBoolean, TypeEnum: return rawElem, nil case TypeUUID: return UUID{GoUUID: rawElem.(string)}, nil case TypeSet: var ovsSet OvsSet if column.TypeObj.Key.Type == TypeUUID { ovsSlice := []interface{}{} if _, ok := rawElem.([]string); ok { for _, v := range rawElem.([]string) { uuid := UUID{GoUUID: v} ovsSlice = append(ovsSlice, uuid) } } else if _, ok := rawElem.(*string); ok { v := rawElem.(*string) if v != nil { uuid := UUID{GoUUID: *v} ovsSlice = append(ovsSlice, uuid) } } else { return nil, fmt.Errorf("uuid slice was neither []string or *string") } ovsSet = OvsSet{GoSet: ovsSlice} } else { var err error ovsSet, err = NewOvsSet(rawElem) if err != nil { return nil, err } } return ovsSet, nil case TypeMap: nativeMapVal := reflect.ValueOf(rawElem) ovsMap := make(map[interface{}]interface{}, nativeMapVal.Len()) for _, key := range nativeMapVal.MapKeys() { ovsKey, err := NativeToOvsAtomic(column.TypeObj.Key.Type, key.Interface()) if err != nil { return nil, err } ovsVal, err := NativeToOvsAtomic(column.TypeObj.Value.Type, nativeMapVal.MapIndex(key).Interface()) if err != nil { return nil, err } ovsMap[ovsKey] = ovsVal } return OvsMap{GoMap: ovsMap}, nil default: panic(fmt.Sprintf("Unknown Type: %v", column.Type)) } } // IsDefaultValue checks if a provided native element corresponds to the default value of its // designated column type func IsDefaultValue(column *ColumnSchema, nativeElem interface{}) bool { switch column.Type { case TypeEnum: return isDefaultBaseValue(nativeElem, column.TypeObj.Key.Type) default: return isDefaultBaseValue(nativeElem, column.Type) } } // ValidateMutationAtomic checks if the mutation is valid for a specific AtomicType func validateMutationAtomic(atype string, mutator Mutator, value interface{}) error { nType := NativeTypeFromAtomic(atype) if reflect.TypeOf(value) != nType { return NewErrWrongType(fmt.Sprintf("Mutation of atomic type %s", atype), nType.String(), value) } switch atype { case TypeUUID, TypeString, TypeBoolean: return fmt.Errorf("atomictype 
%s does not support mutation", atype) case TypeReal: switch mutator { case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide: return nil default: return fmt.Errorf("wrong mutator for real type %s", mutator) } case TypeInteger: switch mutator { case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo: return nil default: return fmt.Errorf("wrong mutator for integer type: %s", mutator) } default: panic("Unsupported Atomic Type") } } // ValidateMutation checks if the mutation value and mutator string area appropriate // for a given column based on the rules specified RFC7047 func ValidateMutation(column *ColumnSchema, mutator Mutator, value interface{}) error { if !column.Mutable() { return fmt.Errorf("column is not mutable") } switch column.Type { case TypeSet: switch mutator { case MutateOperationInsert, MutateOperationDelete: // RFC7047 says a may be an with a single // element. Check if we can store this value in our column if reflect.TypeOf(value).Kind() != reflect.Slice { if NativeType(column) != reflect.SliceOf(reflect.TypeOf(value)) { return NewErrWrongType(fmt.Sprintf("Mutation %s of single value in to column %s", mutator, column), NativeType(column).String(), reflect.SliceOf(reflect.TypeOf(value)).String()) } return nil } if NativeType(column) != reflect.TypeOf(value) { return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), NativeType(column).String(), value) } return nil default: return validateMutationAtomic(column.TypeObj.Key.Type, mutator, value) } case TypeMap: switch mutator { case MutateOperationInsert: // Value must be a map of the same kind if reflect.TypeOf(value) != NativeType(column) { return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), NativeType(column).String(), value) } return nil case MutateOperationDelete: // Value must be a map of the same kind or a set of keys to delete if reflect.TypeOf(value) != NativeType(column) && reflect.TypeOf(value) != reflect.SliceOf(NativeTypeFromAtomic(column.TypeObj.Key.Type)) { return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), "compatible map type", value) } return nil default: return fmt.Errorf("wrong mutator for map type: %s", mutator) } case TypeEnum: // RFC does not clarify what to do with enums. 
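		// Illustrative example (assuming a scalar enum column such as the
		// "event_type" column in the test schema, i.e. a string key with
		// enum ["empty_lb_backends"] and no min/max): a call like
		//   ValidateMutation(col, MutateOperationInsert, "empty_lb_backends")
		// is rejected here with this error rather than being sent to the server.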
return fmt.Errorf("enums do not support mutation") default: return validateMutationAtomic(column.Type, mutator, value) } } func ValidateCondition(column *ColumnSchema, function ConditionFunction, nativeValue interface{}) error { if NativeType(column) != reflect.TypeOf(nativeValue) { return NewErrWrongType(fmt.Sprintf("Condition for column %s", column), NativeType(column).String(), nativeValue) } switch column.Type { case TypeSet, TypeMap, TypeBoolean, TypeString, TypeUUID: switch function { case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes: return nil default: return fmt.Errorf("wrong condition function %s for type: %s", function, column.Type) } case TypeInteger, TypeReal: // All functions are valid return nil default: panic("Unsupported Type") } } func isDefaultBaseValue(elem interface{}, etype ExtendedType) bool { value := reflect.ValueOf(elem) if !value.IsValid() { return true } if reflect.TypeOf(elem).Kind() == reflect.Ptr { return reflect.ValueOf(elem).IsZero() } switch etype { case TypeUUID: return elem.(string) == "00000000-0000-0000-0000-000000000000" || elem.(string) == "" case TypeMap, TypeSet: if value.Kind() == reflect.Array { return value.Len() == 0 } return value.IsNil() || value.Len() == 0 case TypeString: return elem.(string) == "" case TypeInteger: return elem.(int) == 0 case TypeReal: return elem.(float64) == 0 default: return false } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/bindings_test.go000066400000000000000000000605501464501522100237420ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "reflect" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( aString = "foo" aEnum = "enum1" aEnumSet = []string{"enum1", "enum2", "enum3"} aSet = []string{"a", "set", "of", "strings"} aUUID0 = "2f77b348-9768-4866-b761-89d5177ecda0" aUUID1 = "2f77b348-9768-4866-b761-89d5177ecda1" aUUID2 = "2f77b348-9768-4866-b761-89d5177ecda2" aUUID3 = "2f77b348-9768-4866-b761-89d5177ecda3" aSingleUUIDSet, _ = NewOvsSet(UUID{GoUUID: aUUID0}) aUUIDSet = []string{ aUUID0, aUUID1, aUUID2, aUUID3, } aIntSet = []int{ 3, 2, 42, } aFloat = 42.00 aInt = 42 aFloatSet = []float64{ 3.0, 2.0, 42.0, } aMap = map[string]string{ "key1": "value1", "key2": "value2", "key3": "value3", } aUUIDMap = map[string]string{ "key1": aUUID0, "key2": aUUID1, "key3": aUUID2, } aEmptySet = []string{} ) func TestOvsToNativeAndNativeToOvs(t *testing.T) { s, _ := NewOvsSet(aSet) s1, _ := NewOvsSet([]string{aString}) us := make([]UUID, 0) for _, u := range aUUIDSet { us = append(us, UUID{GoUUID: u}) } uss, _ := NewOvsSet(us) us1 := []UUID{{GoUUID: aUUID0}} uss1, _ := NewOvsSet(us1) is, _ := NewOvsSet(aIntSet) fs, _ := NewOvsSet(aFloatSet) sis, _ := NewOvsSet([]int{aInt}) sfs, _ := NewOvsSet([]float64{aFloat}) es, _ := NewOvsSet(aEmptySet) ens, _ := NewOvsSet(aEnumSet) m, _ := NewOvsMap(aMap) um, _ := NewOvsMap(map[string]UUID{ "key1": {GoUUID: aUUID0}, "key2": {GoUUID: aUUID1}, "key3": {GoUUID: aUUID2}, }) singleStringSet, _ := NewOvsSet([]string{"foo"}) tests := []struct { name string schema []byte input interface{} native interface{} ovs interface{} }{ { name: "String", schema: []byte(`{"type":"string"}`), input: aString, native: aString, ovs: aString, }, { name: "Float", schema: []byte(`{"type":"real"}`), input: aFloat, native: aFloat, ovs: aFloat, }, { name: "Integers with float ovs type", schema: []byte(`{"type":"integer"}`), input: aFloat, native: aInt, ovs: aInt, }, { name: "Integers", schema: []byte(`{"type":"integer"}`), input: aInt, 
native: aInt, ovs: aInt, }, { name: "Integer set with float ovs type ", schema: []byte(`{"type":"integer", "min":0}`), input: aFloat, native: aInt, ovs: aInt, }, { name: "String Set", schema: []byte(`{"type": { "key": "string", "max": "unlimited", "min": 0 }}`), input: s, native: aSet, ovs: s, }, { // string with exactly one element can also be represented // as the element itself. On ovs2native, we keep the slice representation name: "String Set with exactly one field", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), input: aString, native: []string{aString}, ovs: s1, }, { name: "UUID Set", schema: []byte(`{ "type":{ "key": { "refTable": "SomeOtherTAble", "refType": "weak", "type": "uuid" }, "min": 0, "max": "unlimited" } }`), input: uss, native: aUUIDSet, ovs: uss, }, { name: "UUID Set with exactly one field", schema: []byte(`{ "type":{ "key": { "refTable": "SomeOtherTAble", "refType": "weak", "type": "uuid" }, "min": 0, "max": "unlimited" } }`), input: UUID{GoUUID: aUUID0}, native: []string{aUUID0}, ovs: uss1, }, { name: "Integer Set", schema: []byte(`{ "type":{ "key": { "type": "integer" }, "min": 0, "max": "unlimited" } }`), input: is, native: aIntSet, ovs: is, }, { name: "Integer Set single with float ovs input", schema: []byte(`{ "type":{ "key": { "type": "integer" }, "min": 0, "max": "unlimited" } }`), input: fs, native: aIntSet, ovs: is, }, { // A single-value integer set with integer ovs input name: "Integer Set single", schema: []byte(`{ "type":{ "key": { "type": "integer" }, "min": 0, "max": "unlimited" } }`), input: sis, native: []int{aInt}, ovs: sis, }, { // A single-value integer set with float ovs input name: "Integer Set single", schema: []byte(`{ "type":{ "key": { "type": "integer" }, "min": 0, "max": "unlimited" } }`), input: sfs, native: []int{aInt}, ovs: sis, }, { // A float set name: "Float Set", schema: []byte(`{ "type":{ "key": { "type": "real" }, "min": 0, "max": "unlimited" } }`), input: fs, native: aFloatSet, ovs: fs, }, { // A empty string set name: "Empty String Set", schema: []byte(`{ "type":{ "key": { "type": "string" }, "min": 0, "max": "unlimited" } }`), input: es, native: aEmptySet, ovs: es, }, { // Enum name: "Enum (string)", schema: []byte(`{ "type":{ "key": { "enum": [ "set", [ "enum1", "enum2", "enum3" ] ], "type": "string" } } }`), input: aEnum, native: aEnum, ovs: aEnum, }, { // Enum set name: "Enum Set (string)", schema: []byte(`{ "type":{ "key": { "enum": [ "set", [ "enum1", "enum2", "enum3" ] ], "type": "string" }, "max": "unlimited", "min": 0 } }`), input: ens, native: aEnumSet, ovs: ens, }, { name: "Map (string->string)", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } }`), input: m, native: aMap, ovs: m, }, { name: "Map (string->uuid)", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0, "value": "uuid" } }`), input: um, native: aUUIDMap, ovs: um, }, { name: "String set with min 0 max 1", schema: []byte(`{ "type":{ "key": { "type": "string" }, "min": 0, "max": 1 } }`), input: singleStringSet, native: &aString, ovs: singleStringSet, }, { name: "UUID set with min 0 max 1", schema: []byte(`{ "type":{ "key": { "refTable": "SomeOtherTAble", "refType": "weak", "type": "uuid" }, "min": 0, "max": 1 } }`), input: aSingleUUIDSet, native: &aUUID0, ovs: aSingleUUIDSet, }, { name: "null UUID set with min 0 max 1", schema: []byte(`{ "type":{ "key": { "refTable": "SomeOtherTAble", "refType": "weak", "type": "uuid" }, "min": 0, "max": 1 } }`), input: es, 
native: (*string)(nil), ovs: es, }, { name: "A string with min 0 max 1", schema: []byte(`{ "type":{ "key": { "type": "string" }, "min": 0, "max": 1 } }`), input: aString, native: &aString, ovs: singleStringSet, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var column ColumnSchema err := json.Unmarshal(tt.schema, &column) require.NoError(t, err) native, err := OvsToNative(&column, tt.input) require.NoError(t, err) require.Equalf(t, tt.native, native, "fail to convert ovs2native. input: %v(%s). expected %v(%s). got %v (%s)", tt.input, reflect.TypeOf(tt.input), tt.native, reflect.TypeOf(tt.native), native, reflect.TypeOf(native), ) ovs, err := NativeToOvs(&column, native) require.NoErrorf(t, err, "failed to convert %s: %s", tt, err) assert.Equalf(t, tt.ovs, ovs, "fail to convert native2ovs. native: %v(%s). expected %v(%s). got %v (%s)", native, reflect.TypeOf(native), tt.ovs, reflect.TypeOf(tt.ovs), ovs, reflect.TypeOf(ovs), ) }) } } func TestOvsToNativeErr(t *testing.T) { as, _ := NewOvsSet([]string{"foo"}) s, _ := NewOvsMap(map[string]string{"foo": "bar"}) m, _ := NewOvsMap(map[int]string{1: "one", 2: "two"}) tests := []struct { name string schema []byte input interface{} }{ { name: "Wrong Atomic Type", schema: []byte(`{"type":"string"}`), input: 42, }, { name: "Wrong Atomic Numeric Type: Float", schema: []byte(`{"type":"real"}`), input: 42, }, { name: "Set instead of Atomic Type", schema: []byte(`{"type":"string"}`), input: as, }, { name: "Wrong Set Type", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), input: []int{1, 2}, }, { name: "Wrong Map instead of Set", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), input: s, }, { name: "Wrong Map key type", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } }`), input: m, }, } for _, tt := range tests { t.Run(fmt.Sprintf(tt.name), func(t *testing.T) { var column ColumnSchema err := json.Unmarshal(tt.schema, &column) require.NoError(t, err) res, err := OvsToNative(&column, tt.input) assert.Errorf(t, err, "conversion %+v should have failed, instead it has returned %v (%s)", tt, res, reflect.TypeOf(res), ) }) } } func TestNativeToOvsErr(t *testing.T) { tests := []struct { name string schema []byte input interface{} }{ { name: "Wrong Atomic Type", schema: []byte(`{"type":"string"}`), input: 42, }, { // OVS floats should be convertible to integers since encoding/json will use float64 as // the default numeric type. 
However, native types should match name: "Wrong Atomic Numeric Type: Int", schema: []byte(`{"type":"integer"}`), input: 42.0, }, { name: "Wrong Atomic Numeric Type: Float", schema: []byte(`{"type":"real"}`), input: 42, }, { name: "Set instead of Atomic Type", schema: []byte(`{"type":"string"}`), input: []string{"foo"}, }, { name: "Wrong Set Type", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), input: []int{1, 2}, }, { name: "Wrong Map instead of Set", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), input: map[string]string{"foo": "bar"}, }, { name: "Wrong Map key type", schema: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0, "value": "string" } }`), input: map[int]string{1: "one", 2: "two"}, }, } for _, tt := range tests { t.Run(fmt.Sprintf(tt.name), func(t *testing.T) { var column ColumnSchema if err := json.Unmarshal(tt.schema, &column); err != nil { t.Fatal(err) } res, err := NativeToOvs(&column, tt.input) if err == nil { t.Errorf("conversion %s should have failed, instead it has returned %v (%s)", tt, res, reflect.TypeOf(res)) t.Logf("Conversion schema %v", string(tt.schema)) } }) } } func TestIsDefault(t *testing.T) { type Test struct { name string column []byte elem interface{} expected bool } tests := []Test{ { name: "empty string", column: []byte(`{"type":"string"}`), elem: "", expected: true, }, { name: "non string", column: []byte(`{"type":"string"}`), elem: "something", expected: false, }, { name: "empty uuid", column: []byte(`{"type":"uuid"}`), elem: "", expected: true, }, { name: "default uuid", column: []byte(`{"type":"uuid"}`), elem: "00000000-0000-0000-0000-000000000000", expected: true, }, { name: "non-empty uuid", column: []byte(`{"type":"uuid"}`), elem: aUUID0, expected: false, }, { name: "zero int", column: []byte(`{"type":"integer"}`), elem: 0, expected: true, }, { name: "non-zero int", column: []byte(`{"type":"integer"}`), elem: 42, expected: false, }, { name: "non-zero float", column: []byte(`{"type":"real"}`), elem: 42.0, expected: false, }, { name: "zero float", column: []byte(`{"type":"real"}`), elem: 0.0, expected: true, }, { name: "empty set ", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), elem: []string{}, expected: true, }, { name: "empty set allocated", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), elem: make([]string, 0, 10), expected: true, }, { name: "non-empty set", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), elem: []string{"something"}, expected: false, }, { name: "empty map allocated", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), elem: make(map[string]string), expected: true, }, { name: "nil map", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), elem: nil, expected: true, }, { name: "empty map", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), elem: map[string]string{}, expected: true, }, { name: "non-empty map", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), elem: map[string]string{"foo": "bar"}, expected: false, }, { name: "empty enum", column: []byte(`{ "type":{ "key": { "enum": [ "set", [ "enum1", "enum2", "enum3" ] ], "type": "string" } } }`), elem: "", expected: true, }, { name: "non-empty enum", column: []byte(`{ "type":{ "key": { "enum": [ "set", [ "enum1", "enum2", "enum3" ] ], "type": "string" } } }`), elem: "enum1", expected: false, }, } for _, test := range tests 
{ t.Run(fmt.Sprintf("IsDefault: %s", test.name), func(t *testing.T) { var column ColumnSchema if err := json.Unmarshal(test.column, &column); err != nil { t.Fatal(err) } result := IsDefaultValue(&column, test.elem) if result != test.expected { t.Errorf("failed to determine if %v is default. expected %t got %t", test, test.expected, result) t.Logf("Conversion schema %v", string(test.column)) } }) } } func TestMutationValidation(t *testing.T) { type Test struct { name string column []byte mutators []Mutator value interface{} valid bool } tests := []Test{ { name: "string", column: []byte(`{"type":"string"}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: "foo", valid: false, }, { name: "string", column: []byte(`{"type":"uuid"}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: "foo", valid: false, }, { name: "boolean", column: []byte(`{"type":"boolean"}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: true, valid: false, }, { name: "integer", column: []byte(`{"type":"integer"}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: 4, valid: true, }, { name: "unmutable", column: []byte(`{"type":"integer", "mutable": false}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: 4, valid: false, }, { name: "integer wrong mutator", column: []byte(`{"type":"integer"}`), mutators: []Mutator{"some", "wrong", "mutator"}, value: 4, valid: false, }, { name: "integer wrong type", column: []byte(`{"type":"integer"}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: "foo", valid: false, }, { name: "real", column: []byte(`{"type":"real"}`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide}, value: 4.0, valid: true, }, { name: "real-%/", column: []byte(`{"type":"real"}`), mutators: []Mutator{MutateOperationModulo}, value: 4.0, valid: false, }, { name: "integer set", column: []byte(`{ "type": { "key": "integer", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo}, value: 4, valid: true, }, { name: "float set /", column: []byte(`{ "type": { "key": "real", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide}, value: 4.0, valid: true, }, { name: "string set wrong mutator", column: []byte(`{ "type": { "key": "real", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationAdd, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply}, value: "foo", valid: false, }, { name: "string set insert single string", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationInsert}, value: "foo", valid: true, }, { name: "string 
set insert single int", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationInsert}, value: 42, valid: false, }, { name: "string set insert/delete", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationInsert, MutateOperationDelete}, value: []string{"foo", "bar"}, valid: true, }, { name: "integer set insert/delete", column: []byte(`{ "type": { "key": "integer", "max": "unlimited", "min": 0 } }`), mutators: []Mutator{MutateOperationInsert, MutateOperationDelete}, value: []int{45, 11}, valid: true, }, { name: "map insert, wrong type", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), mutators: []Mutator{MutateOperationInsert}, value: []string{"foo"}, valid: false, }, { name: "map insert", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), mutators: []Mutator{MutateOperationInsert}, value: map[string]string{"foo": "bar"}, valid: true, }, { name: "map delete k-v", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), mutators: []Mutator{MutateOperationDelete}, value: map[string]string{"foo": "bar"}, valid: true, }, { name: "map delete k set", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), mutators: []Mutator{MutateOperationDelete}, value: []string{"foo", "bar"}, valid: true, }, } for _, test := range tests { t.Run(fmt.Sprintf("MutationValidation: %s", test.name), func(t *testing.T) { var column ColumnSchema if err := json.Unmarshal(test.column, &column); err != nil { t.Fatal(err) } for _, m := range test.mutators { result := ValidateMutation(&column, m, test.value) if test.valid { assert.Nil(t, result) } else { assert.NotNil(t, result) } } }) } } func TestConditionValidation(t *testing.T) { type Test struct { name string column []byte functions []ConditionFunction value interface{} valid bool } tests := []Test{ { name: "string", column: []byte(`{"type":"string"}`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: "foo", valid: true, }, { name: "uuid", column: []byte(`{"type":"uuid"}`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: "foo", valid: true, }, { name: "string wrong type", column: []byte(`{"type":"string"}`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: 42, valid: false, }, { name: "numeric", column: []byte(`{"type":"integer"}`), functions: []ConditionFunction{ConditionGreaterThanOrEqual, ConditionGreaterThan, ConditionLessThan, ConditionLessThanOrEqual, ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: 1000, valid: true, }, { name: "numeric wrong type", column: []byte(`{"type":"integer"}`), functions: []ConditionFunction{ConditionGreaterThanOrEqual, ConditionGreaterThan, ConditionLessThan, ConditionLessThanOrEqual, ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: "foo", valid: false, }, { name: "set", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: []string{"foo", "bar"}, valid: true, }, { name: "set wrong type", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, 
ConditionExcludes}, value: 32, valid: false, }, { name: "set wrong type2", column: []byte(`{ "type": { "key": "string", "max": "unlimited", "min": 0 } }`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: "foo", valid: false, }, { name: "map", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: map[string]string{"foo": "bar"}, valid: true, }, { name: "map wrong type", column: []byte(`{ "type": { "key": "string", "value": "string" } }`), functions: []ConditionFunction{ConditionEqual, ConditionIncludes, ConditionNotEqual, ConditionExcludes}, value: map[string]int{"foo": 42}, valid: false, }, } for _, test := range tests { t.Run(fmt.Sprintf("ConditionValidation: %s", test.name), func(t *testing.T) { var column ColumnSchema err := json.Unmarshal(test.column, &column) assert.Nil(t, err) for _, f := range test.functions { result := ValidateCondition(&column, f, test.value) if test.valid { assert.Nil(t, result) } else { assert.NotNil(t, result) } } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/condition.go000066400000000000000000000136711464501522100230760ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "reflect" ) type ConditionFunction string type WaitCondition string const ( // ConditionLessThan is the less than condition ConditionLessThan ConditionFunction = "<" // ConditionLessThanOrEqual is the less than or equal condition ConditionLessThanOrEqual ConditionFunction = "<=" // ConditionEqual is the equal condition ConditionEqual ConditionFunction = "==" // ConditionNotEqual is the not equal condition ConditionNotEqual ConditionFunction = "!=" // ConditionGreaterThan is the greater than condition ConditionGreaterThan ConditionFunction = ">" // ConditionGreaterThanOrEqual is the greater than or equal condition ConditionGreaterThanOrEqual ConditionFunction = ">=" // ConditionIncludes is the includes condition ConditionIncludes ConditionFunction = "includes" // ConditionExcludes is the excludes condition ConditionExcludes ConditionFunction = "excludes" // WaitConditionEqual is the equal condition WaitConditionEqual WaitCondition = "==" // WaitConditionNotEqual is the not equal condition WaitConditionNotEqual WaitCondition = "!=" ) // Condition is described in RFC 7047: 5.1 type Condition struct { Column string Function ConditionFunction Value interface{} } func (c Condition) String() string { return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value) } // NewCondition returns a new condition func NewCondition(column string, function ConditionFunction, value interface{}) Condition { return Condition{ Column: column, Function: function, Value: value, } } // MarshalJSON marshals a condition to a 3 element JSON array func (c Condition) MarshalJSON() ([]byte, error) { v := []interface{}{c.Column, c.Function, c.Value} return json.Marshal(v) } // UnmarshalJSON converts a 3 element JSON array to a Condition func (c *Condition) UnmarshalJSON(b []byte) error { var v []interface{} err := json.Unmarshal(b, &v) if err != nil { return err } if len(v) != 3 { return fmt.Errorf("expected a 3 element json array. 
there are %d elements", len(v)) } c.Column = v[0].(string) function := ConditionFunction(v[1].(string)) switch function { case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes, ConditionGreaterThan, ConditionGreaterThanOrEqual, ConditionLessThan, ConditionLessThanOrEqual: c.Function = function default: return fmt.Errorf("%s is not a valid function", function) } vv, err := ovsSliceToGoNotation(v[2]) if err != nil { return err } c.Value = vv return nil } // Evaluate will evaluate the condition on the two provided values // The conditions operate differently depending on the type of // the provided values. The behavior is as described in RFC7047 func (c ConditionFunction) Evaluate(a interface{}, b interface{}) (bool, error) { x := reflect.ValueOf(a) y := reflect.ValueOf(b) if x.Kind() != y.Kind() { return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind()) } switch c { case ConditionEqual: return reflect.DeepEqual(a, b), nil case ConditionNotEqual: return !reflect.DeepEqual(a, b), nil case ConditionIncludes: switch x.Kind() { case reflect.Slice: return sliceContains(x, y), nil case reflect.Map: return mapContains(x, y), nil case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: return reflect.DeepEqual(a, b), nil default: return false, fmt.Errorf("condition not supported on %s", x.Kind()) } case ConditionExcludes: switch x.Kind() { case reflect.Slice: return !sliceContains(x, y), nil case reflect.Map: return !mapContains(x, y), nil case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: return !reflect.DeepEqual(a, b), nil default: return false, fmt.Errorf("condition not supported on %s", x.Kind()) } case ConditionGreaterThan: switch x.Kind() { case reflect.Int: return x.Int() > y.Int(), nil case reflect.Float64: return x.Float() > y.Float(), nil case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: default: return false, fmt.Errorf("condition not supported on %s", x.Kind()) } case ConditionGreaterThanOrEqual: switch x.Kind() { case reflect.Int: return x.Int() >= y.Int(), nil case reflect.Float64: return x.Float() >= y.Float(), nil case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: default: return false, fmt.Errorf("condition not supported on %s", x.Kind()) } case ConditionLessThan: switch x.Kind() { case reflect.Int: return x.Int() < y.Int(), nil case reflect.Float64: return x.Float() < y.Float(), nil case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: default: return false, fmt.Errorf("condition not supported on %s", x.Kind()) } case ConditionLessThanOrEqual: switch x.Kind() { case reflect.Int: return x.Int() <= y.Int(), nil case reflect.Float64: return x.Float() <= y.Float(), nil case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: default: return false, fmt.Errorf("condition not supported on %s", x.Kind()) } default: return false, fmt.Errorf("unsupported condition function %s", c) } // we should never get here return false, fmt.Errorf("unreachable condition") } func sliceContains(x, y reflect.Value) bool { for i := 0; i < y.Len(); i++ { found := false vy := y.Index(i) for j := 0; j < x.Len(); j++ { vx := x.Index(j) if vy.Kind() == reflect.Interface { if vy.Elem() == vx.Elem() { found = true break } } else { if vy.Interface() == vx.Interface() { found = true break } } } if !found { return false } } return true } func mapContains(x, y reflect.Value) bool { iter := y.MapRange() for iter.Next() { k := iter.Key() v := iter.Value() vx := x.MapIndex(k) if !vx.IsValid() { return
false } if v.Kind() != reflect.Interface { if v.Interface() != vx.Interface() { return false } } else { if v.Elem() != vx.Elem() { return false } } } return true } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/condition_test.go000066400000000000000000000270541464501522100241350ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "reflect" "testing" "github.com/stretchr/testify/assert" ) func TestConditionMarshalUnmarshalJSON(t *testing.T) { tests := []struct { name string condition Condition want string wantErr bool }{ { "test <", Condition{"foo", ConditionLessThan, "bar"}, `[ "foo", "<", "bar" ]`, false, }, { "test <=", Condition{"foo", ConditionLessThanOrEqual, "bar"}, `[ "foo", "<=", "bar" ]`, false, }, { "test >", Condition{"foo", ConditionGreaterThan, "bar"}, `[ "foo", ">", "bar" ]`, false, }, { "test >=", Condition{"foo", ConditionGreaterThanOrEqual, "bar"}, `[ "foo", ">=", "bar" ]`, false, }, { "test ==", Condition{"foo", ConditionEqual, "bar"}, `[ "foo", "==", "bar" ]`, false, }, { "test !=", Condition{"foo", ConditionNotEqual, "bar"}, `[ "foo", "!=", "bar" ]`, false, }, { "test includes", Condition{"foo", ConditionIncludes, "bar"}, `[ "foo", "includes", "bar" ]`, false, }, { "test excludes", Condition{"foo", ConditionExcludes, "bar"}, `[ "foo", "excludes", "bar" ]`, false, }, { "test uuid", Condition{"foo", ConditionExcludes, UUID{GoUUID: "foo"}}, `[ "foo", "excludes", ["named-uuid", "foo"] ]`, false, }, { "test set", Condition{"foo", ConditionExcludes, OvsSet{GoSet: []interface{}{"foo", "bar", "baz"}}}, `[ "foo", "excludes", ["set",["foo", "bar", "baz"]] ]`, false, }, { "test map", Condition{"foo", ConditionExcludes, OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar", "baz": "quux"}}}, `[ "foo", "excludes", ["map",[["foo", "bar"], ["baz", "quux"]]]]`, false, }, { "test uuid set", Condition{"foo", ConditionExcludes, OvsSet{GoSet: []interface{}{UUID{GoUUID: "foo"}, UUID{GoUUID: "bar"}}}}, `[ "foo", "excludes", ["set",[["named-uuid", "foo"], ["named-uuid", "bar"]]] ]`, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := json.Marshal(tt.condition) if err != nil { t.Fatal(err) } // testing JSON equality is flaky for ovsdb notated maps // it's safe to skip this as we test from json->object later if tt.name != "test map" { assert.JSONEq(t, tt.want, string(got)) } var c Condition if err := json.Unmarshal(got, &c); err != nil { t.Fatal(err) } assert.Equal(t, tt.condition.Column, c.Column) assert.Equal(t, tt.condition.Function, c.Function) v := reflect.TypeOf(tt.condition.Value) vv := reflect.ValueOf(c.Value) if !vv.IsValid() { t.Fatalf("c.Value is empty: %v", c.Value) } assert.Equal(t, v, vv.Type()) assert.Equal(t, tt.condition.Value, vv.Convert(v).Interface()) if vv.Kind() == reflect.String { assert.Equal(t, tt.condition.Value, vv.String()) } }) } } func TestCondition_UnmarshalJSON(t *testing.T) { type fields struct { Column string Function ConditionFunction Value interface{} } type args struct { b []byte } tests := []struct { name string fields fields args args wantErr bool }{ { "success", fields{"foo", ConditionEqual, "bar"}, args{[]byte(`[ "foo", "==", "bar" ]`)}, false, }, { "bad function", fields{}, args{[]byte(`[ "foo", "baz", "bar" ]`)}, true, }, { "too many elements", fields{}, args{[]byte(`[ "foo", "bar", "baz", "quuz" ]`)}, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := Condition{ Column: tt.fields.Column, Function: tt.fields.Function, Value: tt.fields.Value, } if err := 
c.UnmarshalJSON(tt.args.b); (err != nil) != tt.wantErr { t.Errorf("Condition.UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestConditionFunctionEvaluate(t *testing.T) { tests := []struct { name string c ConditionFunction a interface{} b interface{} want bool wantErr bool }{ { "equal string true", ConditionEqual, "foo", "foo", true, false, }, { "equal string false", ConditionEqual, "foo", "bar", false, false, }, { "equal int true", ConditionEqual, 1024, 1024, true, false, }, { "equal int false", ConditionEqual, 1024, 2048, false, false, }, { "equal real true", ConditionEqual, float64(42.0), float64(42.0), true, false, }, { "equal real false", ConditionEqual, float64(42.0), float64(420.0), false, false, }, { "equal map true", ConditionEqual, map[string]string{"foo": "bar"}, map[string]string{"foo": "bar"}, true, false, }, { "equal map false", ConditionEqual, map[string]string{"foo": "bar"}, map[string]string{"bar": "baz"}, false, false, }, { "equal slice true", ConditionEqual, []string{"foo", "bar"}, []string{"foo", "bar"}, true, false, }, { "equal slice false", ConditionEqual, []string{"foo", "bar"}, []string{"foo", "baz"}, false, false, }, { "notequal string true", ConditionNotEqual, "foo", "bar", true, false, }, { "notequal string false", ConditionNotEqual, "foo", "foo", false, false, }, { "notequal int true", ConditionNotEqual, 1024, 2048, true, false, }, { "notequal int false", ConditionNotEqual, 1024, 1024, false, false, }, { "notequal real true", ConditionNotEqual, float64(42.0), float64(24.0), true, false, }, { "notequal real false", ConditionNotEqual, float64(42.0), float64(42.0), false, false, }, { "notequal map true", ConditionNotEqual, map[string]string{"foo": "bar"}, map[string]string{"bar": "baz"}, true, false, }, { "notequal map false", ConditionNotEqual, map[string]string{"foo": "bar"}, map[string]string{"foo": "bar"}, false, false, }, { "notequal slice true", ConditionNotEqual, []string{"foo", "bar"}, []string{"foo", "baz"}, true, false, }, { "notequal slice false", ConditionNotEqual, []string{"foo", "bar"}, []string{"foo", "bar"}, false, false, }, { "includes string true", ConditionIncludes, "foo", "foo", true, false, }, { "includes string false", ConditionIncludes, "foo", "bar", false, false, }, { "incldes int true", ConditionIncludes, 1024, 1024, true, false, }, { "includes int false", ConditionIncludes, 1024, 2048, false, false, }, { "includes real true", ConditionIncludes, float64(42.0), float64(42.0), true, false, }, { "includes real false", ConditionIncludes, float64(42.0), float64(420.0), false, false, }, { "includes map true", ConditionIncludes, map[interface{}]interface{}{1: "bar", "bar": "baz", "baz": "quux"}, map[interface{}]interface{}{1: "bar"}, true, false, }, { "includes map false", ConditionIncludes, map[string]string{"foo": "bar", "bar": "baz", "baz": "quux"}, map[string]string{"quux": "foobar"}, false, false, }, { "includes slice true", ConditionIncludes, []string{"foo", "bar", "baz", "quux"}, []string{"foo", "bar"}, true, false, }, { "includes slice false", ConditionIncludes, []string{"foo", "bar", "baz", "quux"}, []string{"foobar", "quux"}, false, false, }, { "excludes string true", ConditionExcludes, "foo", "bar", true, false, }, { "excludes string false", ConditionExcludes, "foo", "foo", false, false, }, { "excludes int true", ConditionExcludes, 1024, 2048, true, false, }, { "excludes int false", ConditionExcludes, 1024, 1024, false, false, }, { "excludes real true", ConditionExcludes, float64(42.0), float64(24.0), true, 
false, }, { "excludes real false", ConditionExcludes, float64(42.0), float64(42.0), false, false, }, { "excludes map true", ConditionExcludes, map[interface{}]interface{}{1: "bar", "bar": "baz", "baz": "quux"}, map[interface{}]interface{}{1: "foo"}, true, false, }, { "excludes map false", ConditionExcludes, map[string]string{"foo": "bar", "bar": "baz", "baz": "quux"}, map[string]string{"foo": "bar"}, false, false, }, { "excludes slice true", ConditionExcludes, []string{"foo", "bar", "baz", "quux"}, []string{"foobar"}, true, false, }, { "excludes slice false", ConditionExcludes, []string{"foobar", "bar", "baz", "quux"}, []string{"foobar", "quux"}, false, false, }, { "lt unsuported", ConditionLessThan, "foo", "foo", false, true, }, { "lteq unsupported", ConditionLessThanOrEqual, []string{"foo"}, []string{"foo"}, false, true, }, { "gt unsupported", ConditionGreaterThan, map[string]string{"foo": "foo"}, map[string]string{"foo": "foo"}, false, true, }, { "gteq unsupported", ConditionGreaterThanOrEqual, true, true, false, true, }, { "lt true", ConditionLessThan, 0, 42, true, false, }, { "lteq true", ConditionLessThanOrEqual, 42, 42, true, false, }, { "gt true", ConditionGreaterThan, float64(420.0), float64(42.0), true, false, }, { "gteq true", ConditionGreaterThanOrEqual, float64(420.00), float64(419.99), true, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.c.Evaluate(tt.a, tt.b) if (err != nil) != tt.wantErr { t.Errorf("ConditionFunction.Evaluate() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { t.Errorf("ConditionFunction.Evaluate() = %v, want %v", got, tt.want) } }) } } func TestSliceContains(t *testing.T) { tests := []struct { name string a interface{} b interface{} want bool }{ { "string slice", []string{"foo", "bar", "baz"}, []string{"foo", "bar"}, true, }, { "int slice", []int{1, 2, 3}, []int{2, 3}, true, }, { "real slice", []float64{42.0, 42.0, 24.0}, []float64{42.0, 24.0}, true, }, { "interface slice", []interface{}{1, "bar", "baz"}, []interface{}{1, "bar"}, true, }, { "no match", []interface{}{1, "bar", "baz"}, []interface{}{2, "bar"}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { x := reflect.ValueOf(tt.a) y := reflect.ValueOf(tt.b) if got := sliceContains(x, y); got != tt.want { t.Errorf("compareSlice() = %v, want %v", got, tt.want) } }) } } func TestMapContains(t *testing.T) { tests := []struct { name string a interface{} b interface{} want bool }{ { "string map", map[string]string{"foo": "bar", "bar": "baz"}, map[string]string{"foo": "bar"}, true, }, { "int keys", map[int]string{1: "bar", 2: "baz"}, map[int]string{1: "bar"}, true, }, { "interface keys", map[interface{}]interface{}{1: 1024, 2: "baz"}, map[interface{}]interface{}{2: "baz"}, true, }, { "no key match", map[string]string{"foo": "bar", "bar": "baz"}, map[string]string{"quux": "bar"}, false, }, { "no value match", map[string]string{"foo": "bar", "bar": "baz"}, map[string]string{"foo": "quux"}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { x := reflect.ValueOf(tt.a) y := reflect.ValueOf(tt.b) if got := mapContains(x, y); got != tt.want { t.Errorf("mapContains() = %v, want %v", got, tt.want) } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/encoding_test.go000066400000000000000000000066601464501522100237350ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "testing" "github.com/stretchr/testify/assert" ) var validUUIDStr0 = `00000000-0000-0000-0000-000000000000` var 
validUUIDStr1 = `11111111-1111-1111-1111-111111111111` var validUUID0 = UUID{GoUUID: validUUIDStr0} var validUUID1 = UUID{GoUUID: validUUIDStr1} func TestMap(t *testing.T) { tests := []struct { name string input map[string]string expected string }{ { "empty map", map[string]string{}, `["map",[]]`, }, { "single element map", map[string]string{`v0`: `k0`}, `["map",[["v0","k0"]]]`, }, { "multiple element map", map[string]string{`v0`: `k0`, `v1`: `k1`}, `["map",[["v0","k0"],["v1","k1"]]]`, }, } for _, tt := range tests { m, err := NewOvsMap(tt.input) assert.Nil(t, err) jsonStr, err := json.Marshal(m) assert.Nil(t, err) // Compare unmarshalled data since the order of the elements of the map might not // have been preserved var expectedSlice []interface{} var jsonSlice []interface{} err = json.Unmarshal([]byte(tt.expected), &expectedSlice) assert.Nil(t, err) err = json.Unmarshal(jsonStr, &jsonSlice) assert.Nil(t, err) assert.Equal(t, expectedSlice[0], jsonSlice[0], "they should both start with 'map'") assert.ElementsMatch(t, expectedSlice[1].([]interface{}), jsonSlice[1].([]interface{}), "they should have the same elements\n") var res OvsMap err = json.Unmarshal(jsonStr, &res) assert.Nil(t, err) assert.Equal(t, m, res, "they should be equal\n") } } func TestSet(t *testing.T) { var x *int var y *string tests := []struct { name string input interface{} expected string }{ { "empty set", []string{}, `["set",[]]`, }, { "string", `aa`, `"aa"`, }, { "bool", false, `false`, }, { "float 64", float64(10), `10`, }, { "float", 10.2, `10.2`, }, { "string slice", []string{`aa`}, `"aa"`, }, { "string slice with multiple elements", []string{`aa`, `bb`}, `["set",["aa","bb"]]`, }, { "float slice", []float64{10.2, 15.4}, `["set",[10.2,15.4]]`, }, { "empty uuid", []UUID{}, `["set",[]]`, }, { "uuid", UUID{GoUUID: `aa`}, `["named-uuid","aa"]`, }, { "uuid slice single element", []UUID{{GoUUID: `aa`}}, `["named-uuid","aa"]`, }, { "uuid slice multiple elements", []UUID{{GoUUID: `aa`}, {GoUUID: `bb`}}, `["set",[["named-uuid","aa"],["named-uuid","bb"]]]`, }, { "valid uuid", validUUID0, fmt.Sprintf(`["uuid","%v"]`, validUUIDStr0), }, { "valid uuid set single element", []UUID{validUUID0}, fmt.Sprintf(`["uuid","%v"]`, validUUIDStr0), }, { "valid uuid set multiple elements", []UUID{validUUID0, validUUID1}, fmt.Sprintf(`["set",[["uuid","%v"],["uuid","%v"]]]`, validUUIDStr0, validUUIDStr1), }, { name: "nil pointer of valid *int type", input: x, expected: `["set",[]]`, }, { name: "nil pointer of valid *string type", input: y, expected: `["set",[]]`, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { set, err := NewOvsSet(tt.input) assert.Nil(t, err) jsonStr, err := json.Marshal(set) assert.Nil(t, err) assert.JSONEqf(t, tt.expected, string(jsonStr), "they should be equal\n") var res OvsSet err = json.Unmarshal(jsonStr, &res) assert.Nil(t, err) assert.Equal(t, set.GoSet, res.GoSet, "they should have the same elements\n") }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/error.go000066400000000000000000000226741464501522100222440ustar00rootroot00000000000000package ovsdb import "fmt" const ( referentialIntegrityViolation = "referential integrity violation" constraintViolation = "constraint violation" resourcesExhausted = "resources exhausted" ioError = "I/O error" duplicateUUIDName = "duplicate uuid name" domainError = "domain error" rangeError = "range error" timedOut = "timed out" notSupported = "not supported" aborted = "aborted" notOwner = "not owner" ) // errorFromResult returns an specific OVSDB error type 
from // an OperationResult func errorFromResult(op *Operation, r OperationResult) OperationError { if r.Error == "" { return nil } switch r.Error { case referentialIntegrityViolation: return &ReferentialIntegrityViolation{r.Details, op} case constraintViolation: return &ConstraintViolation{r.Details, op} case resourcesExhausted: return &ResourcesExhausted{r.Details, op} case ioError: return &IOError{r.Details, op} case duplicateUUIDName: return &DuplicateUUIDName{r.Details, op} case domainError: return &DomainError{r.Details, op} case rangeError: return &RangeError{r.Details, op} case timedOut: return &TimedOut{r.Details, op} case notSupported: return &NotSupported{r.Details, op} case aborted: return &Aborted{r.Details, op} case notOwner: return &NotOwner{r.Details, op} default: return &Error{r.Error, r.Details, op} } } func ResultFromError(err error) OperationResult { if err == nil { panic("Program error: passed nil error to resultFromError") } switch e := err.(type) { case *ReferentialIntegrityViolation: return OperationResult{Error: referentialIntegrityViolation, Details: e.details} case *ConstraintViolation: return OperationResult{Error: constraintViolation, Details: e.details} case *ResourcesExhausted: return OperationResult{Error: resourcesExhausted, Details: e.details} case *IOError: return OperationResult{Error: ioError, Details: e.details} case *DuplicateUUIDName: return OperationResult{Error: duplicateUUIDName, Details: e.details} case *DomainError: return OperationResult{Error: domainError, Details: e.details} case *RangeError: return OperationResult{Error: rangeError, Details: e.details} case *TimedOut: return OperationResult{Error: timedOut, Details: e.details} case *NotSupported: return OperationResult{Error: notSupported, Details: e.details} case *Aborted: return OperationResult{Error: aborted, Details: e.details} case *NotOwner: return OperationResult{Error: notOwner, Details: e.details} default: return OperationResult{Error: e.Error()} } } // CheckOperationResults checks whether the provided operation was a success // If the operation was a success, it will return nil, nil // If the operation failed, due to an error committing the transaction it will // return nil, error. // Finally, in the case where one or more of the operations in the transaction // failed, we return []OperationErrors, error // Within []OperationErrors, the OperationErrors.Index() corresponds to the same index in // the original Operations struct. You may also perform type assertions against // the error so the caller can decide how best to handle it func CheckOperationResults(result []OperationResult, ops []Operation) ([]OperationError, error) { // this shouldn't happen, but we'll cover the case to be certain if len(result) < len(ops) { return nil, fmt.Errorf("ovsdb transaction error. %d operations submitted but only %d results received", len(ops), len(result)) } var errs []OperationError for i, op := range result { // RFC 7047: if all of the operations succeed, but the results cannot // be committed, then "result" will have one more element than "params", // with the additional element being an error.
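// A minimal, hypothetical sketch of how a caller might consume
// CheckOperationResults; the operation and result values below are made-up
// examples and are not taken from the original source.
func exampleCheckOperationResults() {
	ops := []Operation{{Op: OperationInsert, Table: "Bridge"}}
	results := []OperationResult{{Error: constraintViolation, Details: "duplicate bridge name"}}
	opErrs, err := CheckOperationResults(results, ops)
	if err != nil {
		// err summarizes the failed transaction; opErrs holds one entry per
		// failed operation, in the same order as the submitted operations.
		for _, opErr := range opErrs {
			// Type assertions let the caller react to specific RFC 7047 error classes.
			if cv, ok := opErr.(*ConstraintViolation); ok {
				fmt.Printf("constraint violation on %+v: %v\n", cv.Operation(), cv)
			}
		}
	}
}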
if i >= len(ops) { return errs, errorFromResult(nil, op) } if err := errorFromResult(&ops[i], op); err != nil { errs = append(errs, err) } } if len(errs) > 0 { return errs, fmt.Errorf("%d ovsdb operations failed", len(errs)) } return nil, nil } // OperationError represents an error that occurred as part of an // OVSDB Operation type OperationError interface { error // Operation is a pointer to the operation which caused the error Operation() *Operation } // ReferentialIntegrityViolation is explained in RFC 7047 4.1.3 type ReferentialIntegrityViolation struct { details string operation *Operation } func NewReferentialIntegrityViolation(details string) *ReferentialIntegrityViolation { return &ReferentialIntegrityViolation{details: details} } // Error implements the error interface func (e *ReferentialIntegrityViolation) Error() string { msg := referentialIntegrityViolation if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *ReferentialIntegrityViolation) Operation() *Operation { return e.operation } // ConstraintViolation is described in RFC 7047: 4.1.3 type ConstraintViolation struct { details string operation *Operation } func NewConstraintViolation(details string) *ConstraintViolation { return &ConstraintViolation{details: details} } // Error implements the error interface func (e *ConstraintViolation) Error() string { msg := constraintViolation if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *ConstraintViolation) Operation() *Operation { return e.operation } // ResourcesExhausted is described in RFC 7047: 4.1.3 type ResourcesExhausted struct { details string operation *Operation } // Error implements the error interface func (e *ResourcesExhausted) Error() string { msg := resourcesExhausted if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *ResourcesExhausted) Operation() *Operation { return e.operation } // IOError is described in RFC7047: 4.1.3 type IOError struct { details string operation *Operation } // Error implements the error interface func (e *IOError) Error() string { msg := ioError if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *IOError) Operation() *Operation { return e.operation } // DuplicateUUIDName is described in RFC7047 5.2.1 type DuplicateUUIDName struct { details string operation *Operation } // Error implements the error interface func (e *DuplicateUUIDName) Error() string { msg := duplicateUUIDName if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *DuplicateUUIDName) Operation() *Operation { return e.operation } // DomainError is described in RFC 7047: 5.2.4 type DomainError struct { details string operation *Operation } // Error implements the error interface func (e *DomainError) Error() string { msg := domainError if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *DomainError) Operation() *Operation { return e.operation } // RangeError is described in RFC 7047: 5.2.4 type RangeError struct { details string operation *Operation } // Error implements the error interface func (e *RangeError) Error() string { msg := rangeError if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func 
(e *RangeError) Operation() *Operation { return e.operation } // TimedOut is described in RFC 7047: 5.2.6 type TimedOut struct { details string operation *Operation } // Error implements the error interface func (e *TimedOut) Error() string { msg := timedOut if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *TimedOut) Operation() *Operation { return e.operation } // NotSupported is described in RFC 7047: 5.2.7 type NotSupported struct { details string operation *Operation } // Error implements the error interface func (e *NotSupported) Error() string { msg := notSupported if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *NotSupported) Operation() *Operation { return e.operation } // Aborted is described in RFC 7047: 5.2.8 type Aborted struct { details string operation *Operation } // Error implements the error interface func (e *Aborted) Error() string { msg := aborted if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *Aborted) Operation() *Operation { return e.operation } // NotOwner is described in RFC 7047: 5.2.9 type NotOwner struct { details string operation *Operation } // Error implements the error interface func (e *NotOwner) Error() string { msg := notOwner if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *NotOwner) Operation() *Operation { return e.operation } // Error is a generic OVSDB Error type that implements the // OperationError and error interfaces type Error struct { name string details string operation *Operation } // Error implements the error interface func (e *Error) Error() string { msg := e.name if e.details != "" { msg += ": " + e.details } return msg } // Operation implements the OperationError interface func (e *Error) Operation() *Operation { return e.operation } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/error_test.go000066400000000000000000000055711464501522100233000ustar00rootroot00000000000000package ovsdb import ( "reflect" "testing" "github.com/stretchr/testify/assert" ) func TestErrorFromResult(t *testing.T) { type args struct { op *Operation r OperationResult } tests := []struct { name string args args expected interface{} }{ { referentialIntegrityViolation, args{nil, OperationResult{Error: referentialIntegrityViolation}}, &ReferentialIntegrityViolation{}, }, { constraintViolation, args{nil, OperationResult{Error: constraintViolation}}, &ConstraintViolation{}, }, { resourcesExhausted, args{nil, OperationResult{Error: resourcesExhausted}}, &ResourcesExhausted{}, }, { ioError, args{nil, OperationResult{Error: ioError}}, &IOError{}, }, { duplicateUUIDName, args{nil, OperationResult{Error: duplicateUUIDName}}, &DuplicateUUIDName{}, }, { domainError, args{nil, OperationResult{Error: domainError}}, &DomainError{}, }, { rangeError, args{nil, OperationResult{Error: rangeError}}, &RangeError{}, }, { timedOut, args{nil, OperationResult{Error: timedOut}}, &TimedOut{}, }, { notSupported, args{nil, OperationResult{Error: notSupported}}, &NotSupported{}, }, { aborted, args{nil, OperationResult{Error: aborted}}, &Aborted{}, }, { notOwner, args{nil, OperationResult{Error: notOwner}}, &NotOwner{}, }, { "generic error", args{nil, OperationResult{Error: "foo"}}, &Error{}, }, { "nil", args{nil, OperationResult{Error: ""}}, nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) 
{ err := errorFromResult(tt.args.op, tt.args.r) assert.IsType(t, tt.expected, err) }) } } func TestCheckOperationResults(t *testing.T) { type args struct { result []OperationResult ops []Operation } tests := []struct { name string args args want []OperationError wantErr bool }{ { "success", args{[]OperationResult{{}}, []Operation{{Op: "insert"}}}, nil, false, }, { "commit error", args{[]OperationResult{{}, {Error: constraintViolation}}, []Operation{{Op: "insert"}}}, nil, true, }, { "transaction error", args{[]OperationResult{{Error: constraintViolation, Details: "foo"}, {Error: constraintViolation, Details: "bar"}}, []Operation{{Op: "insert"}, {Op: "mutate"}}}, []OperationError{&ConstraintViolation{details: "foo", operation: &Operation{Op: "insert"}}, &ConstraintViolation{details: "bar", operation: &Operation{Op: "mutate"}}}, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := CheckOperationResults(tt.args.result, tt.args.ops) if (err != nil) != tt.wantErr { t.Errorf("CheckOperationResults() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("CheckOperationResults() = %v, want %v", got, tt.want) } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/map.go000066400000000000000000000051021464501522100216530ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "reflect" ) // OvsMap is the JSON map structure used for OVSDB // RFC 7047 uses the following notation for map as JSON doesn't support non-string keys for maps. // A 2-element JSON array that represents a database map value. The // first element of the array must be the string "map", and the // second element must be an array of zero or more pairs giving the // values in the map. All of the pairs must have the same key and // value types.
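// A minimal, hypothetical sketch of the ["map",[[key,value],...]] wire
// notation described above; the concrete key and value are illustrative only.
func exampleOvsMapNotation() error {
	m, err := NewOvsMap(map[string]string{"mtu": "1500"})
	if err != nil {
		return err
	}
	b, err := json.Marshal(m) // encodes as ["map",[["mtu","1500"]]]
	if err != nil {
		return err
	}
	var decoded OvsMap
	if err := json.Unmarshal(b, &decoded); err != nil {
		return err
	}
	fmt.Printf("%s decodes to %v\n", b, decoded.GoMap)
	return nil
}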
type OvsMap struct { GoMap map[interface{}]interface{} } // MarshalJSON marshalls an OVSDB style Map to a byte array func (o OvsMap) MarshalJSON() ([]byte, error) { if len(o.GoMap) > 0 { var ovsMap, innerMap []interface{} ovsMap = append(ovsMap, "map") for key, val := range o.GoMap { var mapSeg []interface{} mapSeg = append(mapSeg, key) mapSeg = append(mapSeg, val) innerMap = append(innerMap, mapSeg) } ovsMap = append(ovsMap, innerMap) return json.Marshal(ovsMap) } return []byte("[\"map\",[]]"), nil } // UnmarshalJSON unmarshals an OVSDB style Map from a byte array func (o *OvsMap) UnmarshalJSON(b []byte) (err error) { var oMap []interface{} o.GoMap = make(map[interface{}]interface{}) if err := json.Unmarshal(b, &oMap); err == nil && len(oMap) > 1 { innerSlice := oMap[1].([]interface{}) for _, val := range innerSlice { f := val.([]interface{}) var k interface{} switch f[0].(type) { case []interface{}: vSet := f[0].([]interface{}) if len(vSet) != 2 || vSet[0] == "map" { return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} } goSlice, err := ovsSliceToGoNotation(vSet) if err != nil { return err } k = goSlice default: k = f[0] } switch f[1].(type) { case []interface{}: vSet := f[1].([]interface{}) if len(vSet) != 2 || vSet[0] == "map" { return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} } goSlice, err := ovsSliceToGoNotation(vSet) if err != nil { return err } o.GoMap[k] = goSlice default: o.GoMap[k] = f[1] } } } return err } // NewOvsMap will return an OVSDB style map from a provided Golang Map func NewOvsMap(goMap interface{}) (OvsMap, error) { v := reflect.ValueOf(goMap) if v.Kind() != reflect.Map { return OvsMap{}, fmt.Errorf("ovsmap supports only go map types") } genMap := make(map[interface{}]interface{}) keys := v.MapKeys() for _, key := range keys { genMap[key.Interface()] = v.MapIndex(key).Interface() } return OvsMap{genMap}, nil } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/map_test.go000066400000000000000000000040451464501522100227170ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "testing" ) func benchmarkMap(m map[string]string, b *testing.B) { testMap, err := NewOvsMap(m) if err != nil { b.Fatal(err) } for n := 0; n < b.N; n++ { _, err := json.Marshal(testMap) if err != nil { b.Fatal(err) } } } func BenchmarkMapMarshalJSON1(b *testing.B) { benchmarkMap(map[string]string{"foo": "bar"}, b) } func BenchmarkMapMarshalJSON2(b *testing.B) { benchmarkMap(map[string]string{"foo": "bar", "baz": "quuz"}, b) } func BenchmarkMapMarshalJSON3(b *testing.B) { benchmarkMap(map[string]string{"foo": "bar", "baz": "quuz", "foobar": "foobaz"}, b) } func BenchmarkMapMarshalJSON5(b *testing.B) { benchmarkMap(map[string]string{"foo": "bar", "baz": "quuz", "foofoo": "foobar", "foobaz": "fooquuz", "barfoo": "barbar"}, b) } func BenchmarkMapMarshalJSON8(b *testing.B) { benchmarkMap(map[string]string{"foo": "bar", "baz": "quuz", "foofoo": "foobar", "foobaz": "fooquuz", "barfoo": "barbar", "barbaz": "barquuz", "bazfoo": "bazbar", "bazbaz": "bazquux"}, b) } func benchmarkMapUnmarshalJSON(data []byte, b *testing.B) { for n := 0; n < b.N; n++ { var m OvsMap err := json.Unmarshal(data, &m) if err != nil { b.Fatal(err) } } } func BenchmarkMapUnmarshalJSON1(b *testing.B) { benchmarkMapUnmarshalJSON([]byte(`[ "map", [["foo","bar"]]]`), b) } func BenchmarkMapUnmarshalJSON2(b *testing.B) { benchmarkMapUnmarshalJSON([]byte(`[ "map", [["foo","bar"],["baz", "quuz"]]]`), b) } func BenchmarkMapUnmarshalJSON3(b 
*testing.B) { benchmarkMapUnmarshalJSON([]byte(`[ "map", [["foo","bar"],["baz", "quuz"],["foofoo", "foobar"]]]`), b) } func BenchmarkMapUnmarshalJSON5(b *testing.B) { benchmarkMapUnmarshalJSON([]byte(`[ "map", [["foo","bar"],["baz", "quuz"],["foofoo", "foobar"],["foobaz", "fooquuz"], ["barfoo", "barbar"]]]`), b) } func BenchmarkMapUnmarshalJSON8(b *testing.B) { benchmarkMapUnmarshalJSON([]byte(`[ "map", [["foo","bar"],["baz", "quuz"],["foofoo", "foobar"],["foobaz", "fooquuz"], ["barfoo", "barbar"],["barbaz", "barquux"],["bazfoo", "bazbar"], ["bazbaz", "bazquux"]]]`), b) } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/monitor_select.go000066400000000000000000000036151464501522100241330ustar00rootroot00000000000000package ovsdb import "encoding/json" // MonitorSelect represents a monitor select according to RFC7047 type MonitorSelect struct { initial *bool insert *bool delete *bool modify *bool } // NewMonitorSelect returns a new MonitorSelect with the provided values func NewMonitorSelect(initial, insert, delete, modify bool) *MonitorSelect { return &MonitorSelect{ initial: &initial, insert: &insert, delete: &delete, modify: &modify, } } // NewDefaultMonitorSelect returns a new MonitorSelect with default values func NewDefaultMonitorSelect() *MonitorSelect { return NewMonitorSelect(true, true, true, true) } // Initial returns whether or not an initial response will be sent func (m MonitorSelect) Initial() bool { if m.initial == nil { return true } return *m.initial } // Insert returns whether we will receive updates for inserts func (m MonitorSelect) Insert() bool { if m.insert == nil { return true } return *m.insert } // Delete returns whether we will receive updates for deletions func (m MonitorSelect) Delete() bool { if m.delete == nil { return true } return *m.delete } // Modify returns whether we will receive updates for modifications func (m MonitorSelect) Modify() bool { if m.modify == nil { return true } return *m.modify } type monitorSelect struct { Initial *bool `json:"initial,omitempty"` Insert *bool `json:"insert,omitempty"` Delete *bool `json:"delete,omitempty"` Modify *bool `json:"modify,omitempty"` } func (m MonitorSelect) MarshalJSON() ([]byte, error) { ms := monitorSelect{ Initial: m.initial, Insert: m.insert, Delete: m.delete, Modify: m.modify, } return json.Marshal(ms) } func (m *MonitorSelect) UnmarshalJSON(data []byte) error { var ms monitorSelect err := json.Unmarshal(data, &ms) if err != nil { return err } m.initial = ms.Initial m.insert = ms.Insert m.delete = ms.Delete m.modify = ms.Modify return nil } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/monitor_select_test.go000066400000000000000000000047061464501522100251740ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "testing" "github.com/stretchr/testify/assert" ) func TestNewMonitorSelect(t *testing.T) { ms := NewMonitorSelect(true, false, true, false) assert.True(t, ms.Initial(), "initial") assert.False(t, ms.Insert(), "insert") assert.True(t, ms.Delete(), "delete") assert.False(t, ms.Modify(), "modify") } func TestNewDefaultMonitorSelect(t *testing.T) { ms := NewDefaultMonitorSelect() assert.True(t, ms.Initial(), "initial") assert.True(t, ms.Insert(), "insert") assert.True(t, ms.Delete(), "delete") assert.True(t, ms.Modify(), "modify") } func TestMonitorSelectInitial(t *testing.T) { tt := true f := false ms1 := MonitorSelect{initial: nil} ms2 := MonitorSelect{initial: &tt} ms3 := MonitorSelect{initial: &f} assert.True(t, ms1.Initial(), "nil") assert.True(t, ms2.Initial(), "true") assert.False(t, 
ms3.Initial(), "false") } func TestMonitorSelectInsert(t *testing.T) { tt := true f := false ms1 := MonitorSelect{insert: nil} ms2 := MonitorSelect{insert: &tt} ms3 := MonitorSelect{insert: &f} assert.True(t, ms1.Insert(), "nil") assert.True(t, ms2.Insert(), "true") assert.False(t, ms3.Insert(), "false") } func TestMonitorSelectDelete(t *testing.T) { tt := true f := false ms1 := MonitorSelect{delete: nil} ms2 := MonitorSelect{delete: &tt} ms3 := MonitorSelect{delete: &f} assert.True(t, ms1.Delete(), "nil") assert.True(t, ms2.Delete(), "true") assert.False(t, ms3.Delete(), "false") } func TestMonitorSelectModify(t *testing.T) { tt := true f := false ms1 := MonitorSelect{modify: nil} ms2 := MonitorSelect{modify: &tt} ms3 := MonitorSelect{modify: &f} assert.True(t, ms1.Modify(), "nil") assert.True(t, ms2.Modify(), "true") assert.False(t, ms3.Modify(), "false") } func TestMonitorSelectMarshalUnmarshalJSON(t *testing.T) { tests := []struct { name string ms *MonitorSelect want string }{ { "nil", &MonitorSelect{}, `{}`, }, { "default", NewDefaultMonitorSelect(), `{"delete":true, "initial":true, "insert":true, "modify":true}`, }, { "falsey", NewMonitorSelect(false, false, false, false), `{"delete":false, "initial":false, "insert":false, "modify":false}`, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := json.Marshal(tt.ms) assert.Nil(t, err) assert.JSONEq(t, tt.want, string(got)) var ms2 MonitorSelect err = json.Unmarshal(got, &ms2) assert.Nil(t, err) assert.Equal(t, tt.ms, &ms2) }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/mutation.go000066400000000000000000000042121464501522100227370ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" ) type Mutator string const ( // MutateOperationDelete is the delete mutator MutateOperationDelete Mutator = "delete" // MutateOperationInsert is the insert mutator MutateOperationInsert Mutator = "insert" // MutateOperationAdd is the add mutator MutateOperationAdd Mutator = "+=" // MutateOperationSubtract is the subtract mutator MutateOperationSubtract Mutator = "-=" // MutateOperationMultiply is the multiply mutator MutateOperationMultiply Mutator = "*=" // MutateOperationDivide is the divide mutator MutateOperationDivide Mutator = "/=" // MutateOperationModulo is the modulo mutator MutateOperationModulo Mutator = "%=" ) // Mutation is described in RFC 7047: 5.1 type Mutation struct { Column string Mutator Mutator Value interface{} } // NewMutation returns a new mutation func NewMutation(column string, mutator Mutator, value interface{}) *Mutation { return &Mutation{ Column: column, Mutator: mutator, Value: value, } } // MarshalJSON marshals a mutation to a 3 element JSON array func (m Mutation) MarshalJSON() ([]byte, error) { v := []interface{}{m.Column, m.Mutator, m.Value} return json.Marshal(v) } // UnmarshalJSON converts a 3 element JSON array to a Mutation func (m *Mutation) UnmarshalJSON(b []byte) error { var v []interface{} err := json.Unmarshal(b, &v) if err != nil { return err } if len(v) != 3 { return fmt.Errorf("expected a 3 element json array. 
there are %d elements", len(v)) } ok := false m.Column, ok = v[0].(string) if !ok { return fmt.Errorf("expected column name %v to be a valid string", v[0]) } mutatorString, ok := v[1].(string) if !ok { return fmt.Errorf("expected mutator %v to be a valid string", v[1]) } mutator := Mutator(mutatorString) switch mutator { case MutateOperationDelete, MutateOperationInsert, MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo: m.Mutator = mutator default: return fmt.Errorf("%s is not a valid mutator", mutator) } vv, err := ovsSliceToGoNotation(v[2]) if err != nil { return err } m.Value = vv return nil } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/mutation_test.go000066400000000000000000000053071464501522100240040ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "reflect" "testing" "github.com/stretchr/testify/assert" ) func TestMutationMarshalUnmarshalJSON(t *testing.T) { tests := []struct { name string mutation Mutation want string wantErr bool }{ { "test delete", Mutation{"foo", MutateOperationDelete, "bar"}, `[ "foo", "delete", "bar" ]`, false, }, { "test insert", Mutation{"foo", MutateOperationInsert, "bar"}, `[ "foo", "insert", "bar" ]`, false, }, { "test add", Mutation{"foo", MutateOperationAdd, "bar"}, `[ "foo", "+=", "bar" ]`, false, }, { "test subtract", Mutation{"foo", MutateOperationSubtract, "bar"}, `[ "foo", "-=", "bar" ]`, false, }, { "test multiply", Mutation{"foo", MutateOperationMultiply, "bar"}, `[ "foo", "*=", "bar" ]`, false, }, { "test divide", Mutation{"foo", MutateOperationDivide, "bar"}, `[ "foo", "/=", "bar" ]`, false, }, { "test modulo", Mutation{"foo", MutateOperationModulo, "bar"}, `[ "foo", "%=", "bar" ]`, false, }, { "test uuid", Mutation{"foo", MutateOperationInsert, UUID{GoUUID: "foo"}}, `[ "foo", "insert", ["named-uuid", "foo"] ]`, false, }, { "test set", Mutation{"foo", MutateOperationInsert, OvsSet{GoSet: []interface{}{"foo", "bar", "baz"}}}, `[ "foo", "insert", ["set",["foo", "bar", "baz"]] ]`, false, }, { "test map", Mutation{"foo", MutateOperationInsert, OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar", "baz": "quux"}}}, `[ "foo", "insert", ["map",[["foo", "bar"], ["baz", "quux"]]]]`, false, }, { "test uuid set", Mutation{"foo", MutateOperationInsert, OvsSet{GoSet: []interface{}{UUID{GoUUID: "foo"}, UUID{GoUUID: "bar"}}}}, `[ "foo", "insert", ["set",[["named-uuid", "foo"], ["named-uuid", "bar"]]] ]`, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := json.Marshal(tt.mutation) if err != nil { t.Fatal(err) } // testing JSON equality is flaky for ovsdb notated maps // it's safe to skip this as we test from json->object later if tt.name != "test map" { assert.JSONEq(t, tt.want, string(got)) } var c Mutation if err := json.Unmarshal(got, &c); err != nil { t.Fatal(err) } assert.Equal(t, tt.mutation.Column, c.Column) assert.Equal(t, tt.mutation.Mutator, c.Mutator) v := reflect.TypeOf(tt.mutation.Value) vv := reflect.ValueOf(c.Value) if !vv.IsValid() { t.Fatalf("c.Value is empty: %v", c.Value) } assert.Equal(t, v, vv.Type()) assert.Equal(t, tt.mutation.Value, vv.Convert(v).Interface()) if vv.Kind() == reflect.String { assert.Equal(t, tt.mutation.Value, vv.String()) } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/named_uuid.go000066400000000000000000000111451464501522100232140ustar00rootroot00000000000000package ovsdb import ( "fmt" ) // ExpandNamedUUIDs replaces named UUIDs in columns that contain UUID types // throughout the operation. 
The caller must ensure each input operation has // a valid UUID, which may be replaced if a previous operation created a // matching named UUID mapping. Returns the updated operations or an error. func ExpandNamedUUIDs(ops []Operation, schema *DatabaseSchema) ([]Operation, error) { uuidMap := make(map[string]string) // Pass 1: replace the named UUID with a real UUID for each operation and // build the substitution map for i := range ops { op := &ops[i] if op.Op != OperationInsert { // Only Insert operations can specify a Named UUID continue } if err := ValidateUUID(op.UUID); err != nil { return nil, fmt.Errorf("operation UUID %q invalid: %v", op.UUID, err) } if op.UUIDName != "" { if uuid, ok := uuidMap[op.UUIDName]; ok { if op.UUID != "" && op.UUID != uuid { return nil, fmt.Errorf("named UUID %q maps to UUID %q but found existing UUID %q", op.UUIDName, uuid, op.UUID) } // If there's already a mapping for this named UUID use it op.UUID = uuid } else { uuidMap[op.UUIDName] = op.UUID } op.UUIDName = "" } } // Pass 2: replace named UUIDs in operation fields with the real UUID for i := range ops { op := &ops[i] tableSchema := schema.Table(op.Table) if tableSchema == nil { return nil, fmt.Errorf("table %q not found in schema %q", op.Table, schema.Name) } for i, condition := range op.Where { newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, condition.Column, condition.Value, uuidMap) if err != nil { return nil, err } op.Where[i].Value = newVal } for i, mutation := range op.Mutations { newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, mutation.Column, mutation.Value, uuidMap) if err != nil { return nil, err } op.Mutations[i].Value = newVal } for _, row := range op.Rows { for k, v := range row { newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) if err != nil { return nil, err } row[k] = newVal } } for k, v := range op.Row { newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) if err != nil { return nil, err } op.Row[k] = newVal } } return ops, nil } func expandColumnNamedUUIDs(tableSchema *TableSchema, tableName, columnName string, value interface{}, uuidMap map[string]string) (interface{}, error) { column := tableSchema.Column(columnName) if column == nil { return nil, fmt.Errorf("column %q not found in table %q", columnName, tableName) } return expandNamedUUID(column, value, uuidMap), nil } func expandNamedUUID(column *ColumnSchema, value interface{}, namedUUIDs map[string]string) interface{} { var keyType, valType ExtendedType switch column.Type { case TypeUUID: keyType = column.Type case TypeSet: keyType = column.TypeObj.Key.Type case TypeMap: keyType = column.TypeObj.Key.Type valType = column.TypeObj.Value.Type } if valType == TypeUUID { if m, ok := value.(OvsMap); ok { for k, v := range m.GoMap { if newUUID, ok := expandNamedUUIDAtomic(keyType, k, namedUUIDs); ok { m.GoMap[newUUID] = m.GoMap[k] delete(m.GoMap, k) k = newUUID } if newUUID, ok := expandNamedUUIDAtomic(valType, v, namedUUIDs); ok { m.GoMap[k] = newUUID } } } } else if keyType == TypeUUID { if ovsSet, ok := value.(OvsSet); ok { for i, s := range ovsSet.GoSet { if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { ovsSet.GoSet[i] = newUUID } } return value } else if strSet, ok := value.([]string); ok { for i, s := range strSet { if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { strSet[i] = newUUID.(string) } } return value } else if uuidSet, ok := value.([]UUID); ok { for i, s := range uuidSet { if newUUID, ok := 
expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { uuidSet[i] = newUUID.(UUID) } } return value } if newUUID, ok := expandNamedUUIDAtomic(keyType, value, namedUUIDs); ok { return newUUID } } // No expansion required; return original value return value } func expandNamedUUIDAtomic(valueType ExtendedType, value interface{}, namedUUIDs map[string]string) (interface{}, bool) { if valueType == TypeUUID { if uuid, ok := value.(UUID); ok { if newUUID, ok := namedUUIDs[uuid.GoUUID]; ok { return UUID{GoUUID: newUUID}, true } } else if uuid, ok := value.(string); ok { if newUUID, ok := namedUUIDs[uuid]; ok { return newUUID, true } } } return value, false } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/named_uuid_test.go000066400000000000000000000355411464501522100242610ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "testing" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const uuidTestSchema = ` { "name": "UUID_Test", "version": "0.0.1", "tables": { "UUID_Test": { "columns": { "_uuid": { "type": "uuid" }, "real_uuid": { "type": "uuid" }, "str": { "type": "string" }, "int": { "type": "integer" }, "uuidset": { "type": { "key": { "type": "uuid" }, "min": 0, "max": "unlimited" } }, "real_uuidset": { "type": { "key": { "type": "uuid" }, "min": 0, "max": "unlimited" } }, "strset": { "type": { "key": { "type": "string" }, "min": 0, "max": "unlimited" } }, "uuidmap": { "type": { "key": { "type": "uuid" }, "value": { "type": "uuid" }, "min": 1, "max": "unlimited" } }, "real_uuidmap": { "type": { "key": { "type": "uuid" }, "value": { "type": "uuid" }, "min": 1, "max": "unlimited" } }, "struuidmap": { "type": { "key": { "type": "string" }, "value": { "type": "uuid" }, "min": 1, "max": "unlimited" } }, "real_struuidmap": { "type": { "key": { "type": "string" }, "value": { "type": "uuid" }, "min": 1, "max": "unlimited" } }, "strmap": { "type": { "key": { "type": "string" }, "value": { "type": "string" }, "min": 1, "max": "unlimited" } } }, "isRoot": true } } } ` type UUIDTestType struct { UUID string `ovsdb:"_uuid"` RealUUID UUID `ovsdb:"real_uuid"` String string `ovsdb:"str"` Int string `ovsdb:"int"` UUIDSet []string `ovsdb:"uuidset"` RealUUIDSet []UUID `ovsdb:"real_uuidset"` StrSet []string `ovsdb:"strset"` UUIDMap map[string]string `ovsdb:"uuidmap"` RealUUIDMap map[UUID]UUID `ovsdb:"real_uuidmap"` StrUUIDMap map[string]string `ovsdb:"struuidmap"` RealStrUUIDMap map[string]UUID `ovsdb:"real_struuidmap"` StrMap map[string]string `ovsdb:"strmap"` } func getUUIDTestSchema() (DatabaseSchema, error) { var dbSchema DatabaseSchema err := json.Unmarshal([]byte(uuidTestSchema), &dbSchema) return dbSchema, err } func TestStandaloneExpandNamedUUID(t *testing.T) { testUUID := uuid.NewString() testUUID1 := uuid.NewString() tests := []struct { name string namedUUIDs map[string]string column string value interface{} expected interface{} }{ { "uuid", map[string]string{"foo": testUUID}, "_uuid", "foo", testUUID, }, { "real uuid", map[string]string{"foo": testUUID}, "real_uuid", UUID{GoUUID: "foo"}, UUID{GoUUID: testUUID}, }, { "string (no replace)", map[string]string{"foo": testUUID}, "str", "foo", "foo", }, { "int (no replace)", map[string]string{"foo": testUUID}, "int", 15, 15, }, // OVS []UUID == Go []string { "UUID set", map[string]string{"foo": testUUID}, "uuidset", OvsSet{GoSet: []interface{}{"foo"}}, OvsSet{GoSet: []interface{}{testUUID}}, }, // OVS []UUID == Go []UUID { "real UUID set", map[string]string{"foo": testUUID}, "real_uuidset", 
OvsSet{GoSet: []interface{}{UUID{GoUUID: "foo"}}}, OvsSet{GoSet: []interface{}{UUID{GoUUID: testUUID}}}, }, { "set multiple", map[string]string{"foo": testUUID, "bar": testUUID1}, "uuidset", OvsSet{GoSet: []interface{}{"foo", "bar", "baz"}}, OvsSet{GoSet: []interface{}{testUUID, testUUID1, "baz"}}, }, // OVS [UUID]UUID == Go [string]string { "map key", map[string]string{"foo": testUUID}, "uuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, OvsMap{GoMap: map[interface{}]interface{}{testUUID: "bar"}}, }, { "map values", map[string]string{"bar": testUUID1}, "uuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": testUUID1}}, }, { "map key and values", map[string]string{"foo": testUUID, "bar": testUUID1}, "uuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, OvsMap{GoMap: map[interface{}]interface{}{testUUID: testUUID1}}, }, // OVS [UUID]UUID == Go [UUID]UUID { "real UUID map key", map[string]string{"foo": testUUID}, "real_uuidmap", OvsMap{GoMap: map[interface{}]interface{}{UUID{GoUUID: "foo"}: UUID{GoUUID: "bar"}}}, OvsMap{GoMap: map[interface{}]interface{}{UUID{GoUUID: testUUID}: UUID{GoUUID: "bar"}}}, }, { "real UUID map values", map[string]string{"bar": testUUID1}, "real_uuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: "bar"}}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: testUUID1}}}, }, { "real UUID map key and values", map[string]string{"foo": testUUID, "bar": testUUID1}, "real_uuidmap", OvsMap{GoMap: map[interface{}]interface{}{UUID{GoUUID: "foo"}: UUID{GoUUID: "bar"}}}, OvsMap{GoMap: map[interface{}]interface{}{UUID{GoUUID: testUUID}: UUID{GoUUID: testUUID1}}}, }, // OVS [string]UUID == Go [string]string { "string UUID map key (no replace)", map[string]string{"foo": testUUID}, "struuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, }, { "string UUID map values (replace)", map[string]string{"foo": testUUID}, "struuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "foo"}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": testUUID}}, }, { "string UUID map key (no replace) and values (replace)", map[string]string{"foo": testUUID, "bar": testUUID1}, "struuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": testUUID1}}, }, // OVS [string]UUID == Go [string]UUID { "real string UUID map key (no replace)", map[string]string{"foo": testUUID}, "real_struuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: "bar"}}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: "bar"}}}, }, { "real string UUID map values (replace)", map[string]string{"foo": testUUID}, "real_struuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: "foo"}}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: testUUID}}}, }, { "real string UUID map key (no replace) and values (replace)", map[string]string{"foo": testUUID, "bar": testUUID1}, "real_struuidmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: "bar"}}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: testUUID1}}}, }, // OVS [string]string == Go [string]string { "string map key and values (no replace)", map[string]string{"foo": testUUID, "bar": testUUID1}, "strmap", OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar"}}, }, } for _, tt := range tests 
{ t.Run(tt.name, func(t *testing.T) { schema, err := getUUIDTestSchema() require.Nil(t, err) ts := schema.Table("UUID_Test") require.NotNil(t, ts) cs := ts.Column(tt.column) require.NotNil(t, cs) got := expandNamedUUID(cs, tt.value, tt.namedUUIDs) assert.Equal(t, tt.expected, got) }) } } func makeOp(table, uuid, uuidName string, rows ...Row) Operation { op := Operation{ Op: OperationInsert, Table: table, UUID: uuid, UUIDName: uuidName, } if len(rows) == 1 { op.Row = rows[0] } else { op.Rows = rows } return op } func makeOpWhere(table, uuid, uuidName string, row Row, w ...Condition) Operation { op := makeOp(table, uuid, uuidName, row) op.Where = w return op } func makeOpMutation(table, uuid, uuidName string, row Row, m ...Mutation) Operation { op := makeOp(table, uuid, uuidName, row) op.Mutations = m return op } func TestOperationExpandNamedUUID(t *testing.T) { testUUID := uuid.NewString() testUUID1 := uuid.NewString() testUUID2 := uuid.NewString() namedUUID := "adsfasdfadsf" namedUUID1 := "142124521551" badUUID := "asdfadsfasdfasf" namedUUIDSet, _ := NewOvsSet([]UUID{{GoUUID: namedUUID}}) testUUIDSet, _ := NewOvsSet([]UUID{{GoUUID: testUUID}}) namedUUID1Map, _ := NewOvsMap(map[string]string{"foo": namedUUID1}) testUUID1Map, _ := NewOvsMap(map[string]string{"foo": testUUID1}) tests := []struct { name string ops []Operation expected []Operation expectedErr string }{ { "simple replace", []Operation{ makeOp("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"uuidset": []string{namedUUID}})), }, []Operation{ makeOp("UUID_Test", testUUID, "", Row(map[string]interface{}{"uuidset": []string{testUUID}})), }, "", }, { "simple replace multiple rows", []Operation{ makeOp("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"uuidset": []string{namedUUID}}), Row(map[string]interface{}{"real_uuidset": namedUUIDSet}), ), }, []Operation{ makeOp("UUID_Test", testUUID, "", Row(map[string]interface{}{"uuidset": []string{testUUID}}), Row(map[string]interface{}{"real_uuidset": testUUIDSet}), ), }, "", }, { "chained ops", []Operation{ makeOp("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"uuidset": []string{namedUUID}})), makeOp("UUID_Test", testUUID1, namedUUID1, Row(map[string]interface{}{"real_uuid": UUID{GoUUID: namedUUID}})), makeOp("UUID_Test", testUUID2, "", Row(map[string]interface{}{"struuidmap": namedUUID1Map})), }, []Operation{ makeOp("UUID_Test", testUUID, "", Row(map[string]interface{}{"uuidset": []string{testUUID}})), makeOp("UUID_Test", testUUID1, "", Row(map[string]interface{}{"real_uuid": UUID{GoUUID: testUUID}})), makeOp("UUID_Test", testUUID2, "", Row(map[string]interface{}{"struuidmap": testUUID1Map})), }, "", }, { "reverse ordered ops", []Operation{ makeOp("UUID_Test", testUUID1, namedUUID1, Row(map[string]interface{}{"real_uuid": UUID{GoUUID: namedUUID}})), makeOp("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"uuidset": []string{namedUUID}})), }, []Operation{ makeOp("UUID_Test", testUUID1, "", Row(map[string]interface{}{"real_uuid": UUID{GoUUID: testUUID}})), makeOp("UUID_Test", testUUID, "", Row(map[string]interface{}{"uuidset": []string{testUUID}})), }, "", }, { "where ops", []Operation{ makeOpWhere("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"_uuid": namedUUID}), NewCondition("_uuid", ConditionEqual, namedUUID), ), makeOpWhere("UUID_Test", testUUID1, namedUUID1, Row(map[string]interface{}{"real_uuid": UUID{GoUUID: namedUUID}}), NewCondition("_uuid", ConditionEqual, namedUUID), ), }, []Operation{ makeOpWhere("UUID_Test", 
testUUID, "", Row(map[string]interface{}{"_uuid": testUUID}), NewCondition("_uuid", ConditionEqual, testUUID), ), makeOpWhere("UUID_Test", testUUID1, "", Row(map[string]interface{}{"real_uuid": UUID{GoUUID: testUUID}}), NewCondition("_uuid", ConditionEqual, testUUID), ), }, "", }, { "mutation ops", []Operation{ makeOpMutation("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"_uuid": namedUUID}), *NewMutation("_uuid", MutateOperationAdd, namedUUID), ), makeOpMutation("UUID_Test", testUUID1, namedUUID1, Row(map[string]interface{}{"real_uuid": UUID{GoUUID: namedUUID}}), *NewMutation("_uuid", MutateOperationAdd, namedUUID), ), }, []Operation{ makeOpMutation("UUID_Test", testUUID, "", Row(map[string]interface{}{"_uuid": testUUID}), *NewMutation("_uuid", MutateOperationAdd, testUUID), ), makeOpMutation("UUID_Test", testUUID1, "", Row(map[string]interface{}{"real_uuid": UUID{GoUUID: testUUID}}), *NewMutation("_uuid", MutateOperationAdd, testUUID), ), }, "", }, { "invalid UUID", []Operation{ makeOp("UUID_Test", badUUID, "", Row(map[string]interface{}{"uuidset": []string{namedUUID}})), }, []Operation{}, fmt.Sprintf("operation UUID %q invalid", badUUID), }, { "mismatched UUID for named UUID", []Operation{ makeOp("UUID_Test", testUUID, namedUUID, Row(map[string]interface{}{"uuidset": []string{namedUUID}})), makeOp("UUID_Test", testUUID1, namedUUID, Row(map[string]interface{}{"real_uuid": UUID{GoUUID: namedUUID}})), }, []Operation{}, fmt.Sprintf("named UUID %q maps to UUID %q but found existing UUID %q", namedUUID, testUUID, testUUID1), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { schema, err := getUUIDTestSchema() require.Nil(t, err) got, err := ExpandNamedUUIDs(tt.ops, &schema) if tt.expectedErr != "" { require.Error(t, err, tt.expectedErr) } else { require.NoError(t, err) assert.Equal(t, tt.expected, got) } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/notation.go000066400000000000000000000072751464501522100227460ustar00rootroot00000000000000package ovsdb import ( "encoding/json" ) const ( // OperationInsert is an insert operation OperationInsert = "insert" // OperationSelect is a select operation OperationSelect = "select" // OperationUpdate is an update operation OperationUpdate = "update" // OperationMutate is a mutate operation OperationMutate = "mutate" // OperationDelete is a delete operation OperationDelete = "delete" // OperationWait is a wait operation OperationWait = "wait" // OperationCommit is a commit operation OperationCommit = "commit" // OperationAbort is an abort operation OperationAbort = "abort" // OperationComment is a comment operation OperationComment = "comment" // OperationAssert is an assert operation OperationAssert = "assert" ) // Operation represents an operation according to RFC7047 section 5.2 type Operation struct { Op string `json:"op"` Table string `json:"table,omitempty"` Row Row `json:"row,omitempty"` Rows []Row `json:"rows,omitempty"` Columns []string `json:"columns,omitempty"` Mutations []Mutation `json:"mutations,omitempty"` Timeout *int `json:"timeout,omitempty"` Where []Condition `json:"where,omitempty"` Until string `json:"until,omitempty"` Durable *bool `json:"durable,omitempty"` Comment *string `json:"comment,omitempty"` Lock *string `json:"lock,omitempty"` UUID string `json:"uuid,omitempty"` UUIDName string `json:"uuid-name,omitempty"` } // MarshalJSON marshalls 'Operation' to a byte array // For 'select' operations, we don't omit the 'Where' field // to allow selecting all rows of a table func (o Operation) 
MarshalJSON() ([]byte, error) { type OpAlias Operation switch o.Op { case "select": where := o.Where if where == nil { where = make([]Condition, 0) } return json.Marshal(&struct { Where []Condition `json:"where"` OpAlias }{ Where: where, OpAlias: (OpAlias)(o), }) default: return json.Marshal(&struct { OpAlias }{ OpAlias: (OpAlias)(o), }) } } // MonitorRequests represents a group of monitor requests according to RFC7047 // We cannot use MonitorRequests by inlining the MonitorRequest Map structure till GoLang issue #6213 makes it. // The only option is to go with raw map[string]interface{} option :-( that sucks ! // Refer to client.go : MonitorAll() function for more details type MonitorRequests struct { Requests map[string]MonitorRequest `json:"requests"` } // MonitorRequest represents a monitor request according to RFC7047 type MonitorRequest struct { Columns []string `json:"columns,omitempty"` Where []Condition `json:"where,omitempty"` Select *MonitorSelect `json:"select,omitempty"` } // TransactResponse represents the response to a Transact Operation type TransactResponse struct { Result []OperationResult `json:"result"` Error string `json:"error"` } // OperationResult is the result of an Operation type OperationResult struct { Count int `json:"count,omitempty"` Error string `json:"error,omitempty"` Details string `json:"details,omitempty"` UUID UUID `json:"uuid,omitempty"` Rows []Row `json:"rows,omitempty"` } func ovsSliceToGoNotation(val interface{}) (interface{}, error) { switch sl := val.(type) { case []interface{}: bsliced, err := json.Marshal(sl) if err != nil { return nil, err } switch sl[0] { case "uuid", "named-uuid": var uuid UUID err = json.Unmarshal(bsliced, &uuid) return uuid, err case "set": var oSet OvsSet err = json.Unmarshal(bsliced, &oSet) return oSet, err case "map": var oMap OvsMap err = json.Unmarshal(bsliced, &oMap) return oMap, err } return val, nil } return val, nil } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/notation_test.go000066400000000000000000000161661464501522100240040ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "log" "reflect" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestOpRowSerialization(t *testing.T) { var commentString = "this is a comment" tests := []struct { name string op Operation expected string }{ { "insert", Operation{ Op: "insert", Table: "Bridge", }, `{"op":"insert","table":"Bridge"}`, }, { "insert with row", Operation{ Op: "insert", Table: "Bridge", Row: Row(map[string]interface{}{"name": "docker-ovs"}), }, `{"op":"insert","table":"Bridge","row":{"name":"docker-ovs"}}`, }, { "comment", Operation{ Op: "comment", Comment: &commentString, }, fmt.Sprintf(`{"op":"comment","comment":"%s"}`, commentString), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { str, err := json.Marshal(test.op) if err != nil { log.Fatal("serialization error:", err) } if string(str) != test.expected { t.Error("Expected: ", test.expected, "Got", string(str)) } }) } } func TestOpRowsSerialization(t *testing.T) { operation := Operation{ Op: "insert", Table: "Interface", } iface1 := Row(map[string]interface{}{ "name": "test-iface1", "mac": "0000ffaaaa", "ofport": 1, }) iface2 := Row(map[string]interface{}{ "name": "test-iface2", "mac": "0000ffaabb", "ofport": 2, }) operation.Rows = []Row{iface1, iface2} str, err := json.Marshal(operation) if err != nil { log.Fatal("serialization error:", err) } expected := 
`{"op":"insert","table":"Interface","rows":[{"mac":"0000ffaaaa","name":"test-iface1","ofport":1},{"mac":"0000ffaabb","name":"test-iface2","ofport":2}]}` if string(str) != expected { t.Error("Expected: ", expected, "Got", string(str)) } } func TestValidateOvsSet(t *testing.T) { goSlice := []int{1, 2, 3, 4} oSet, err := NewOvsSet(goSlice) if err != nil { t.Error("Error creating OvsSet ", err) } data, err := json.Marshal(oSet) if err != nil { t.Error("Error Marshalling OvsSet", err) } expected := `["set",[1,2,3,4]]` if string(data) != expected { t.Error("Expected: ", expected, "Got", string(data)) } // Negative condition test oSet, err = NewOvsSet(struct{ foo string }{}) if err == nil { t.Error("OvsSet must fail for anything other than Slices and atomic types") t.Error("Got", oSet) } } func TestValidateOvsMap(t *testing.T) { myMap := make(map[int]string) myMap[1] = "hello" myMap[2] = "world" oMap, err := NewOvsMap(myMap) if err != nil { t.Error("Error creating OvsMap ", err) } data, err := json.Marshal(oMap) if err != nil { t.Error("Error Marshalling OvsMap", err) } expected1 := `["map",[[1,"hello"],[2,"world"]]]` expected2 := `["map",[[2,"world"],[1,"hello"]]]` if string(data) != expected1 && string(data) != expected2 { t.Error("Expected: ", expected1, "Got", string(data)) } // Negative condition test integer := 5 _, err = NewOvsMap(integer) if err == nil { t.Error("OvsMap must fail for anything other than Maps") } } func TestValidateUuid(t *testing.T) { uuid1 := "this is a bad uuid" // Bad uuid2 := "alsoabaduuid" // Bad uuid3 := "550e8400-e29b-41d4-a716-446655440000" // Good uuid4 := "thishoul-dnot-pass-vali-dationchecks" // Bad if IsValidUUID(uuid1) { t.Error(uuid1, " is not a valid UUID") } if IsValidUUID(uuid2) { t.Error(uuid2, " is not a valid UUID") } if !IsValidUUID(uuid3) { t.Error(uuid3, " is a valid UUID") } if IsValidUUID(uuid4) { t.Error(uuid4, " is not a valid UUID") } } func TestNewUUID(t *testing.T) { uuid := UUID{"550e8400-e29b-41d4-a716-446655440000"} uuidStr, _ := json.Marshal(uuid) expected := `["uuid","550e8400-e29b-41d4-a716-446655440000"]` if string(uuidStr) != expected { t.Error("uuid is not correctly formatted") } } func TestNewNamedUUID(t *testing.T) { uuid := UUID{"test-uuid"} uuidStr, _ := json.Marshal(uuid) expected := `["named-uuid","test-uuid"]` if string(uuidStr) != expected { t.Error("uuid is not correctly formatted") } } func TestNewMutation(t *testing.T) { mutation := NewMutation("column", "+=", 1) mutationStr, _ := json.Marshal(mutation) expected := `["column","+=",1]` if string(mutationStr) != expected { t.Error("mutation is not correctly formatted") } } func TestOperationsMarshalUnmarshalJSON(t *testing.T) { in := []byte(`{"op":"mutate","table":"Open_vSwitch","mutations":[["bridges","insert",["named-uuid","foo"]]],"where":[["_uuid","==",["named-uuid","ovs"]]]}`) var op Operation err := json.Unmarshal(in, &op) if err != nil { t.Fatal(err) } assert.Equal(t, OperationMutate, op.Op) assert.Equal(t, "Open_vSwitch", op.Table) assert.Equal(t, 1, len(op.Mutations)) assert.Equal(t, Mutation{ Column: "bridges", Mutator: OperationInsert, Value: UUID{GoUUID: "foo"}, }, op.Mutations[0]) } func TestOvsSliceToGoNotation(t *testing.T) { tests := []struct { name string value interface{} want interface{} wantErr bool }{ { "scalar value", "foo", "foo", false, }, { "empty set", []interface{}{"set", []interface{}{}}, OvsSet{GoSet: []interface{}{}}, false, }, { "set", []interface{}{"set", []interface{}{"foo", "bar", "baz"}}, OvsSet{GoSet: []interface{}{"foo", "bar", "baz"}}, 
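// Note, not part of the upstream tests: the cases in this table mirror the RFC 7047
// wire encodings handled by ovsSliceToGoNotation in notation.go:
//
//	["set", [v1, v2, ...]]                  -> OvsSet
//	["map", [[k1, v1], [k2, v2], ...]]      -> OvsMap
//	["uuid", "..."] / ["named-uuid", "..."] -> UUID
//
// Row.UnmarshalJSON (row.go) applies the same conversion to every column value, so a
// column received as ["set",["foo","bar"]] surfaces in a Row as
// OvsSet{GoSet: []interface{}{"foo", "bar"}}.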
false, }, { "uuid set", []interface{}{"set", []interface{}{[]interface{}{"named-uuid", "foo"}, []interface{}{"named-uuid", "bar"}}}, OvsSet{GoSet: []interface{}{UUID{GoUUID: "foo"}, UUID{GoUUID: "bar"}}}, false, }, { "empty map", []interface{}{"map", []interface{}{}}, OvsMap{GoMap: map[interface{}]interface{}{}}, false, }, { "map", []interface{}{"map", []interface{}{[]interface{}{"foo", "bar"}, []interface{}{"baz", "quux"}}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar", "baz": "quux"}}, false, }, { "map uuid values", []interface{}{"map", []interface{}{[]interface{}{"foo", []interface{}{"named-uuid", "bar"}}, []interface{}{"baz", []interface{}{"named-uuid", "quux"}}}}, OvsMap{GoMap: map[interface{}]interface{}{"foo": UUID{GoUUID: "bar"}, "baz": UUID{GoUUID: "quux"}}}, false, }, { "map uuid keys", []interface{}{"map", []interface{}{[]interface{}{[]interface{}{"named-uuid", "bar"}, "foo"}, []interface{}{[]interface{}{"named-uuid", "quux"}, "baz"}}}, OvsMap{GoMap: map[interface{}]interface{}{UUID{GoUUID: "bar"}: "foo", UUID{GoUUID: "quux"}: "baz"}}, false, }, { "map uuid keys and values", []interface{}{"map", []interface{}{[]interface{}{[]interface{}{"named-uuid", "bar"}, "foo"}, []interface{}{[]interface{}{"named-uuid", "quux"}, "baz"}}}, OvsMap{GoMap: map[interface{}]interface{}{UUID{GoUUID: "bar"}: "foo", UUID{GoUUID: "quux"}: "baz"}}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := ovsSliceToGoNotation(tt.value) if tt.wantErr { assert.Error(t, err) return } require.NoError(t, err) wantValue := reflect.ValueOf(tt.want) gotValue := reflect.ValueOf(got) assert.Equal(t, wantValue.Type(), gotValue.Type()) assert.Equal(t, wantValue.Interface(), gotValue.Interface()) }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/row.go000066400000000000000000000010611464501522100217050ustar00rootroot00000000000000package ovsdb import "encoding/json" // Row is a table Row according to RFC7047 type Row map[string]interface{} // UnmarshalJSON unmarshalls a byte array to an OVSDB Row func (r *Row) UnmarshalJSON(b []byte) (err error) { *r = make(map[string]interface{}) var raw map[string]interface{} err = json.Unmarshal(b, &raw) for key, val := range raw { val, err = ovsSliceToGoNotation(val) if err != nil { return err } (*r)[key] = val } return err } // NewRow returns a new empty row func NewRow() Row { return Row(make(map[string]interface{})) } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/rpc.go000066400000000000000000000047271464501522100216760ustar00rootroot00000000000000package ovsdb const ( // MonitorRPC is the monitor RPC method MonitorRPC = "monitor" // ConditionalMonitorRPC is the monitor_cond ConditionalMonitorRPC = "monitor_cond" // ConditionalMonitorSinceRPC is the monitor_cond_since RPC method ConditionalMonitorSinceRPC = "monitor_cond_since" ) // NewEchoArgs creates a new set of arguments for an echo RPC func NewEchoArgs() []interface{} { return []interface{}{"libovsdb echo"} } // NewGetSchemaArgs creates a new set of arguments for a get_schemas RPC func NewGetSchemaArgs(schema string) []interface{} { return []interface{}{schema} } // NewTransactArgs creates a new set of arguments for a transact RPC func NewTransactArgs(database string, operations ...Operation) []interface{} { dbSlice := make([]interface{}, 1) dbSlice[0] = database opsSlice := make([]interface{}, len(operations)) for i, d := range operations { opsSlice[i] = d } ops := append(dbSlice, opsSlice...) 
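// Example, not part of the upstream source: the returned slice is used directly as
// the JSON-RPC "transact" params, e.g.
//
//	NewTransactArgs("Open_vSwitch", Operation{Op: "insert", Table: "Bridge"})
//
// marshals to ["Open_vSwitch",{"op":"insert","table":"Bridge"}], as exercised by
// TestNewTransactArgs in rpc_test.go below.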
return ops } // NewCancelArgs creates a new set of arguments for a cancel RPC func NewCancelArgs(id interface{}) []interface{} { return []interface{}{id} } // NewMonitorArgs creates a new set of arguments for a monitor RPC func NewMonitorArgs(database string, value interface{}, requests map[string]MonitorRequest) []interface{} { return []interface{}{database, value, requests} } // NewMonitorCondSinceArgs creates a new set of arguments for a monitor_cond_since RPC func NewMonitorCondSinceArgs(database string, value interface{}, requests map[string]MonitorRequest, lastTransactionID string) []interface{} { return []interface{}{database, value, requests, lastTransactionID} } // NewMonitorCancelArgs creates a new set of arguments for a monitor_cancel RPC func NewMonitorCancelArgs(value interface{}) []interface{} { return []interface{}{value} } // NewLockArgs creates a new set of arguments for a lock, steal or unlock RPC func NewLockArgs(id interface{}) []interface{} { return []interface{}{id} } // NotificationHandler is the interface that must be implemented to receive notifications type NotificationHandler interface { // RFC 7047 section 4.1.6 Update Notification Update(context interface{}, tableUpdates TableUpdates) // ovsdb-server.7 update2 notifications Update2(context interface{}, tableUpdates TableUpdates2) // RFC 7047 section 4.1.9 Locked Notification Locked([]interface{}) // RFC 7047 section 4.1.10 Stolen Notification Stolen([]interface{}) // RFC 7047 section 4.1.11 Echo Notification Echo([]interface{}) Disconnected() } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/rpc_test.go000066400000000000000000000056141464501522100227310ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "testing" ) func TestNewGetSchemaArgs(t *testing.T) { database := "Open_vSwitch" args := NewGetSchemaArgs(database) argString, _ := json.Marshal(args) expected := `["Open_vSwitch"]` if string(argString) != expected { t.Error("Expected: ", expected, " Got: ", string(argString)) } } func TestNewWaitTransactArgs(t *testing.T) { database := "Open_vSwitch" i := 0 operation := Operation{Op: "wait", Table: "Bridge", Timeout: &i} args := NewTransactArgs(database, operation) argString, _ := json.Marshal(args) expected := `["Open_vSwitch",{"op":"wait","table":"Bridge","timeout":0}]` if string(argString) != expected { t.Error("Expected: ", expected, " Got: ", string(argString)) } } func TestNewTransactArgs(t *testing.T) { database := "Open_vSwitch" operation := Operation{Op: "insert", Table: "Bridge"} args := NewTransactArgs(database, operation) argString, _ := json.Marshal(args) expected := `["Open_vSwitch",{"op":"insert","table":"Bridge"}]` if string(argString) != expected { t.Error("Expected: ", expected, " Got: ", string(argString)) } } func TestNewMultipleTransactArgs(t *testing.T) { database := "Open_vSwitch" operation1 := Operation{Op: "insert", Table: "Bridge"} operation2 := Operation{Op: "delete", Table: "Bridge"} args := NewTransactArgs(database, operation1, operation2) argString, _ := json.Marshal(args) expected := `["Open_vSwitch",{"op":"insert","table":"Bridge"},{"op":"delete","table":"Bridge"}]` if string(argString) != expected { t.Error("Expected: ", expected, " Got: ", string(argString)) } } func TestNewCancelArgs(t *testing.T) { id := 1 args := NewCancelArgs(id) argString, _ := json.Marshal(args) expected := `[1]` if string(argString) != expected { t.Error("Expected: ", expected, " Got: ", string(argString)) } } func TestNewMonitorArgs(t *testing.T) { database := "Open_vSwitch" value := 1 r := 
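// Sketch, not part of the upstream tests: the NotificationHandler interface defined
// in rpc.go above is what a client implements to receive the monitor updates
// requested with these arguments. A minimal no-op handler, assuming a package that
// imports this one under the name "ovsdb", could look like:
//
//	type nopHandler struct{}
//
//	func (nopHandler) Update(context interface{}, updates ovsdb.TableUpdates)   {}
//	func (nopHandler) Update2(context interface{}, updates ovsdb.TableUpdates2) {}
//	func (nopHandler) Locked([]interface{})                                     {}
//	func (nopHandler) Stolen([]interface{})                                     {}
//	func (nopHandler) Echo([]interface{})                                       {}
//	func (nopHandler) Disconnected()                                            {}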
MonitorRequest{
		Columns: []string{"name", "ports", "external_ids"},
		Select:  NewDefaultMonitorSelect(),
	}
	requests := make(map[string]MonitorRequest)
	requests["Bridge"] = r
	args := NewMonitorArgs(database, value, requests)
	argString, _ := json.Marshal(args)
	expected := `["Open_vSwitch",1,{"Bridge":{"columns":["name","ports","external_ids"],"select":{"initial":true,"insert":true,"delete":true,"modify":true}}}]`
	if string(argString) != expected {
		t.Error("Expected: ", expected, " Got: ", string(argString))
	}
}

func TestNewMonitorCancelArgs(t *testing.T) {
	value := 1
	args := NewMonitorCancelArgs(value)
	argString, _ := json.Marshal(args)
	expected := `[1]`
	if string(argString) != expected {
		t.Error("Expected: ", expected, " Got: ", string(argString))
	}
}

func TestNewLockArgs(t *testing.T) {
	id := "testId"
	args := NewLockArgs(id)
	argString, _ := json.Marshal(args)
	expected := `["testId"]`
	if string(argString) != expected {
		t.Error("Expected: ", expected, " Got: ", string(argString))
	}
}
golang-github-ovn-org-libovsdb-0.7.0/ovsdb/schema.go000066400000000000000000000420721464501522100223450ustar00rootroot00000000000000package ovsdb

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"strings"
)

// DatabaseSchema is a database schema according to RFC7047
type DatabaseSchema struct {
	Name          string                 `json:"name"`
	Version       string                 `json:"version"`
	Tables        map[string]TableSchema `json:"tables"`
	allTablesRoot *bool
}

// UUIDColumn is a static column that represents the _uuid column, common to all tables
var UUIDColumn = ColumnSchema{
	Type: TypeUUID,
}

// Table returns the TableSchema for a given table name
func (schema DatabaseSchema) Table(tableName string) *TableSchema {
	if table, ok := schema.Tables[tableName]; ok {
		return &table
	}
	return nil
}

// IsRoot returns whether a table is root or not
func (schema DatabaseSchema) IsRoot(tableName string) (bool, error) {
	t := schema.Table(tableName)
	if t == nil {
		return false, fmt.Errorf("Table %s not in schema", tableName)
	}
	if t.IsRoot {
		return true, nil
	}
	// As per RFC7047, for compatibility with schemas created before
	// "isRoot" was introduced, if "isRoot" is omitted or false in every
	// <table-schema> in a given <database-schema>, then every table is part
	// of the root set.
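	// Worked example, not part of the upstream source: if no table in the schema
	// sets "isRoot" (or all set it to false), IsRoot returns true for every table;
	// as soon as any table sets "isRoot": true, only the tables that set it are
	// treated as part of the root set.
	//
	//	root, _ := schema.IsRoot("Bridge") // true when no table in the schema sets "isRoot"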
if schema.allTablesRoot == nil { allTablesRoot := true for _, tSchema := range schema.Tables { if tSchema.IsRoot { allTablesRoot = false break } } schema.allTablesRoot = &allTablesRoot } return *schema.allTablesRoot, nil } // Print will print the contents of the DatabaseSchema func (schema DatabaseSchema) Print(w io.Writer) { fmt.Fprintf(w, "%s, (%s)\n", schema.Name, schema.Version) for table, tableSchema := range schema.Tables { fmt.Fprintf(w, "\t %s", table) if len(tableSchema.Indexes) > 0 { fmt.Fprintf(w, "(%v)\n", tableSchema.Indexes) } else { fmt.Fprintf(w, "\n") } for column, columnSchema := range tableSchema.Columns { fmt.Fprintf(w, "\t\t %s => %s\n", column, columnSchema) } } } // SchemaFromFile returns a DatabaseSchema from a file func SchemaFromFile(f *os.File) (DatabaseSchema, error) { data, err := ioutil.ReadAll(f) if err != nil { return DatabaseSchema{}, err } var schema DatabaseSchema err = json.Unmarshal(data, &schema) if err != nil { return DatabaseSchema{}, err } return schema, nil } // ValidateOperations performs basic validation for operations against a DatabaseSchema func (schema DatabaseSchema) ValidateOperations(operations ...Operation) bool { for _, op := range operations { switch op.Op { case OperationAbort, OperationAssert, OperationComment, OperationCommit, OperationWait: continue case OperationInsert, OperationSelect, OperationUpdate, OperationMutate, OperationDelete: table, ok := schema.Tables[op.Table] if ok { for column := range op.Row { if _, ok := table.Columns[column]; !ok { if column != "_uuid" && column != "_version" { return false } } } for _, row := range op.Rows { for column := range row { if _, ok := table.Columns[column]; !ok { if column != "_uuid" && column != "_version" { return false } } } } for _, column := range op.Columns { if _, ok := table.Columns[column]; !ok { if column != "_uuid" && column != "_version" { return false } } } } else { return false } } } return true } // TableSchema is a table schema according to RFC7047 type TableSchema struct { Columns map[string]*ColumnSchema `json:"columns"` Indexes [][]string `json:"indexes,omitempty"` IsRoot bool `json:"isRoot,omitempty"` } // Column returns the Column object for a specific column name func (t TableSchema) Column(columnName string) *ColumnSchema { if columnName == "_uuid" { return &UUIDColumn } if column, ok := t.Columns[columnName]; ok { return column } return nil } /*RFC7047 defines some atomic-types (e.g: integer, string, etc). However, the Column's type can also hold other more complex types such as set, enum and map. The way to determine the type depends on internal, not directly marshallable fields. Therefore, in order to simplify the usage of this library, we define an ExtendedType that includes all possible column types (including atomic fields). 
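As an added illustration (not part of the original comment), the inference
implemented by ColumnSchema.UnmarshalJSON below works out as follows:

    "type": "string"                                          -> TypeString
    "type": {"key": {"type": "string"},
             "value": {"type": "string"}}                     -> TypeMap
    "type": {"key": {"type": "uuid"}, "min": 0,
             "max": "unlimited"}                              -> TypeSet
    "type": {"key": {"type": "string",
                     "enum": ["set", ["one", "two"]]}}        -> TypeEnum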
*/

// ExtendedType includes atomic types as defined in the RFC plus Enum, Map and Set
type ExtendedType = string

// RefType is used to define the possible RefTypes
type RefType = string

// unlimited is not constant as we can't take the address of int constants
var (
	// Unlimited is used to express unlimited "Max"
	Unlimited = -1
)

const (
	unlimitedString = "unlimited"

	//Strong RefType
	Strong RefType = "strong"
	//Weak RefType
	Weak RefType = "weak"

	//ExtendedType associated with Atomic Types
	//TypeInteger is equivalent to 'int'
	TypeInteger ExtendedType = "integer"
	//TypeReal is equivalent to 'float64'
	TypeReal ExtendedType = "real"
	//TypeBoolean is equivalent to 'bool'
	TypeBoolean ExtendedType = "boolean"
	//TypeString is equivalent to 'string'
	TypeString ExtendedType = "string"
	//TypeUUID is equivalent to 'libovsdb.UUID'
	TypeUUID ExtendedType = "uuid"

	//Extended Types used to summarize the internal type of the field.
	//TypeEnum is an enumerator of type defined by Key.Type
	TypeEnum ExtendedType = "enum"
	//TypeMap is a map whose type depends on Key.Type and Value.Type
	TypeMap ExtendedType = "map"
	//TypeSet is a set whose type depends on Key.Type
	TypeSet ExtendedType = "set"
)

// BaseType is a base-type structure as per RFC7047
type BaseType struct {
	Type       string
	Enum       []interface{}
	minReal    *float64
	maxReal    *float64
	minInteger *int
	maxInteger *int
	minLength  *int
	maxLength  *int
	refTable   *string
	refType    *RefType
}

func (b *BaseType) simpleAtomic() bool {
	return isAtomicType(b.Type) && b.Enum == nil && b.minReal == nil && b.maxReal == nil &&
		b.minInteger == nil && b.maxInteger == nil && b.minLength == nil && b.maxLength == nil &&
		b.refTable == nil && b.refType == nil
}

// MinReal returns the minimum real value
// RFC7047 does not define a default, but we assume this to be
// the smallest non zero value a float64 could hold
func (b *BaseType) MinReal() (float64, error) {
	if b.Type != TypeReal {
		return 0, fmt.Errorf("%s is not a real", b.Type)
	}
	if b.minReal != nil {
		return *b.minReal, nil
	}
	return math.SmallestNonzeroFloat64, nil
}

// MaxReal returns the maximum real value
// RFC7047 does not define a default, but this would be the maximum
// value held by a float64
func (b *BaseType) MaxReal() (float64, error) {
	if b.Type != TypeReal {
		return 0, fmt.Errorf("%s is not a real", b.Type)
	}
	if b.maxReal != nil {
		return *b.maxReal, nil
	}
	return math.MaxFloat64, nil
}

// MinInteger returns the minimum integer value
// RFC7047 specifies the minimum to be -2^63
func (b *BaseType) MinInteger() (int, error) {
	if b.Type != TypeInteger {
		return 0, fmt.Errorf("%s is not an integer", b.Type)
	}
	if b.minInteger != nil {
		return *b.minInteger, nil
	}
	return int(math.Pow(-2, 63)), nil
}

// MaxInteger returns the maximum integer value
// RFC7047 specifies the maximum to be 2^63-1
func (b *BaseType) MaxInteger() (int, error) {
	if b.Type != TypeInteger {
		return 0, fmt.Errorf("%s is not an integer", b.Type)
	}
	if b.maxInteger != nil {
		return *b.maxInteger, nil
	}
	return int(math.Pow(2, 63)) - 1, nil
}

// MinLength returns the minimum string length
// RFC7047 doesn't specify a default, but we assume
// that it must be >= 0
func (b *BaseType) MinLength() (int, error) {
	if b.Type != TypeString {
		return 0, fmt.Errorf("%s is not a string", b.Type)
	}
	if b.minLength != nil {
		return *b.minLength, nil
	}
	return 0, nil
}

// MaxLength returns the maximum string length
// RFC7047 doesn't specify a default, but we assume
// that it must be 2^63-1
func (b *BaseType) MaxLength() (int, error) {
	if b.Type != TypeString {
		return 0, fmt.Errorf("%s is not a string", b.Type)
	}
	if b.maxLength != nil {
		return *b.maxLength, nil
	}
	return int(math.Pow(2, 63)) - 1, nil
}

// RefTable returns the table to which a UUID type refers
// It will return an empty string if not set
func (b *BaseType) RefTable() (string, error) {
	if b.Type != TypeUUID {
		return "", fmt.Errorf("%s is not a uuid", b.Type)
	}
	if b.refTable != nil {
		return *b.refTable, nil
	}
	return "", nil
}

// RefType returns the reference type for a UUID field
// RFC7047 infers the RefType is strong if omitted
func (b *BaseType) RefType() (RefType, error) {
	if b.Type != TypeUUID {
		return "", fmt.Errorf("%s is not a uuid", b.Type)
	}
	if b.refType != nil {
		return *b.refType, nil
	}
	return Strong, nil
}

// UnmarshalJSON unmarshals a json-formatted base type
func (b *BaseType) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err == nil {
		if isAtomicType(s) {
			b.Type = s
		} else {
			return fmt.Errorf("non atomic type %s in ", s)
		}
		return nil
	}
	// temporary type to avoid recursive call to unmarshal
	var bt struct {
		Type       string      `json:"type"`
		Enum       interface{} `json:"enum,omitempty"`
		MinReal    *float64    `json:"minReal,omitempty"`
		MaxReal    *float64    `json:"maxReal,omitempty"`
		MinInteger *int        `json:"minInteger,omitempty"`
		MaxInteger *int        `json:"maxInteger,omitempty"`
		MinLength  *int        `json:"minLength,omitempty"`
		MaxLength  *int        `json:"maxLength,omitempty"`
		RefTable   *string     `json:"refTable,omitempty"`
		RefType    *RefType    `json:"refType,omitempty"`
	}
	err := json.Unmarshal(data, &bt)
	if err != nil {
		return err
	}
	if bt.Enum != nil {
		// 'enum' is a list or a single element representing a list of exactly one element
		switch bt.Enum.(type) {
		case []interface{}:
			// it's an OvsSet
			oSet := bt.Enum.([]interface{})
			innerSet := oSet[1].([]interface{})
			b.Enum = make([]interface{}, len(innerSet))
			copy(b.Enum, innerSet)
		default:
			b.Enum = []interface{}{bt.Enum}
		}
	}
	b.Type = bt.Type
	b.minReal = bt.MinReal
	b.maxReal = bt.MaxReal
	b.minInteger = bt.MinInteger
	b.maxInteger = bt.MaxInteger
	b.minLength = bt.MinLength
	b.maxLength = bt.MaxLength
	b.refTable = bt.RefTable
	b.refType = bt.RefType
	return nil
}

// MarshalJSON marshals a base type to JSON
func (b BaseType) MarshalJSON() ([]byte, error) {
	j := struct {
		Type       string   `json:"type,omitempty"`
		Enum       *OvsSet  `json:"enum,omitempty"`
		MinReal    *float64 `json:"minReal,omitempty"`
		MaxReal    *float64 `json:"maxReal,omitempty"`
		MinInteger *int     `json:"minInteger,omitempty"`
		MaxInteger *int     `json:"maxInteger,omitempty"`
		MinLength  *int     `json:"minLength,omitempty"`
		MaxLength  *int     `json:"maxLength,omitempty"`
		RefTable   *string  `json:"refTable,omitempty"`
		RefType    *RefType `json:"refType,omitempty"`
	}{
		Type:       b.Type,
		MinReal:    b.minReal,
		MaxReal:    b.maxReal,
		MinInteger: b.minInteger,
		MaxInteger: b.maxInteger,
		MinLength:  b.minLength,
		MaxLength:  b.maxLength,
		RefTable:   b.refTable,
		RefType:    b.refType,
	}
	if len(b.Enum) > 0 {
		set, err := NewOvsSet(b.Enum)
		if err != nil {
			return nil, err
		}
		j.Enum = &set
	}
	return json.Marshal(j)
}

// ColumnType is a type object as per RFC7047
// "key": required
// "value": optional
// "min": optional (default: 1)
// "max": or "unlimited" optional (default: 1)
type ColumnType struct {
	Key   *BaseType
	Value *BaseType
	min   *int
	max   *int
}

// Max returns the maximum value of a ColumnType.
-1 is Unlimited func (c *ColumnType) Max() int { if c.max == nil { return 1 } return *c.max } // Min returns the minimum value of a ColumnType func (c *ColumnType) Min() int { if c.min == nil { return 1 } return *c.min } // UnmarshalJSON unmarshals a json-formatted column type func (c *ColumnType) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err == nil { if isAtomicType(s) { c.Key = &BaseType{Type: s} } else { return fmt.Errorf("non atomic type %s in ", s) } return nil } var colType struct { Key *BaseType `json:"key"` Value *BaseType `json:"value"` Min *int `json:"min"` Max interface{} `json:"max"` } err := json.Unmarshal(data, &colType) if err != nil { return err } c.Key = colType.Key c.Value = colType.Value c.min = colType.Min switch v := colType.Max.(type) { case string: if v == unlimitedString { c.max = &Unlimited } else { return fmt.Errorf("unexpected string value in max field") } case float64: i := int(v) c.max = &i default: c.max = nil } return nil } // MarshalJSON marshalls a column type to JSON func (c ColumnType) MarshalJSON() ([]byte, error) { if c.Value == nil && c.max == nil && c.min == nil && c.Key.simpleAtomic() { return json.Marshal(c.Key.Type) } if c.Max() == Unlimited { colType := struct { Key *BaseType `json:"key"` Value *BaseType `json:"value,omitempty"` Min *int `json:"min,omitempty"` Max string `json:"max,omitempty"` }{ Key: c.Key, Value: c.Value, Min: c.min, Max: unlimitedString, } return json.Marshal(&colType) } colType := struct { Key *BaseType `json:"key"` Value *BaseType `json:"value,omitempty"` Min *int `json:"min,omitempty"` Max *int `json:"max,omitempty"` }{ Key: c.Key, Value: c.Value, Min: c.min, Max: c.max, } return json.Marshal(&colType) } // ColumnSchema is a column schema according to RFC7047 type ColumnSchema struct { // According to RFC7047, "type" field can be, either an // Or a ColumnType defined below. To try to simplify the usage, the // json message will be parsed manually and Type will indicate the "extended" // type. Depending on its value, more information may be available in TypeObj. 
// E.g: If Type == TypeEnum, TypeObj.Key.Enum contains the possible values Type ExtendedType TypeObj *ColumnType ephemeral *bool mutable *bool } // Mutable returns whether a column is mutable func (c *ColumnSchema) Mutable() bool { if c.mutable != nil { return *c.mutable } // default true return true } // Ephemeral returns whether a column is ephemeral func (c *ColumnSchema) Ephemeral() bool { if c.ephemeral != nil { return *c.ephemeral } // default false return false } // UnmarshalJSON unmarshals a json-formatted column func (c *ColumnSchema) UnmarshalJSON(data []byte) error { // ColumnJSON represents the known json values for a Column var colJSON struct { Type *ColumnType `json:"type"` Ephemeral *bool `json:"ephemeral,omitempty"` Mutable *bool `json:"mutable,omitempty"` } // Unmarshal known keys if err := json.Unmarshal(data, &colJSON); err != nil { return fmt.Errorf("cannot parse column object %s", err) } c.ephemeral = colJSON.Ephemeral c.mutable = colJSON.Mutable c.TypeObj = colJSON.Type // Infer the ExtendedType from the TypeObj if c.TypeObj.Value != nil { c.Type = TypeMap } else if c.TypeObj.Min() != 1 || c.TypeObj.Max() != 1 { c.Type = TypeSet } else if len(c.TypeObj.Key.Enum) > 0 { c.Type = TypeEnum } else { c.Type = c.TypeObj.Key.Type } return nil } // MarshalJSON marshalls a column schema to JSON func (c ColumnSchema) MarshalJSON() ([]byte, error) { type colJSON struct { Type *ColumnType `json:"type"` Ephemeral *bool `json:"ephemeral,omitempty"` Mutable *bool `json:"mutable,omitempty"` } column := colJSON{ Type: c.TypeObj, Ephemeral: c.ephemeral, Mutable: c.mutable, } return json.Marshal(column) } // String returns a string representation of the (native) column type func (c *ColumnSchema) String() string { var flags []string var flagStr string var typeStr string if c.Ephemeral() { flags = append(flags, "E") } if c.Mutable() { flags = append(flags, "M") } if len(flags) > 0 { flagStr = fmt.Sprintf("[%s]", strings.Join(flags, ",")) } switch c.Type { case TypeInteger, TypeReal, TypeBoolean, TypeString: typeStr = string(c.Type) case TypeUUID: if c.TypeObj != nil && c.TypeObj.Key != nil { // ignore err as we've already asserted this is a uuid reftable, _ := c.TypeObj.Key.RefTable() reftype := "" if s, err := c.TypeObj.Key.RefType(); err != nil { reftype = s } typeStr = fmt.Sprintf("uuid [%s (%s)]", reftable, reftype) } else { typeStr = "uuid" } case TypeEnum: typeStr = fmt.Sprintf("enum (type: %s): %v", c.TypeObj.Key.Type, c.TypeObj.Key.Enum) case TypeMap: typeStr = fmt.Sprintf("[%s]%s", c.TypeObj.Key.Type, c.TypeObj.Value.Type) case TypeSet: var keyStr string if c.TypeObj.Key.Type == TypeUUID { // ignore err as we've already asserted this is a uuid reftable, _ := c.TypeObj.Key.RefTable() reftype, _ := c.TypeObj.Key.RefType() keyStr = fmt.Sprintf(" [%s (%s)]", reftable, reftype) } else { keyStr = string(c.TypeObj.Key.Type) } typeStr = fmt.Sprintf("[]%s (min: %d, max: %d)", keyStr, c.TypeObj.Min(), c.TypeObj.Max()) default: panic(fmt.Sprintf("Unsupported type %s", c.Type)) } return strings.Join([]string{typeStr, flagStr}, " ") } func isAtomicType(atype string) bool { switch atype { case TypeInteger, TypeReal, TypeBoolean, TypeString, TypeUUID: return true default: return false } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/schema_test.go000066400000000000000000000547671464501522100234220ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "math" "reflect" "testing" "github.com/stretchr/testify/assert" ) func TestSchema(t *testing.T) { type schemaTest struct { name 
string schema []byte expectedErr bool expectedSchema DatabaseSchema } zero := 0 one := 1 two := 2 boolFalse := false schemaTestSuite := []schemaTest{ { name: "Simple AtomicType columns", schema: []byte(` {"name": "AtomicDB", "version": "0.0.0", "tables": { "atomicTable": { "columns": { "str": { "type": "string" }, "int": { "type": "integer" }, "float": { "type": "real" }, "uuid": { "type": "uuid", "mutable": false } } } } }`), expectedErr: false, expectedSchema: DatabaseSchema{ Name: "AtomicDB", Version: "0.0.0", Tables: map[string]TableSchema{ "atomicTable": { Columns: map[string]*ColumnSchema{ "str": { Type: TypeString, TypeObj: &ColumnType{Key: &BaseType{Type: TypeString}}, }, "int": { Type: TypeInteger, TypeObj: &ColumnType{Key: &BaseType{Type: TypeInteger}}, }, "float": { Type: TypeReal, TypeObj: &ColumnType{Key: &BaseType{Type: TypeReal}}, }, "uuid": { Type: TypeUUID, TypeObj: &ColumnType{Key: &BaseType{Type: TypeUUID}}, mutable: &boolFalse, }, }, }, }, }, }, { name: "Sets", schema: []byte(` {"name": "SetsDB", "version": "0.0.0", "tables": { "setTable": { "columns": { "single": { "type": { "key": {"type":"string"}, "max": 1, "min": 1 } }, "oneElem": { "type": { "key": {"type":"uuid"}, "max": 1, "min": 0 } }, "multipleElem": { "type": { "key": {"type":"real"}, "max": 2, "min": 0 } }, "unlimitedElem": { "type": { "key": {"type":"integer"}, "max": "unlimited", "min": 0 } }, "enumSet": { "type": { "key": { "type": "string", "enum": ["set", ["one", "two"]] }, "max": "unlimited", "min": 0 } } } } } }`), expectedErr: false, expectedSchema: DatabaseSchema{ Name: "SetsDB", Version: "0.0.0", Tables: map[string]TableSchema{ "setTable": { Columns: map[string]*ColumnSchema{ "single": { Type: TypeString, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeString}, min: &one, max: &one, }, }, "oneElem": { Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{Type: "uuid"}, max: &one, min: &zero, }, }, "multipleElem": { Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{Type: "real"}, max: &two, min: &zero, }, }, "unlimitedElem": { Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{Type: "integer"}, max: &Unlimited, min: &zero, }, }, "enumSet": { Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{ Type: "string", Enum: []interface{}{"one", "two"}, }, max: &Unlimited, min: &zero, }, }, }, }, }, }, }, { name: "Maps", schema: []byte(` {"name": "MapsDB", "version": "0.0.0", "tables": { "mapTable": { "columns": { "str_str": { "type": { "key": {"type":"string"}, "value": {"type":"string"} } }, "str_int": { "type": { "key": {"type":"string"}, "value": {"type":"integer"} } }, "int_real": { "type": { "key": {"type":"integer"}, "value": {"type":"real"} } }, "str_uuid": { "type": { "key": {"type":"string"}, "value": {"type":"uuid"} } }, "str_enum": { "type": { "key": {"type":"string"}, "value": { "type": "string", "enum": ["set", ["one", "two"]] } } } } } } }`), expectedErr: false, expectedSchema: DatabaseSchema{ Name: "MapsDB", Version: "0.0.0", Tables: map[string]TableSchema{ "mapTable": { Columns: map[string]*ColumnSchema{ "str_str": { Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{Type: "string"}, Value: &BaseType{Type: "string"}, }, }, "str_int": { Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{Type: "string"}, Value: &BaseType{Type: "integer"}, }, }, "int_real": { Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{Type: "integer"}, Value: &BaseType{Type: "real"}, }, }, "str_uuid": { Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{Type: "string"}, Value: &BaseType{Type: "uuid"}, }, }, 
"str_enum": { Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{ Type: "string", }, Value: &BaseType{ Type: "string", Enum: []interface{}{"one", "two"}, }, }, }, }, }, }, }, }, { name: "Invalid type", schema: []byte(` {"name": "ErrorDB", "version": "0.0.0", "tables": { "errorsTable": { "columns": { "wrongType": { "type": { "key": "unknown" } } } } } }`), expectedErr: true, }, { name: "Invalid json", schema: []byte(`invalid json`), expectedErr: true, }, } for _, test := range schemaTestSuite { t.Run(fmt.Sprintf("Schema Test %s", test.name), func(t *testing.T) { var schema DatabaseSchema err := json.Unmarshal(test.schema, &schema) if (err != nil) != test.expectedErr { t.Fatalf("Expected error to be %t, but got error: %s", test.expectedErr, err.Error()) } if err != nil { return } if !reflect.DeepEqual(test.expectedSchema, schema) { t.Errorf("expected schema to be %+#v, but got: %+#v", test.expectedSchema, schema) // Struct Introspection for debugging purposes for tname, table := range schema.Tables { for n, c := range table.Columns { ec := test.expectedSchema.Tables[tname].Columns[n] t.Logf("column name %s", n) t.Logf(" Expected: %+#v", ec) t.Logf(" Got: %+#v", c) if ec.TypeObj != nil { t.Logf(" Expected.Obj: %+#v", ec.TypeObj) if ec.TypeObj.Key != nil { t.Logf(" Expected.Obj.Key: %+#v", ec.TypeObj.Key) } if ec.TypeObj.Value != nil { t.Logf(" Expected.Obj.Value: %+#v", ec.TypeObj.Value) } } if c.TypeObj != nil { t.Logf(" Got.Obj: %+#v", c.TypeObj) if c.TypeObj.Key != nil { t.Logf(" Got.Obj.Key: %+#v", c.TypeObj.Key) } if c.TypeObj.Value != nil { t.Logf(" Got.Obj.Value: %+#v", c.TypeObj.Value) } } } } } b, err := json.Marshal(schema) assert.Nil(t, err) assert.JSONEq(t, string(test.schema), string(b)) }) } } func TestTable(t *testing.T) { schemaJ := []byte(`{"name": "TestSchema", "version": "0.0.0", "tables": { "test": { "columns": { "foo": { "type": { "key": "string", "value": "string" } }, "bar": { "type": "string" } } } } }`) var schema DatabaseSchema err := json.Unmarshal(schemaJ, &schema) assert.Nil(t, err) t.Run("GetTable_exists", func(t *testing.T) { table := schema.Table("test") assert.NotNil(t, table) }) t.Run("GetTable_not_exists", func(t *testing.T) { table := schema.Table("notexists") assert.Nil(t, table) }) t.Run("GetColumn_exists", func(t *testing.T) { table := schema.Table("test") assert.NotNil(t, table) column := table.Column("foo") assert.NotNil(t, column) }) t.Run("GetColumn_not_exists", func(t *testing.T) { table := schema.Table("test") assert.NotNil(t, table) column := table.Column("notexists") assert.Nil(t, column) }) t.Run("GetColumn_uuid", func(t *testing.T) { table := schema.Table("test") assert.NotNil(t, table) column := table.Column("_uuid") assert.NotNil(t, column) }) } func TestBaseTypeMarshalUnmarshalJSON(t *testing.T) { datapath := "Datapath" zero := 0 max := 4294967295 strong := "strong" tests := []struct { name string in []byte expected BaseType expectedJSON []byte wantErr bool }{ { "string", []byte(`"string"`), BaseType{Type: TypeString}, []byte(`{"type":"string"}`), false, }, { "integer", []byte(`"integer"`), BaseType{Type: TypeInteger}, []byte(`{"type":"integer"}`), false, }, { "boolean", []byte(`"boolean"`), BaseType{Type: TypeBoolean}, []byte(`{"type":"boolean"}`), false, }, { "real", []byte(`"real"`), BaseType{Type: TypeReal}, []byte(`{"type":"real"}`), false, }, { "uuid", []byte(`"uuid"`), BaseType{Type: TypeUUID}, []byte(`{"type":"uuid"}`), false, }, { "uuid", []byte(`{"type": "uuid", "refTable": "Datapath", "refType": "strong"}`), BaseType{Type: 
TypeUUID, refTable: &datapath, refType: &strong}, []byte(`{"type": "uuid", "refTable": "Datapath", "refType": "strong"}`), false, }, { "enum", []byte(`{"type": "string","enum": ["set", ["OpenFlow10","OpenFlow11","OpenFlow12","OpenFlow13","OpenFlow14","OpenFlow15"]]}`), BaseType{Type: TypeString, Enum: []interface{}{"OpenFlow10", "OpenFlow11", "OpenFlow12", "OpenFlow13", "OpenFlow14", "OpenFlow15"}}, []byte(`{"type": "string","enum": ["set", ["OpenFlow10","OpenFlow11","OpenFlow12","OpenFlow13","OpenFlow14","OpenFlow15"]]}`), false, }, { "int with min and max", []byte(`{"type":"integer","minInteger":0,"maxInteger": 4294967295}`), BaseType{Type: TypeInteger, minInteger: &zero, maxInteger: &max}, []byte(`{"type":"integer","minInteger":0,"maxInteger": 4294967295}`), false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var b BaseType err := b.UnmarshalJSON(tt.in) assert.Nil(t, err) assert.Equal(t, tt.expected, b) raw, err := b.MarshalJSON() assert.Nil(t, err) assert.JSONEq(t, string(tt.expectedJSON), string(raw)) }) } } func TestColumnTypeMarshalUnmarshalJSON(t *testing.T) { one := 1 tests := []struct { name string in []byte expected ColumnType expectedJSON []byte }{ { "string", []byte(`"string"`), ColumnType{ Key: &BaseType{Type: "string"}, }, []byte(`"string"`), }, { "map string string", []byte(`{"value":"string","key":{"type":"string"},"min":1,"max":1}`), ColumnType{ Key: &BaseType{Type: "string"}, Value: &BaseType{Type: "string"}, min: &one, max: &one, }, []byte(`{"key":{"type":"string"},"value":{"type":"string"},"min":1,"max":1}`), }, { "map str int", []byte(`{"key":"string","value":"integer","min":1,"max":1}`), ColumnType{ Key: &BaseType{Type: "string"}, Value: &BaseType{Type: "integer"}, min: &one, max: &one, }, []byte(`{"key":{"type": "string"},"value":{"type":"integer"},"min":1,"max":1}`), }, { "map int real", []byte(`{"key":{"type":"integer"},"value":{"type":"real"},"min":1,"max":"unlimited"}`), ColumnType{ Key: &BaseType{Type: "integer"}, Value: &BaseType{Type: "real"}, min: &one, max: &Unlimited, }, []byte(`{"key":{"type":"integer"},"value":{"type":"real"},"min":1,"max":"unlimited"}`), }, { "map str uuid", []byte(`{"key":{"type":"string"},"value":{"type":"uuid"},"min":1,"max":"unlimited"}`), ColumnType{ Key: &BaseType{Type: "string"}, Value: &BaseType{Type: "uuid"}, min: &one, max: &Unlimited, }, []byte(`{"key":{"type":"string"},"value":{"type":"uuid"},"min":1,"max":"unlimited"}`), }, { "string enum", []byte(`{"key":{"type":"string"},"value":{"type":"string","enum":["set", ["one","two"]]},"min":1,"max":1}`), ColumnType{ Key: &BaseType{ Type: "string", }, Value: &BaseType{ Type: "string", Enum: []interface{}{"one", "two"}, }, min: &one, max: &one, }, []byte(`{"key":{"type":"string"},"value":{"type":"string","enum":["set",["one","two"]]},"min":1,"max":1}`), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var c ColumnType err := c.UnmarshalJSON(tt.in) assert.Nil(t, err) assert.Equal(t, tt.expected, c) raw, err := c.MarshalJSON() assert.Nil(t, err) assert.JSONEq(t, string(tt.expectedJSON), string(raw)) }) } } func TestColumnSchemaMutable(t *testing.T) { boolTrue := true boolFalse := false m1 := ColumnSchema{mutable: nil} m2 := ColumnSchema{mutable: &boolTrue} m3 := ColumnSchema{mutable: &boolFalse} assert.True(t, m1.Mutable()) assert.True(t, m2.Mutable()) assert.False(t, m3.Mutable()) } func TestColumnSchemaEphemeral(t *testing.T) { boolTrue := true boolFalse := false e1 := ColumnSchema{ephemeral: nil} e2 := ColumnSchema{ephemeral: &boolTrue} e3 
:= ColumnSchema{ephemeral: &boolFalse} assert.False(t, e1.Ephemeral()) assert.True(t, e2.Ephemeral()) assert.False(t, e3.Ephemeral()) } func TestColumnSchemaMarshalUnmarshalJSON(t *testing.T) { datapath := "Datapath" unlimited := -1 zero := 0 one := 1 tests := []struct { name string in []byte expected ColumnSchema expectedJSON []byte }{ { "simple string", []byte(`{"type": "string"}`), ColumnSchema{ Type: TypeString, TypeObj: &ColumnType{Key: &BaseType{Type: TypeString}}, }, []byte(`{"type": "string"}`), }, { "map", []byte(`{"type":{"key": {"type": "string"},"value": {"type": "uuid","refTable": "Datapath"},"min": 0, "max": "unlimited"}}`), ColumnSchema{ Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeString}, Value: &BaseType{Type: TypeUUID, refTable: &datapath}, min: &zero, max: &unlimited, }, }, []byte(`{"type":{"key": {"type": "string"},"value": {"type": "uuid","refTable": "Datapath"},"min": 0, "max": "unlimited"}}`), }, { "set", []byte(`{"type": {"key": {"type": "uuid","refTable": "Datapath"},"min": 0, "max": "unlimited"}}`), ColumnSchema{ Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeUUID, refTable: &datapath}, min: &zero, max: &unlimited, }}, []byte(`{"type": {"key": {"type": "uuid","refTable": "Datapath"},"min": 0, "max": "unlimited"}}`), }, { "enum", []byte(`{"type": {"key": {"type": "string","enum": ["set", ["one", "two"]]},"max": 1,"min": 1}}`), ColumnSchema{ Type: TypeEnum, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeString, Enum: []interface{}{"one", "two"}}, max: &one, min: &one, }, }, []byte(`{"type": {"key": {"type": "string","enum": ["set", ["one", "two"]]},"max": 1,"min": 1}}`), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var c ColumnSchema err := c.UnmarshalJSON(tt.in) assert.Nil(t, err) assert.Equal(t, tt.expected, c) assert.True(t, c.Mutable()) raw, err := c.MarshalJSON() assert.Nil(t, err) assert.JSONEq(t, string(tt.expectedJSON), string(raw)) }) } } func TestBaseTypeSimpleAtomic(t *testing.T) { b := BaseType{Type: TypeString} assert.True(t, b.simpleAtomic()) max := 1024 b1 := BaseType{Type: TypeInteger, maxInteger: &max} assert.False(t, b1.simpleAtomic()) } func TestBaseTypeMinReal(t *testing.T) { value := float64(1024) tests := []struct { name string bt *BaseType want float64 wantErr bool }{ { "not a real", &BaseType{Type: TypeUUID}, 0, true, }, { "nil", &BaseType{Type: TypeReal}, math.SmallestNonzeroFloat64, false, }, { "set", &BaseType{Type: TypeReal, minReal: &value}, value, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.MinReal() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeMaxReal(t *testing.T) { value := float64(1024) tests := []struct { name string bt *BaseType want float64 wantErr bool }{ { "not a real", &BaseType{Type: TypeUUID}, 0, true, }, { "nil", &BaseType{Type: TypeReal}, math.MaxFloat64, false, }, { "set", &BaseType{Type: TypeReal, maxReal: &value}, value, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.MaxReal() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeMinInteger(t *testing.T) { value := 1024 tests := []struct { name string bt *BaseType want int wantErr bool }{ { "not an int", &BaseType{Type: TypeUUID}, 0, true, }, { "nil", &BaseType{Type: TypeInteger}, int(math.Pow(-2, 63)), false, }, { "set", &BaseType{Type: TypeInteger, minInteger: &value}, value, 
false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.MinInteger() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeMaxInteger(t *testing.T) { value := 1024 tests := []struct { name string bt *BaseType want int wantErr bool }{ { "not an int", &BaseType{Type: TypeUUID}, 0, true, }, { "nil", &BaseType{Type: TypeInteger}, int(math.Pow(2, 63)) - 1, false, }, { "set", &BaseType{Type: TypeInteger, maxInteger: &value}, value, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.MaxInteger() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeMinLength(t *testing.T) { value := 12 tests := []struct { name string bt *BaseType want int wantErr bool }{ { "not a string", &BaseType{Type: TypeUUID}, 0, true, }, { "nil", &BaseType{Type: TypeString}, 0, false, }, { "set", &BaseType{Type: TypeString, minLength: &value}, value, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.MinLength() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeMaxLength(t *testing.T) { value := 1024 tests := []struct { name string bt *BaseType want int wantErr bool }{ { "not a string", &BaseType{Type: TypeUUID}, 0, true, }, { "nil", &BaseType{Type: TypeString}, int(math.Pow(2, 63)) - 1, false, }, { "set", &BaseType{Type: TypeString, maxLength: &value}, value, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.MaxLength() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeRefTable(t *testing.T) { value := "Bridge" tests := []struct { name string bt *BaseType want string wantErr bool }{ { "not a uuid", &BaseType{Type: TypeString}, "", true, }, { "nil", &BaseType{Type: TypeUUID}, "", false, }, { "set", &BaseType{Type: TypeUUID, refTable: &value}, value, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.RefTable() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestBaseTypeRefType(t *testing.T) { value := "weak" tests := []struct { name string bt *BaseType want RefType wantErr bool }{ { "not a uuid", &BaseType{Type: TypeString}, "", true, }, { "nil", &BaseType{Type: TypeUUID}, Strong, false, }, { "set", &BaseType{Type: TypeUUID, refType: &value}, Weak, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.bt.RefType() if tt.wantErr { assert.Error(t, err) } else { assert.Nil(t, err) } assert.Equal(t, tt.want, got) }) } } func TestColumnSchema_String(t *testing.T) { datapath := "Connection" unlimited := -1 zero := 0 strong := "strong" weak := "weak" type fields struct { Type ExtendedType TypeObj *ColumnType ephemeral *bool mutable *bool } tests := []struct { name string fields fields want string }{ { "str", fields{ Type: TypeString, }, "string [M]", }, { "str map", fields{ Type: TypeMap, TypeObj: &ColumnType{ Key: &BaseType{ Type: TypeString, }, Value: &BaseType{ Type: TypeString, }, }, }, "[string]string [M]", }, { "ref", fields{ Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeUUID, refTable: &datapath}, min: &zero, max: &unlimited, }, }, "[] [Connection (strong)] (min: 0, max: -1) [M]", }, { "ref 1", fields{ Type: TypeSet, TypeObj: &ColumnType{ 
Key: &BaseType{Type: TypeUUID, refTable: &datapath, refType: &strong}, min: &zero, max: &unlimited, }, }, "[] [Connection (strong)] (min: 0, max: -1) [M]", }, { "ref 2", fields{ Type: TypeSet, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeUUID, refTable: &datapath, refType: &weak}, min: &zero, max: &unlimited, }, }, "[] [Connection (weak)] (min: 0, max: -1) [M]", }, { "enum", fields{ Type: TypeEnum, TypeObj: &ColumnType{ Key: &BaseType{Type: TypeString, Enum: []interface{}{"permit", "deny"}}, max: &unlimited, min: &zero, }, }, "enum (type: string): [permit deny] [M]", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { column := &ColumnSchema{ Type: tt.fields.Type, TypeObj: tt.fields.TypeObj, ephemeral: tt.fields.ephemeral, mutable: tt.fields.mutable, } if got := column.String(); got != tt.want { t.Errorf("String() = %v, want %v", got, tt.want) } }) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/serverdb/000077500000000000000000000000001464501522100223655ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/ovsdb/serverdb/.gitignore000066400000000000000000000000131464501522100243470ustar00rootroot00000000000000*.ovsschemagolang-github-ovn-org-libovsdb-0.7.0/ovsdb/serverdb/database.go000066400000000000000000000062631464501522100244670ustar00rootroot00000000000000// Code generated by "libovsdb.modelgen" // DO NOT EDIT. package serverdb import "github.com/ovn-org/libovsdb/model" const DatabaseTable = "Database" type ( DatabaseModel = string ) var ( DatabaseModelStandalone DatabaseModel = "standalone" DatabaseModelClustered DatabaseModel = "clustered" DatabaseModelRelay DatabaseModel = "relay" ) // Database defines an object in Database table type Database struct { UUID string `ovsdb:"_uuid"` Cid *string `ovsdb:"cid"` Connected bool `ovsdb:"connected"` Index *int `ovsdb:"index"` Leader bool `ovsdb:"leader"` Model DatabaseModel `ovsdb:"model"` Name string `ovsdb:"name"` Schema *string `ovsdb:"schema"` Sid *string `ovsdb:"sid"` } func (a *Database) GetUUID() string { return a.UUID } func (a *Database) GetCid() *string { return a.Cid } func copyDatabaseCid(a *string) *string { if a == nil { return nil } b := *a return &b } func equalDatabaseCid(a, b *string) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *Database) GetConnected() bool { return a.Connected } func (a *Database) GetIndex() *int { return a.Index } func copyDatabaseIndex(a *int) *int { if a == nil { return nil } b := *a return &b } func equalDatabaseIndex(a, b *int) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *Database) GetLeader() bool { return a.Leader } func (a *Database) GetModel() DatabaseModel { return a.Model } func (a *Database) GetName() string { return a.Name } func (a *Database) GetSchema() *string { return a.Schema } func copyDatabaseSchema(a *string) *string { if a == nil { return nil } b := *a return &b } func equalDatabaseSchema(a, b *string) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *Database) GetSid() *string { return a.Sid } func copyDatabaseSid(a *string) *string { if a == nil { return nil } b := *a return &b } func equalDatabaseSid(a, b *string) bool { if (a == nil) != (b == nil) { return false } if a == b { return true } return *a == *b } func (a *Database) DeepCopyInto(b *Database) { *b = *a b.Cid = copyDatabaseCid(a.Cid) b.Index = copyDatabaseIndex(a.Index) b.Schema = copyDatabaseSchema(a.Schema) b.Sid = 
copyDatabaseSid(a.Sid) } func (a *Database) DeepCopy() *Database { b := new(Database) a.DeepCopyInto(b) return b } func (a *Database) CloneModelInto(b model.Model) { c := b.(*Database) a.DeepCopyInto(c) } func (a *Database) CloneModel() model.Model { return a.DeepCopy() } func (a *Database) Equals(b *Database) bool { return a.UUID == b.UUID && equalDatabaseCid(a.Cid, b.Cid) && a.Connected == b.Connected && equalDatabaseIndex(a.Index, b.Index) && a.Leader == b.Leader && a.Model == b.Model && a.Name == b.Name && equalDatabaseSchema(a.Schema, b.Schema) && equalDatabaseSid(a.Sid, b.Sid) } func (a *Database) EqualsModel(b model.Model) bool { c := b.(*Database) return a.Equals(c) } var _ model.CloneableModel = &Database{} var _ model.ComparableModel = &Database{} golang-github-ovn-org-libovsdb-0.7.0/ovsdb/serverdb/gen.go000066400000000000000000000004001464501522100234570ustar00rootroot00000000000000package serverdb // server_model is a database model for the special _Server database that all // ovsdb instances export. It reports back status of the server process itself. //go:generate ../../bin/modelgen --extended -p serverdb -o . _server.ovsschema golang-github-ovn-org-libovsdb-0.7.0/ovsdb/serverdb/model.go000066400000000000000000000035471464501522100240250ustar00rootroot00000000000000// Code generated by "libovsdb.modelgen" // DO NOT EDIT. package serverdb import ( "encoding/json" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb func FullDatabaseModel() (model.ClientDBModel, error) { return model.NewClientDBModel("_Server", map[string]model.Model{ "Database": &Database{}, }) } var schema = `{ "name": "_Server", "version": "1.2.0", "tables": { "Database": { "columns": { "cid": { "type": { "key": { "type": "uuid" }, "min": 0, "max": 1 } }, "connected": { "type": "boolean" }, "index": { "type": { "key": { "type": "integer" }, "min": 0, "max": 1 } }, "leader": { "type": "boolean" }, "model": { "type": { "key": { "type": "string", "enum": [ "set", [ "standalone", "clustered", "relay" ] ] } } }, "name": { "type": "string" }, "schema": { "type": { "key": { "type": "string" }, "min": 0, "max": 1 } }, "sid": { "type": { "key": { "type": "uuid" }, "min": 0, "max": 1 } } }, "isRoot": true } } }` func Schema() ovsdb.DatabaseSchema { var s ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &s) if err != nil { panic(err) } return s } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/set.go000066400000000000000000000061741464501522100217030ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "reflect" ) // OvsSet is an OVSDB style set // RFC 7047 has a weird (but understandable) notation for set as described as : // Either an , representing a set with exactly one element, or // a 2-element JSON array that represents a database set value. The // first element of the array must be the string "set", and the // second element must be an array of zero or more s giving the // values in the set. All of the s must have the same type. 
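// Note, not part of the upstream comment: NewOvsSet below accepts a Go slice, an
// atomic value (string/number/bool), a UUID, or a pointer to any of those, and
// MarshalJSON follows the encoding described above, e.g. (sketch):
//
//	one, _ := NewOvsSet("foo")           // marshals to "foo"
//	many, _ := NewOvsSet([]int{1, 2, 3}) // marshals to ["set",[1,2,3]]
//	none, _ := NewOvsSet([]string{})     // marshals to ["set",[]]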
type OvsSet struct { GoSet []interface{} } // NewOvsSet creates a new OVSDB style set from a Go interface (object) func NewOvsSet(obj interface{}) (OvsSet, error) { ovsSet := make([]interface{}, 0) var v reflect.Value if reflect.TypeOf(obj).Kind() == reflect.Ptr { v = reflect.ValueOf(obj).Elem() if v.Kind() == reflect.Invalid { // must be a nil pointer, so just return an empty set return OvsSet{ovsSet}, nil } } else { v = reflect.ValueOf(obj) } switch v.Kind() { case reflect.Slice: for i := 0; i < v.Len(); i++ { ovsSet = append(ovsSet, v.Index(i).Interface()) } case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Bool: ovsSet = append(ovsSet, v.Interface()) case reflect.Struct: if v.Type() == reflect.TypeOf(UUID{}) { ovsSet = append(ovsSet, v.Interface()) } else { return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") } default: return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") } return OvsSet{ovsSet}, nil } // MarshalJSON wil marshal an OVSDB style Set in to a JSON byte array func (o OvsSet) MarshalJSON() ([]byte, error) { switch l := len(o.GoSet); { case l == 1: return json.Marshal(o.GoSet[0]) case l > 0: var oSet []interface{} oSet = append(oSet, "set") oSet = append(oSet, o.GoSet) return json.Marshal(oSet) } return []byte("[\"set\",[]]"), nil } // UnmarshalJSON will unmarshal a JSON byte array to an OVSDB style Set func (o *OvsSet) UnmarshalJSON(b []byte) (err error) { o.GoSet = make([]interface{}, 0) addToSet := func(o *OvsSet, v interface{}) error { goVal, err := ovsSliceToGoNotation(v) if err == nil { o.GoSet = append(o.GoSet, goVal) } return err } var inter interface{} if err = json.Unmarshal(b, &inter); err != nil { return err } switch inter.(type) { case []interface{}: var oSet []interface{} oSet = inter.([]interface{}) // it's a single uuid object if len(oSet) == 2 && (oSet[0] == "uuid" || oSet[0] == "named-uuid") { return addToSet(o, UUID{GoUUID: oSet[1].(string)}) } if oSet[0] != "set" { // it is a slice, but is not a set return &json.UnmarshalTypeError{Value: reflect.ValueOf(inter).String(), Type: reflect.TypeOf(*o)} } innerSet := oSet[1].([]interface{}) for _, val := range innerSet { err := addToSet(o, val) if err != nil { return err } } return err default: // it is a single object return addToSet(o, inter) } } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/set_test.go000066400000000000000000000127001464501522100227320ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "reflect" "strings" "testing" ) var testUUIDs = []string{ "38d9fa08-8e97-4402-9347-a610773b91cb", "aab50e87-1410-4c44-8c43-58aed178c833", "445d365f-1e5b-44ee-86e7-41605858df83", "a132ac6f-8b95-483b-8595-5453703e0617", "5e617059-c157-47ff-a4ea-2bc3f163b198", "faceebeb-4b52-4721-a879-c9f70e3f58a6", "1ff23dbb-41d1-423f-acbc-94b06c508926", "7e191fdb-228d-4bf3-9db4-883c8705ac7e", } func benchmarkSetMarshalJSON(s interface{}, b *testing.B) { testSet, err := NewOvsSet(s) if err != nil { b.Fatal(err) } for n := 0; n < b.N; n++ { _, err := json.Marshal(testSet) if err != nil { b.Fatal(err) } } } func BenchmarkSetMarshalJSONString1(b *testing.B) { benchmarkSetMarshalJSON("foo", b) } func BenchmarkSetMarshalJSONString2(b *testing.B) { benchmarkSetMarshalJSON([]string{"foo", "bar"}, b) } func BenchmarkSetMarshalJSONString3(b *testing.B) { 
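// Rough usage sketch for the helpers these benchmarks exercise (an
// illustration added to this test file, not original code): NewOvsSet wraps a
// Go scalar, slice, UUID, or a pointer to one of those, and MarshalJSON then
// emits the RFC 7047 wire form, for example:
//
//	s, _ := NewOvsSet([]string{"foo", "bar"})
//	two, _ := json.Marshal(s) // -> ["set",["foo","bar"]]
//	single, _ := NewOvsSet("foo")
//	one, _ := json.Marshal(single) // -> "foo"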
benchmarkSetMarshalJSON([]string{"foo", "bar", "baz"}, b)
}

func BenchmarkSetMarshalJSONString5(b *testing.B) {
    benchmarkSetMarshalJSON([]string{"foo", "bar", "baz", "quux", "foofoo"}, b)
}

func BenchmarkSetMarshalJSONString8(b *testing.B) {
    benchmarkSetMarshalJSON([]string{"foo", "bar", "baz", "quux", "foofoo", "foobar", "foobaz", "fooquux"}, b)
}

func BenchmarkSetMarshalJSONInt1(b *testing.B) {
    benchmarkSetMarshalJSON(1, b)
}

func BenchmarkSetMarshalJSONInt2(b *testing.B) {
    benchmarkSetMarshalJSON([]int{1, 2}, b)
}

func BenchmarkSetMarshalJSONInt3(b *testing.B) {
    benchmarkSetMarshalJSON([]int{1, 2, 3}, b)
}

func BenchmarkSetMarshalJSONInt5(b *testing.B) {
    benchmarkSetMarshalJSON([]int{1, 2, 3, 4, 5}, b)
}

func BenchmarkSetMarshalJSONInt8(b *testing.B) {
    benchmarkSetMarshalJSON([]int{1, 2, 3, 4, 5, 6, 7, 8}, b)
}

func BenchmarkSetMarshalJSONFloat1(b *testing.B) {
    benchmarkSetMarshalJSON(1.0, b)
}

func BenchmarkSetMarshalJSONFloat2(b *testing.B) {
    benchmarkSetMarshalJSON([]float64{1.0, 2.0}, b)
}

func BenchmarkSetMarshalJSONFloat3(b *testing.B) {
    benchmarkSetMarshalJSON([]float64{1.0, 2.0, 3.0}, b)
}

func BenchmarkSetMarshalJSONFloat5(b *testing.B) {
    benchmarkSetMarshalJSON([]float64{1.0, 2.0, 3.0, 4.0, 5.0}, b)
}

func BenchmarkSetMarshalJSONFloat8(b *testing.B) {
    benchmarkSetMarshalJSON([]float64{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}, b)
}

func BenchmarkSetMarshalJSONUUID1(b *testing.B) {
    benchmarkSetMarshalJSON(testUUIDs[0], b)
}

func BenchmarkSetMarshalJSONUUID2(b *testing.B) {
    benchmarkSetMarshalJSON(testUUIDs[0:2], b)
}

func BenchmarkSetMarshalJSONUUID3(b *testing.B) {
    benchmarkSetMarshalJSON(testUUIDs[0:3], b)
}

func BenchmarkSetMarshalJSONUUID5(b *testing.B) {
    benchmarkSetMarshalJSON(testUUIDs[0:5], b)
}

func BenchmarkSetMarshalJSONUUID8(b *testing.B) {
    benchmarkSetMarshalJSON(testUUIDs, b)
}

func benchmarkSetUnmarshalJSON(data []byte, b *testing.B) {
    for n := 0; n < b.N; n++ {
        var s OvsSet
        err := json.Unmarshal(data, &s)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func BenchmarkSetUnmarshalJSONString1(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`"foo"`), b)
}

func BenchmarkSetUnmarshalJSONString2(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`[ "set", ["foo","bar"] ]`), b)
}

func BenchmarkSetUnmarshalJSONString3(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`[ "set", ["foo","bar","baz"] ]`), b)
}

func BenchmarkSetUnmarshalJSONString5(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`[ "set", ["foo","bar","baz","quuz","foofoo"] ]`), b)
}

func BenchmarkSetUnmarshalJSONString8(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`[ "set", ["foo","bar","baz","quuz","foofoo","foobar","foobaz","fooquuz"] ]`), b)
}

func BenchmarkSetUnmarshalJSONInt1(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte("1"), b)
}

func BenchmarkSetUnmarshalJSONInt2(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`["set", [1, 2]]`), b)
}

func BenchmarkSetUnmarshalJSONInt3(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`["set", [1, 2, 3]]`), b)
}

func BenchmarkSetUnmarshalJSONInt5(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`["set", [1, 2, 3, 4, 5]]`), b)
}

func BenchmarkSetUnmarshalJSONInt8(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`["set", [1, 2, 3, 4, 5, 6, 7, 8]]`), b)
}

func BenchmarkSetUnmarshalJSONFloat1(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`1.0`), b)
}

func BenchmarkSetUnmarshalJSONFloat2(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`["set", [1.0, 2.0]]`), b)
}

func BenchmarkSetUnmarshalJSONFloat3(b *testing.B) {
    benchmarkSetUnmarshalJSON([]byte(`["set", [1.0, 2.0, 3.0]]`), b)
}

func
BenchmarkSetUnmarshalJSONFloat5(b *testing.B) { benchmarkSetUnmarshalJSON([]byte(`["set", [1.0, 2.0, 3.0, 4.0, 5.0]]`), b) } func BenchmarkSetUnmarshalJSONFloat8(b *testing.B) { benchmarkSetUnmarshalJSON([]byte(`["set", [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]]`), b) } func BenchmarkSetUnmarshalJSONUUID1(b *testing.B) { benchmarkSetUnmarshalJSON([]byte(`"`+testUUIDs[0]+`"`), b) } func BenchmarkSetUnmarshalJSONUUID2(b *testing.B) { benchmarkSetUnmarshalJSON(setify(testUUIDs[0:2]), b) } func BenchmarkSetUnmarshalJSONUUID3(b *testing.B) { benchmarkSetUnmarshalJSON(setify(testUUIDs[0:3]), b) } func BenchmarkSetUnmarshalJSONUUID5(b *testing.B) { benchmarkSetUnmarshalJSON(setify(testUUIDs[0:5]), b) } func BenchmarkSetUnmarshalJSONUUID8(b *testing.B) { benchmarkSetUnmarshalJSON(setify(testUUIDs), b) } func setify(i interface{}) []byte { var s []string iv := reflect.ValueOf(i) for j := 0; j < iv.Len(); j++ { s = append(s, fmt.Sprintf("%v", iv.Index(j))) } return []byte(fmt.Sprintf(`[ "set", [ "%s" ]]`, strings.Join(s, `","`))) } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/update3.go000066400000000000000000000017161464501522100224520ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" ) type MonitorCondSinceReply struct { Found bool LastTransactionID string Updates TableUpdates2 } func (m MonitorCondSinceReply) MarshalJSON() ([]byte, error) { v := []interface{}{m.Found, m.LastTransactionID, m.Updates} return json.Marshal(v) } func (m *MonitorCondSinceReply) UnmarshalJSON(b []byte) error { var v []json.RawMessage err := json.Unmarshal(b, &v) if err != nil { return err } if len(v) != 3 { return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v)) } var found bool err = json.Unmarshal(v[0], &found) if err != nil { return err } var lastTransactionID string err = json.Unmarshal(v[1], &lastTransactionID) if err != nil { return err } var updates TableUpdates2 err = json.Unmarshal(v[2], &updates) if err != nil { return err } m.Found = found m.LastTransactionID = lastTransactionID m.Updates = updates return nil } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/updates.go000066400000000000000000000016251464501522100225510ustar00rootroot00000000000000package ovsdb // TableUpdates is an object that maps from a table name to a // TableUpdate type TableUpdates map[string]TableUpdate // TableUpdate is an object that maps from the row's UUID to a // RowUpdate type TableUpdate map[string]*RowUpdate // RowUpdate represents a row update according to RFC7047 type RowUpdate struct { New *Row `json:"new,omitempty"` Old *Row `json:"old,omitempty"` } // Insert returns true if this is an update for an insert operation func (r RowUpdate) Insert() bool { return r.New != nil && r.Old == nil } // Modify returns true if this is an update for a modify operation func (r RowUpdate) Modify() bool { return r.New != nil && r.Old != nil } // Delete returns true if this is an update for a delete operation func (r RowUpdate) Delete() bool { return r.New == nil && r.Old != nil } func (r *RowUpdate) FromRowUpdate2(ru2 RowUpdate2) { r.Old = ru2.Old r.New = ru2.New } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/updates2.go000066400000000000000000000010621464501522100226260ustar00rootroot00000000000000package ovsdb // TableUpdates2 is an object that maps from a table name to a // TableUpdate2 type TableUpdates2 map[string]TableUpdate2 // TableUpdate2 is an object that maps from the row's UUID to a // RowUpdate2 type TableUpdate2 map[string]*RowUpdate2 // RowUpdate2 represents a row update according to 
ovsdb-server.7 type RowUpdate2 struct { Initial *Row `json:"initial,omitempty"` Insert *Row `json:"insert,omitempty"` Modify *Row `json:"modify,omitempty"` Delete *Row `json:"delete,omitempty"` Old *Row `json:"-"` New *Row `json:"-"` } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/updates_test.go000066400000000000000000000015171464501522100236100ustar00rootroot00000000000000package ovsdb import ( "testing" "github.com/stretchr/testify/assert" ) func TestRowUpdateInsert(t *testing.T) { u1 := RowUpdate{Old: nil, New: &Row{}} u2 := RowUpdate{Old: &Row{}, New: &Row{}} u3 := RowUpdate{Old: &Row{}, New: nil} assert.True(t, u1.Insert()) assert.False(t, u2.Insert()) assert.False(t, u3.Insert()) } func TestRowUpdateModify(t *testing.T) { u1 := RowUpdate{Old: nil, New: &Row{}} u2 := RowUpdate{Old: &Row{}, New: &Row{}} u3 := RowUpdate{Old: &Row{}, New: nil} assert.False(t, u1.Modify()) assert.True(t, u2.Modify()) assert.False(t, u3.Modify()) } func TestRowUpdateDelete(t *testing.T) { u1 := RowUpdate{Old: nil, New: &Row{}} u2 := RowUpdate{Old: &Row{}, New: &Row{}} u3 := RowUpdate{Old: &Row{}, New: nil} assert.False(t, u1.Delete()) assert.False(t, u2.Delete()) assert.True(t, u3.Delete()) } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/uuid.go000066400000000000000000000023561464501522100220540ustar00rootroot00000000000000package ovsdb import ( "encoding/json" "fmt" "regexp" ) var validUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) // UUID is a UUID according to RFC7047 type UUID struct { GoUUID string `json:"uuid"` } // MarshalJSON will marshal an OVSDB style UUID to a JSON encoded byte array func (u UUID) MarshalJSON() ([]byte, error) { var uuidSlice []string err := ValidateUUID(u.GoUUID) if err == nil { uuidSlice = []string{"uuid", u.GoUUID} } else { uuidSlice = []string{"named-uuid", u.GoUUID} } return json.Marshal(uuidSlice) } // UnmarshalJSON will unmarshal a JSON encoded byte array to a OVSDB style UUID func (u *UUID) UnmarshalJSON(b []byte) (err error) { var ovsUUID []string if err := json.Unmarshal(b, &ovsUUID); err == nil { u.GoUUID = ovsUUID[1] } return err } func ValidateUUID(uuid string) error { if len(uuid) != 36 { return fmt.Errorf("uuid exceeds 36 characters") } if !validUUID.MatchString(uuid) { return fmt.Errorf("uuid does not match regexp") } return nil } func IsNamedUUID(uuid string) bool { return len(uuid) > 0 && !validUUID.MatchString(uuid) } func IsValidUUID(uuid string) bool { if err := ValidateUUID(uuid); err != nil { return false } return true } golang-github-ovn-org-libovsdb-0.7.0/ovsdb/uuid_test.go000066400000000000000000000007001464501522100231020ustar00rootroot00000000000000package ovsdb import "testing" func TestUUIDIsNamed(t *testing.T) { tests := []struct { name string uuid string want bool }{ { "named", "foo", true, }, { "named", aUUID0, false, }, { "empty", "", false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := IsNamedUUID(tt.uuid); got != tt.want { t.Errorf("UUID.Named() = %v, want %v", got, tt.want) } }) } } golang-github-ovn-org-libovsdb-0.7.0/server/000077500000000000000000000000001464501522100207425ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/server/doc.go000066400000000000000000000004201464501522100220320ustar00rootroot00000000000000/* Package server provides an alpha-quality implementation of an OVSDB Server It is designed only to be used for testing the functionality of the client library such that assertions can be made on the cache that backs the client's monitor or 
the server */ package server golang-github-ovn-org-libovsdb-0.7.0/server/monitor.go000066400000000000000000000133051464501522100227620ustar00rootroot00000000000000package server import ( "encoding/json" "log" "sync" "github.com/cenkalti/rpc2" "github.com/google/uuid" "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/ovsdb" ) // connectionMonitors maps a connection to a map or monitors type connectionMonitors struct { monitors map[string]*monitor mu sync.RWMutex } func newConnectionMonitors() *connectionMonitors { return &connectionMonitors{ monitors: make(map[string]*monitor), mu: sync.RWMutex{}, } } // monitor represents a connection to a client where db changes // will be reflected type monitor struct { id string kind monitorKind request map[string]*ovsdb.MonitorRequest client *rpc2.Client } type monitorKind int const ( monitorKindOriginal monitorKind = iota monitorKindConditional monitorKindConditionalSince ) func newMonitor(id string, request map[string]*ovsdb.MonitorRequest, client *rpc2.Client) *monitor { m := &monitor{ id: id, kind: monitorKindOriginal, request: request, client: client, } return m } func newConditionalMonitor(id string, request map[string]*ovsdb.MonitorRequest, client *rpc2.Client) *monitor { m := &monitor{ id: id, kind: monitorKindConditional, request: request, client: client, } return m } func newConditionalSinceMonitor(id string, request map[string]*ovsdb.MonitorRequest, client *rpc2.Client) *monitor { m := &monitor{ id: id, kind: monitorKindConditional, request: request, client: client, } return m } // Send will send an update if it matches the tables and monitor select arguments // we take the update by value (not reference) so we can mutate it in place before // queuing it for dispatch func (m *monitor) Send(update database.Update) { // remove updates for tables that we aren't watching tu := m.filter(update) if len(tu) == 0 { return } args := []interface{}{json.RawMessage([]byte(m.id)), tu} var reply interface{} err := m.client.Call("update2", args, &reply) if err != nil { log.Printf("client error handling update rpc: %v", err) } } // Send2 will send an update if it matches the tables and monitor select arguments // we take the update by value (not reference) so we can mutate it in place before // queuing it for dispatch func (m *monitor) Send2(update database.Update) { // remove updates for tables that we aren't watching tu := m.filter2(update) if len(tu) == 0 { return } args := []interface{}{json.RawMessage([]byte(m.id)), tu} var reply interface{} err := m.client.Call("update2", args, &reply) if err != nil { log.Printf("client error handling update2 rpc: %v", err) } } // Send3 will send an update if it matches the tables and monitor select arguments // we take the update by value (not reference) so we can mutate it in place before // queuing it for dispatch func (m *monitor) Send3(id uuid.UUID, update database.Update) { // remove updates for tables that we aren't watching tu := m.filter2(update) if len(tu) == 0 { return } args := []interface{}{json.RawMessage([]byte(m.id)), id.String(), tu} var reply interface{} err := m.client.Call("update2", args, &reply) if err != nil { log.Printf("client error handling update3 rpc: %v", err) } } func filterColumns(row *ovsdb.Row, columns map[string]bool) *ovsdb.Row { if row == nil { return nil } new := make(ovsdb.Row, len(*row)) for k, v := range *row { if _, ok := columns[k]; ok { new[k] = v } } return &new } func (m *monitor) filter(update database.Update) ovsdb.TableUpdates { // remove updates for 
tables that we aren't watching tables := update.GetUpdatedTables() tus := make(ovsdb.TableUpdates, len(tables)) for _, table := range tables { if _, ok := m.request[table]; len(m.request) > 0 && !ok { // only remove updates for tables that were not requested if other // tables were requested, otherwise all tables are watched. continue } tu := ovsdb.TableUpdate{} cols := make(map[string]bool) cols["_uuid"] = true for _, c := range m.request[table].Columns { cols[c] = true } _ = update.ForEachRowUpdate(table, func(uuid string, ru2 ovsdb.RowUpdate2) error { ru := &ovsdb.RowUpdate{} ru.FromRowUpdate2(ru2) switch { case ru.Insert() && m.request[table].Select.Insert(): fallthrough case ru.Modify() && m.request[table].Select.Modify(): fallthrough case ru.Delete() && m.request[table].Select.Delete(): if len(cols) == 0 { return nil } ru.New = filterColumns(ru.New, cols) ru.Old = filterColumns(ru.Old, cols) tu[uuid] = ru } return nil }) tus[table] = tu } return tus } func (m *monitor) filter2(update database.Update) ovsdb.TableUpdates2 { // remove updates for tables that we aren't watching tables := update.GetUpdatedTables() tus2 := make(ovsdb.TableUpdates2, len(tables)) for _, table := range tables { if _, ok := m.request[table]; len(m.request) > 0 && !ok { // only remove updates for tables that were not requested if other // tables were requested, otherwise all tables are watched. continue } tu2 := ovsdb.TableUpdate2{} cols := make(map[string]bool) cols["_uuid"] = true for _, c := range m.request[table].Columns { cols[c] = true } _ = update.ForEachRowUpdate(table, func(uuid string, ru2 ovsdb.RowUpdate2) error { switch { case ru2.Insert != nil && m.request[table].Select.Insert(): fallthrough case ru2.Modify != nil && m.request[table].Select.Modify(): fallthrough case ru2.Delete != nil && m.request[table].Select.Delete(): if len(cols) == 0 { return nil } ru2.Insert = filterColumns(ru2.Insert, cols) ru2.Modify = filterColumns(ru2.Modify, cols) ru2.Delete = filterColumns(ru2.Delete, cols) tu2[uuid] = &ru2 } return nil }) tus2[table] = tu2 } return tus2 } golang-github-ovn-org-libovsdb-0.7.0/server/monitor_test.go000066400000000000000000000073601464501522100240250ustar00rootroot00000000000000package server import ( "testing" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/test" "github.com/ovn-org/libovsdb/updates" "github.com/stretchr/testify/assert" ) func TestMonitorFilter(t *testing.T) { monitor := monitor{ request: map[string]*ovsdb.MonitorRequest{ "Bridge": { Columns: []string{"name"}, Select: ovsdb.NewDefaultMonitorSelect(), }, }, } bridgeRow := ovsdb.Row{ "_uuid": "foo", "name": "bar", } bridgeExternalIds, _ := ovsdb.NewOvsMap(map[string]string{"foo": "bar"}) bridgeRowWithIDs := ovsdb.Row{ "_uuid": "foo", "name": "bar", "external_ids": bridgeExternalIds, } tests := []struct { name string update ovsdb.TableUpdates2 expected ovsdb.TableUpdates2 }{ { "not filtered", ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, }, { "removed table", ovsdb.TableUpdates2{ "Open_vSwitch": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, ovsdb.TableUpdates2{}, }, { "removed column", ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRowWithIDs, }, }, }, ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, }, } for _, tt 
:= range tests { t.Run(tt.name, func(t *testing.T) { dbModel, err := test.GetModel() assert.NoError(t, err) update := updates.ModelUpdates{} for table, rows := range tt.update { for uuid, row := range rows { err := update.AddRowUpdate2(dbModel, table, uuid, nil, *row) assert.NoError(t, err) } } tu := monitor.filter2(updates.NewDatabaseUpdate(update, nil)) assert.Equal(t, tt.expected, tu) }) } } func TestMonitorFilter2(t *testing.T) { monitor := monitor{ request: map[string]*ovsdb.MonitorRequest{ "Bridge": { Columns: []string{"name"}, Select: ovsdb.NewDefaultMonitorSelect(), }, }, } bridgeRow := ovsdb.Row{ "name": "bar", } bridgeExternalIds, _ := ovsdb.NewOvsMap(map[string]string{"foo": "bar"}) bridgeRowWithIDs := ovsdb.Row{ "name": "bar", "external_ids": bridgeExternalIds, } tests := []struct { name string update ovsdb.TableUpdates2 expected ovsdb.TableUpdates2 }{ { "not filtered", ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, }, { "removed table", ovsdb.TableUpdates2{ "Open_vSwitch": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, ovsdb.TableUpdates2{}, }, { "removed column", ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRowWithIDs, }, }, }, ovsdb.TableUpdates2{ "Bridge": ovsdb.TableUpdate2{ "foo": &ovsdb.RowUpdate2{ Insert: &bridgeRow, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dbModel, err := test.GetModel() assert.NoError(t, err) update := updates.ModelUpdates{} for table, rows := range tt.update { for uuid, row := range rows { err := update.AddRowUpdate2(dbModel, table, uuid, nil, *row) assert.NoError(t, err) } } tu := monitor.filter2(updates.NewDatabaseUpdate(update, nil)) assert.Equal(t, tt.expected, tu) }) } } golang-github-ovn-org-libovsdb-0.7.0/server/server.go000066400000000000000000000277041464501522100226110ustar00rootroot00000000000000package server import ( "encoding/json" "errors" "fmt" "log" "net" "os" "sync" "github.com/cenkalti/rpc2" "github.com/cenkalti/rpc2/jsonrpc" "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // OvsdbServer is an ovsdb server type OvsdbServer struct { srv *rpc2.Server listener net.Listener done chan struct{} db database.Database ready bool doEcho bool readyMutex sync.RWMutex models map[string]model.DatabaseModel modelsMutex sync.RWMutex monitors map[*rpc2.Client]*connectionMonitors monitorMutex sync.RWMutex logger logr.Logger txnMutex sync.Mutex } func init() { stdr.SetVerbosity(5) } // NewOvsdbServer returns a new OvsdbServer func NewOvsdbServer(db database.Database, models ...model.DatabaseModel) (*OvsdbServer, error) { l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("server") o := &OvsdbServer{ done: make(chan struct{}, 1), doEcho: true, db: db, models: make(map[string]model.DatabaseModel), modelsMutex: sync.RWMutex{}, monitors: make(map[*rpc2.Client]*connectionMonitors), monitorMutex: sync.RWMutex{}, logger: l, } o.modelsMutex.Lock() for _, model := range models { o.models[model.Schema.Name] = model } o.modelsMutex.Unlock() for database, model := range o.models { if err := o.db.CreateDatabase(database, model.Schema); err != nil { return nil, err } } o.srv = rpc2.NewServer() 
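// The handlers registered below make up the OVSDB JSON-RPC surface of this
// test server: the RFC 7047 methods (list_dbs, get_schema, transact, monitor,
// monitor_cancel, echo, ...) plus the monitor_cond and monitor_cond_since
// extensions, so a libovsdb client pointed at this server exercises the same
// call paths it would use against a real ovsdb-server.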
o.srv.Handle("list_dbs", o.ListDatabases) o.srv.Handle("get_schema", o.GetSchema) o.srv.Handle("transact", o.Transact) o.srv.Handle("cancel", o.Cancel) o.srv.Handle("monitor", o.Monitor) o.srv.Handle("monitor_cond", o.MonitorCond) o.srv.Handle("monitor_cond_since", o.MonitorCondSince) o.srv.Handle("monitor_cancel", o.MonitorCancel) o.srv.Handle("steal", o.Steal) o.srv.Handle("unlock", o.Unlock) o.srv.Handle("echo", o.Echo) return o, nil } // OnConnect registers a function to run when a client connects. func (o *OvsdbServer) OnConnect(f func(*rpc2.Client)) { o.srv.OnConnect(f) } // OnDisConnect registers a function to run when a client disconnects. func (o *OvsdbServer) OnDisConnect(f func(*rpc2.Client)) { o.srv.OnDisconnect(f) } func (o *OvsdbServer) DoEcho(ok bool) { o.readyMutex.Lock() o.doEcho = ok o.readyMutex.Unlock() } // Serve starts the OVSDB server on the given path and protocol func (o *OvsdbServer) Serve(protocol string, path string) error { var err error o.listener, err = net.Listen(protocol, path) if err != nil { return err } o.readyMutex.Lock() o.ready = true o.readyMutex.Unlock() for { conn, err := o.listener.Accept() if err != nil { if !o.Ready() { return nil } return err } // TODO: Need to cleanup when connection is closed go o.srv.ServeCodec(jsonrpc.NewJSONCodec(conn)) } } func isClosed(ch <-chan struct{}) bool { select { case <-ch: return true default: } return false } // Close closes the OvsdbServer func (o *OvsdbServer) Close() { o.readyMutex.Lock() o.ready = false o.readyMutex.Unlock() // Only close the listener if Serve() has been called if o.listener != nil { if err := o.listener.Close(); err != nil { o.logger.Error(err, "failed to close listener") } } if !isClosed(o.done) { close(o.done) } } // Ready returns true if a server is ready to handle connections func (o *OvsdbServer) Ready() bool { o.readyMutex.RLock() defer o.readyMutex.RUnlock() return o.ready } // ListDatabases lists the databases in the current system func (o *OvsdbServer) ListDatabases(client *rpc2.Client, args []interface{}, reply *[]string) error { dbs := []string{} o.modelsMutex.RLock() for _, db := range o.models { dbs = append(dbs, db.Schema.Name) } o.modelsMutex.RUnlock() *reply = dbs return nil } func (o *OvsdbServer) GetSchema(client *rpc2.Client, args []interface{}, reply *ovsdb.DatabaseSchema, ) error { db, ok := args[0].(string) if !ok { return fmt.Errorf("database %v is not a string", args[0]) } o.modelsMutex.RLock() model, ok := o.models[db] if !ok { return fmt.Errorf("database %s does not exist", db) } o.modelsMutex.RUnlock() *reply = model.Schema return nil } // Transact issues a new database transaction and returns the results func (o *OvsdbServer) Transact(client *rpc2.Client, args []json.RawMessage, reply *[]*ovsdb.OperationResult) error { // While allowing other rpc handlers to run in parallel, this ovsdb server expects transactions // to be serialized. The following mutex ensures that. 
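// (Handlers run in parallel -- rpc2 dispatches each incoming request on its
// own goroutine -- so without this lock two clients issuing transactions
// concurrently could interleave and observe partially applied state.)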
// Ref: https://github.com/cenkalti/rpc2/blob/c1acbc6ec984b7ae6830b6a36b62f008d5aefc4c/client.go#L187 o.txnMutex.Lock() defer o.txnMutex.Unlock() if len(args) < 2 { return fmt.Errorf("not enough args") } var db string err := json.Unmarshal(args[0], &db) if err != nil { return fmt.Errorf("database %v is not a string", args[0]) } var ops []ovsdb.Operation for i := 1; i < len(args); i++ { var op ovsdb.Operation err = json.Unmarshal(args[i], &op) if err != nil { return err } ops = append(ops, op) } response, updates := o.transact(db, ops) *reply = response for _, operResult := range response { if operResult.Error != "" { o.logger.Error(errors.New("failed to process operation"), "Skipping transaction DB commit due to error", "operations", ops, "results", response, "operation error", operResult.Error) return nil } } transactionID := uuid.New() o.processMonitors(transactionID, updates) return o.db.Commit(db, transactionID, updates) } func (o *OvsdbServer) transact(name string, operations []ovsdb.Operation) ([]*ovsdb.OperationResult, database.Update) { transaction := o.db.NewTransaction(name) return transaction.Transact(operations...) } // Cancel cancels the last transaction func (o *OvsdbServer) Cancel(client *rpc2.Client, args []interface{}, reply *[]interface{}) error { return fmt.Errorf("not implemented") } // Monitor monitors a given database table and provides updates to the client via an RPC callback func (o *OvsdbServer) Monitor(client *rpc2.Client, args []json.RawMessage, reply *ovsdb.TableUpdates) error { var db string if err := json.Unmarshal(args[0], &db); err != nil { return fmt.Errorf("database %v is not a string", args[0]) } if !o.db.Exists(db) { return fmt.Errorf("db does not exist") } value := string(args[1]) var request map[string]*ovsdb.MonitorRequest if err := json.Unmarshal(args[2], &request); err != nil { return err } o.monitorMutex.Lock() defer o.monitorMutex.Unlock() clientMonitors, ok := o.monitors[client] if !ok { o.monitors[client] = newConnectionMonitors() } else { if _, ok := clientMonitors.monitors[value]; ok { return fmt.Errorf("monitor with that value already exists") } } transaction := o.db.NewTransaction(db) tableUpdates := make(ovsdb.TableUpdates) for t, request := range request { op := ovsdb.Operation{Op: ovsdb.OperationSelect, Table: t, Columns: request.Columns} result, _ := transaction.Transact(op) if len(result) == 0 || len(result[0].Rows) == 0 { continue } rows := result[0].Rows tableUpdates[t] = make(ovsdb.TableUpdate, len(rows)) for i := range rows { uuid := rows[i]["_uuid"].(ovsdb.UUID).GoUUID tableUpdates[t][uuid] = &ovsdb.RowUpdate{New: &rows[i]} } } *reply = tableUpdates o.monitors[client].monitors[value] = newMonitor(value, request, client) return nil } // MonitorCond monitors a given database table and provides updates to the client via an RPC callback func (o *OvsdbServer) MonitorCond(client *rpc2.Client, args []json.RawMessage, reply *ovsdb.TableUpdates2) error { var db string if err := json.Unmarshal(args[0], &db); err != nil { return fmt.Errorf("database %v is not a string", args[0]) } if !o.db.Exists(db) { return fmt.Errorf("db does not exist") } value := string(args[1]) var request map[string]*ovsdb.MonitorRequest if err := json.Unmarshal(args[2], &request); err != nil { return err } o.monitorMutex.Lock() defer o.monitorMutex.Unlock() clientMonitors, ok := o.monitors[client] if !ok { o.monitors[client] = newConnectionMonitors() } else { if _, ok := clientMonitors.monitors[value]; ok { return fmt.Errorf("monitor with that value already exists") 
} } transaction := o.db.NewTransaction(db) tableUpdates := make(ovsdb.TableUpdates2) for t, request := range request { op := ovsdb.Operation{Op: ovsdb.OperationSelect, Table: t, Columns: request.Columns} result, _ := transaction.Transact(op) if len(result) == 0 || len(result[0].Rows) == 0 { continue } rows := result[0].Rows tableUpdates[t] = make(ovsdb.TableUpdate2, len(rows)) for i := range rows { uuid := rows[i]["_uuid"].(ovsdb.UUID).GoUUID tableUpdates[t][uuid] = &ovsdb.RowUpdate2{Initial: &rows[i]} } } *reply = tableUpdates o.monitors[client].monitors[value] = newConditionalMonitor(value, request, client) return nil } // MonitorCondSince monitors a given database table and provides updates to the client via an RPC callback func (o *OvsdbServer) MonitorCondSince(client *rpc2.Client, args []json.RawMessage, reply *ovsdb.MonitorCondSinceReply) error { var db string if err := json.Unmarshal(args[0], &db); err != nil { return fmt.Errorf("database %v is not a string", args[0]) } if !o.db.Exists(db) { return fmt.Errorf("db does not exist") } value := string(args[1]) var request map[string]*ovsdb.MonitorRequest if err := json.Unmarshal(args[2], &request); err != nil { return err } o.monitorMutex.Lock() defer o.monitorMutex.Unlock() clientMonitors, ok := o.monitors[client] if !ok { o.monitors[client] = newConnectionMonitors() } else { if _, ok := clientMonitors.monitors[value]; ok { return fmt.Errorf("monitor with that value already exists") } } transaction := o.db.NewTransaction(db) tableUpdates := make(ovsdb.TableUpdates2) for t, request := range request { op := ovsdb.Operation{Op: ovsdb.OperationSelect, Table: t, Columns: request.Columns} result, _ := transaction.Transact(op) if len(result) == 0 || len(result[0].Rows) == 0 { continue } rows := result[0].Rows tableUpdates[t] = make(ovsdb.TableUpdate2, len(rows)) for i := range rows { uuid := rows[i]["_uuid"].(ovsdb.UUID).GoUUID tableUpdates[t][uuid] = &ovsdb.RowUpdate2{Initial: &rows[i]} } } *reply = ovsdb.MonitorCondSinceReply{Found: false, LastTransactionID: "00000000-0000-0000-000000000000", Updates: tableUpdates} o.monitors[client].monitors[value] = newConditionalSinceMonitor(value, request, client) return nil } // MonitorCancel cancels a monitor on a given table func (o *OvsdbServer) MonitorCancel(client *rpc2.Client, args []interface{}, reply *[]interface{}) error { return fmt.Errorf("not implemented") } // Lock acquires a lock on a table for a the client func (o *OvsdbServer) Lock(client *rpc2.Client, args []interface{}, reply *[]interface{}) error { return fmt.Errorf("not implemented") } // Steal steals a lock for a client func (o *OvsdbServer) Steal(client *rpc2.Client, args []interface{}, reply *[]interface{}) error { return fmt.Errorf("not implemented") } // Unlock releases a lock for a client func (o *OvsdbServer) Unlock(client *rpc2.Client, args []interface{}, reply *[]interface{}) error { return fmt.Errorf("not implemented") } // Echo tests the liveness of the connection func (o *OvsdbServer) Echo(client *rpc2.Client, args []interface{}, reply *[]interface{}) error { o.readyMutex.Lock() defer o.readyMutex.Unlock() if !o.doEcho { return fmt.Errorf("no echo reply") } echoReply := make([]interface{}, len(args)) copy(echoReply, args) *reply = echoReply return nil } func (o *OvsdbServer) processMonitors(id uuid.UUID, update database.Update) { o.monitorMutex.RLock() for _, c := range o.monitors { for _, m := range c.monitors { switch m.kind { case monitorKindOriginal: m.Send(update) case monitorKindConditional: m.Send2(update) case 
monitorKindConditionalSince: m.Send3(id, update) } } } o.monitorMutex.RUnlock() } golang-github-ovn-org-libovsdb-0.7.0/server/server_integration_test.go000066400000000000000000000574251464501522100262560ustar00rootroot00000000000000package server import ( "context" "fmt" "math/rand" "os" "reflect" "sync" "testing" "time" "github.com/google/uuid" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/database/inmemory" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" . "github.com/ovn-org/libovsdb/test" ) func buildTestServerAndClient(t *testing.T) (client.Client, func()) { dbModel, err := GetModel() require.NoError(t, err) ovsDB := inmemory.NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) schema := dbModel.Schema defDB := dbModel.Client() rand.Seed(time.Now().UnixNano()) tmpfile := fmt.Sprintf("/tmp/ovsdb-%d.sock", rand.Intn(10000)) defer os.Remove(tmpfile) dbModel, errs := model.NewDatabaseModel(schema, defDB) require.Empty(t, errs) server, err := NewOvsdbServer(ovsDB, dbModel) assert.Nil(t, err) go func(t *testing.T, o *OvsdbServer) { if err := o.Serve("unix", tmpfile); err != nil { t.Error(err) } }(t, server) defer server.Close() require.Eventually(t, func() bool { return server.Ready() }, 1*time.Second, 10*time.Millisecond) ovs, err := client.NewOVSDBClient(defDB, client.WithEndpoint(fmt.Sprintf("unix:%s", tmpfile))) require.NoError(t, err) err = ovs.Connect(context.Background()) require.NoError(t, err) return ovs, func() { ovs.Disconnect() server.Close() } } func TestClientServerEcho(t *testing.T) { ovs, close := buildTestServerAndClient(t) defer close() err := ovs.Echo(context.Background()) assert.Nil(t, err) } func TestClientServerInsert(t *testing.T) { ovs, close := buildTestServerAndClient(t) defer close() _, err := ovs.MonitorAll(context.Background()) require.NoError(t, err) wallace := "wallace" bridgeRow := &BridgeType{ Name: "foo", DatapathType: "bar", DatapathID: &wallace, ExternalIds: map[string]string{"go": "awesome", "docker": "made-for-each-other"}, } ops, err := ovs.Create(bridgeRow) require.Nil(t, err) reply, err := ovs.Transact(context.Background(), ops...) 
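// Transact only reports transport/RPC-level failures through err; failures of
// the individual operations (constraint violations, bad references, ...) are
// carried inside reply, which is why the test follows up with
// CheckOperationResults below.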
assert.Nil(t, err) opErr, err := ovsdb.CheckOperationResults(reply, ops) assert.NoErrorf(t, err, "%+v", opErr) uuid := reply[0].UUID.GoUUID require.Eventually(t, func() bool { br := &BridgeType{UUID: uuid} err := ovs.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) br := &BridgeType{UUID: uuid} err = ovs.Get(context.Background(), br) require.NoError(t, err) assert.Equal(t, bridgeRow.Name, br.Name) assert.Equal(t, bridgeRow.ExternalIds, br.ExternalIds) assert.Equal(t, bridgeRow.DatapathType, br.DatapathType) assert.Equal(t, *bridgeRow.DatapathID, wallace) } func TestClientServerMonitor(t *testing.T) { ovs, close := buildTestServerAndClient(t) defer close() ovsRow := &OvsType{ UUID: "ovs", } bridgeRow := &BridgeType{ UUID: "foo", Name: "foo", ExternalIds: map[string]string{"go": "awesome", "docker": "made-for-each-other"}, } seenMutex := sync.RWMutex{} seenInsert := false seenMutation := false seenInitialOvs := false ovs.Cache().AddEventHandler(&cache.EventHandlerFuncs{ AddFunc: func(table string, model model.Model) { if table == "Bridge" { br := model.(*BridgeType) assert.Equal(t, bridgeRow.Name, br.Name) assert.Equal(t, bridgeRow.ExternalIds, br.ExternalIds) seenMutex.Lock() seenInsert = true seenMutex.Unlock() } if table == "Open_vSwitch" { seenMutex.Lock() seenInitialOvs = true seenMutex.Unlock() } }, UpdateFunc: func(table string, old, new model.Model) { if table == "Open_vSwitch" { ov := new.(*OvsType) assert.Equal(t, 1, len(ov.Bridges)) seenMutex.Lock() seenMutation = true seenMutex.Unlock() } }, }) var ops []ovsdb.Operation ovsOps, err := ovs.Create(ovsRow) require.Nil(t, err) reply, err := ovs.Transact(context.Background(), ovsOps...) require.Nil(t, err) _, err = ovsdb.CheckOperationResults(reply, ovsOps) require.Nil(t, err) require.NotEmpty(t, reply[0].UUID.GoUUID) ovsRow.UUID = reply[0].UUID.GoUUID _, err = ovs.MonitorAll(context.Background()) require.Nil(t, err) require.Eventually(t, func() bool { seenMutex.RLock() defer seenMutex.RUnlock() return seenInitialOvs }, 1*time.Second, 10*time.Millisecond) bridgeOps, err := ovs.Create(bridgeRow) require.Nil(t, err) ops = append(ops, bridgeOps...) mutateOps, err := ovs.Where(ovsRow).Mutate(ovsRow, model.Mutation{ Field: &ovsRow.Bridges, Mutator: ovsdb.MutateOperationInsert, Value: []string{"foo"}, }) require.Nil(t, err) ops = append(ops, mutateOps...) reply, err = ovs.Transact(context.Background(), ops...) require.Nil(t, err) _, err = ovsdb.CheckOperationResults(reply, ops) assert.Nil(t, err) assert.Equal(t, 1, reply[1].Count) assert.Eventually(t, func() bool { seenMutex.RLock() defer seenMutex.RUnlock() return seenInsert }, 1*time.Second, 10*time.Millisecond) assert.Eventually(t, func() bool { seenMutex.RLock() defer seenMutex.RUnlock() return seenMutation }, 1*time.Second, 10*time.Millisecond) } func TestClientServerInsertAndDelete(t *testing.T) { ovs, close := buildTestServerAndClient(t) defer close() _, err := ovs.MonitorAll(context.Background()) require.NoError(t, err) bridgeRow := &BridgeType{ Name: "foo", ExternalIds: map[string]string{"go": "awesome", "docker": "made-for-each-other"}, } ops, err := ovs.Create(bridgeRow) require.Nil(t, err) reply, err := ovs.Transact(context.Background(), ops...) 
require.Nil(t, err) _, err = ovsdb.CheckOperationResults(reply, ops) require.Nil(t, err) uuid := reply[0].UUID.GoUUID assert.Eventually(t, func() bool { br := &BridgeType{UUID: uuid} err := ovs.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) bridgeRow.UUID = uuid deleteOp, err := ovs.Where(bridgeRow).Delete() require.Nil(t, err) reply, err = ovs.Transact(context.Background(), deleteOp...) assert.Nil(t, err) _, err = ovsdb.CheckOperationResults(reply, ops) assert.Nil(t, err) assert.Equal(t, 1, reply[0].Count) } func TestClientServerInsertDuplicate(t *testing.T) { ovs, close := buildTestServerAndClient(t) defer close() bridgeRow := &BridgeType{ Name: "foo", ExternalIds: map[string]string{"go": "awesome", "docker": "made-for-each-other"}, } ops, err := ovs.Create(bridgeRow) require.Nil(t, err) reply, err := ovs.Transact(context.Background(), ops...) require.Nil(t, err) _, err = ovsdb.CheckOperationResults(reply, ops) require.Nil(t, err) // duplicate reply, err = ovs.Transact(context.Background(), ops...) require.Nil(t, err) opErrs, err := ovsdb.CheckOperationResults(reply, ops) require.Nil(t, opErrs) require.Error(t, err) require.IsTypef(t, &ovsdb.ConstraintViolation{}, err, err.Error()) } func TestClientServerInsertAndUpdate(t *testing.T) { ovs, close := buildTestServerAndClient(t) defer close() _, err := ovs.MonitorAll(context.Background()) require.NoError(t, err) bridgeRow := &BridgeType{ Name: "br-update", ExternalIds: map[string]string{"go": "awesome", "docker": "made-for-each-other"}, } ops, err := ovs.Create(bridgeRow) require.NoError(t, err) reply, err := ovs.Transact(context.Background(), ops...) require.NoError(t, err) _, err = ovsdb.CheckOperationResults(reply, ops) require.NoError(t, err) uuid := reply[0].UUID.GoUUID assert.Eventually(t, func() bool { br := &BridgeType{UUID: uuid} err := ovs.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) // try to modify immutable field bridgeRow.UUID = uuid bridgeRow.Name = "br-update2" _, err = ovs.Where(bridgeRow).Update(bridgeRow, &bridgeRow.Name) require.Error(t, err) bridgeRow.Name = "br-update" // update many fields bridgeRow.UUID = uuid bridgeRow.Name = "br-update" bridgeRow.ExternalIds["baz"] = "foobar" bridgeRow.OtherConfig = map[string]string{"foo": "bar"} ops, err = ovs.Where(bridgeRow).Update(bridgeRow) require.NoError(t, err) reply, err = ovs.Transact(context.Background(), ops...) require.NoError(t, err) opErrs, err := ovsdb.CheckOperationResults(reply, ops) require.NoErrorf(t, err, "%+v", opErrs) require.Eventually(t, func() bool { br := &BridgeType{UUID: uuid} err = ovs.Get(context.Background(), br) if err != nil { return false } return reflect.DeepEqual(br, bridgeRow) }, 2*time.Second, 50*time.Millisecond) newExternalIds := map[string]string{"foo": "bar"} bridgeRow.ExternalIds = newExternalIds ops, err = ovs.Where(bridgeRow).Update(bridgeRow, &bridgeRow.ExternalIds) require.NoError(t, err) reply, err = ovs.Transact(context.Background(), ops...) 
require.NoError(t, err) opErr, err := ovsdb.CheckOperationResults(reply, ops) require.NoErrorf(t, err, "%+v", opErr) assert.Eventually(t, func() bool { br := &BridgeType{UUID: uuid} err = ovs.Get(context.Background(), br) if err != nil { return false } return reflect.DeepEqual(br.ExternalIds, bridgeRow.ExternalIds) }, 2*time.Second, 500*time.Millisecond) br := &BridgeType{UUID: uuid} err = ovs.Get(context.Background(), br) assert.NoError(t, err) assert.Equal(t, bridgeRow, br) } func TestUnsetOptional(t *testing.T) { c, close := buildTestServerAndClient(t) defer close() _, err := c.MonitorAll(context.Background()) require.NoError(t, err) // Create the default bridge which has an optional DatapathID set optional := "optional" br := BridgeType{ Name: "br-with-optional", DatapathID: &optional, } ops, err := c.Create(&br) require.NoError(t, err) r, err := c.Transact(context.Background(), ops...) require.NoError(t, err) _, err = ovsdb.CheckOperationResults(r, ops) require.NoError(t, err) // verify the bridge has DatapathID set err = c.Get(context.Background(), &br) require.NoError(t, err) require.NotNil(t, br.DatapathID) // modify bridge to unset DatapathID br.DatapathID = nil ops, err = c.Where(&br).Update(&br, &br.DatapathID) require.NoError(t, err) r, err = c.Transact(context.Background(), ops...) require.NoError(t, err) _, err = ovsdb.CheckOperationResults(r, ops) require.NoError(t, err) // verify the bridge has DatapathID unset err = c.Get(context.Background(), &br) require.NoError(t, err) require.Nil(t, br.DatapathID) } func TestUpdateOptional(t *testing.T) { c, close := buildTestServerAndClient(t) defer close() _, err := c.MonitorAll(context.Background()) require.NoError(t, err) // Create the default bridge which has an optional DatapathID set old := "old" br := BridgeType{ Name: "br-with-optional", DatapathID: &old, } ops, err := c.Create(&br) require.NoError(t, err) r, err := c.Transact(context.Background(), ops...) require.NoError(t, err) _, err = ovsdb.CheckOperationResults(r, ops) require.NoError(t, err) // verify the bridge has DatapathID set err = c.Get(context.Background(), &br) require.NoError(t, err) require.NotNil(t, br.DatapathID) // modify bridge to update DatapathID new := "new" br.DatapathID = &new ops, err = c.Where(&br).Update(&br, &br.DatapathID) require.NoError(t, err) r, err = c.Transact(context.Background(), ops...) require.NoError(t, err) _, err = ovsdb.CheckOperationResults(r, ops) require.NoError(t, err) // verify the bridge has DatapathID updated err = c.Get(context.Background(), &br) require.NoError(t, err) require.Equal(t, &new, br.DatapathID) } func TestMultipleOpsSameRow(t *testing.T) { c, close := buildTestServerAndClient(t) defer close() _, err := c.MonitorAll(context.Background()) require.NoError(t, err) var ops []ovsdb.Operation var op []ovsdb.Operation // Insert a bridge bridgeInsertOp := len(ops) bridgeUUID := "bridge_multiple_ops_same_row" datapathID := "datapathID" br := BridgeType{ UUID: bridgeUUID, Name: bridgeUUID, DatapathID: &datapathID, Ports: []string{"port10", "port1"}, ExternalIds: map[string]string{"key1": "value1"}, } op, err = c.Create(&br) require.NoError(t, err) ops = append(ops, op...) results, err := c.Transact(context.TODO(), ops...) 
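// The bridge was inserted under a client-side ("named") UUID; the server
// substitutes a real UUID at insert time and returns it in that operation's
// result, which the test reads back just below before issuing further
// operations against the same row.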
require.NoError(t, err) _, err = ovsdb.CheckOperationResults(results, ops) require.NoError(t, err) // find out the real bridge UUID bridgeUUID = results[bridgeInsertOp].UUID.GoUUID ops = []ovsdb.Operation{} // Do several ops with the bridge in the same transaction br.Ports = []string{"port10"} br.ExternalIds = map[string]string{"key1": "value1", "key10": "value10"} op, err = c.Where(&br).Update(&br, &br.Ports, &br.ExternalIds) require.NoError(t, err) ops = append(ops, op...) op, err = c.Where(&br).Mutate(&br, model.Mutation{ Field: &br.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"keyA": "valueA"}, }, model.Mutation{ Field: &br.Ports, Mutator: ovsdb.MutateOperationInsert, Value: []string{"port1"}, }, ) require.NoError(t, err) ops = append(ops, op...) op, err = c.Where(&br).Mutate(&br, model.Mutation{ Field: &br.ExternalIds, Mutator: ovsdb.MutateOperationDelete, Value: map[string]string{"key10": "value10"}, }, model.Mutation{ Field: &br.Ports, Mutator: ovsdb.MutateOperationDelete, Value: []string{"port10"}, }, ) require.NoError(t, err) ops = append(ops, op...) datapathID = "datapathID_updated" op, err = c.Where(&br).Update(&br, &br.DatapathID) require.NoError(t, err) ops = append(ops, op...) br.DatapathID = nil op, err = c.Where(&br).Update(&br, &br.DatapathID) require.NoError(t, err) ops = append(ops, op...) results, err = c.Transact(context.TODO(), ops...) require.NoError(t, err) require.Len(t, results, len(ops)) errors, err := ovsdb.CheckOperationResults(results, ops) require.NoError(t, err) require.Nil(t, errors) br = BridgeType{ UUID: bridgeUUID, } err = c.Get(context.TODO(), &br) require.NoError(t, err) require.Equal(t, []string{"port1"}, br.Ports) require.Equal(t, map[string]string{"key1": "value1", "keyA": "valueA"}, br.ExternalIds) require.Nil(t, br.DatapathID) } func TestReferentialIntegrity(t *testing.T) { // UUIDs to use throughout the tests ovsUUID := uuid.New().String() bridgeUUID := uuid.New().String() port1UUID := uuid.New().String() port2UUID := uuid.New().String() mirrorUUID := uuid.New().String() // the test adds an additional op to initialOps to set a reference to // the bridge in OVS table // the test deletes expectModels at the end tests := []struct { name string initialOps []ovsdb.Operation testOps func(client.Client) ([]ovsdb.Operation, error) expectModels []model.Model dontExpectModels []model.Model expectErr bool }{ { name: "strong reference is garbage collected", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // remove the mirror reference b := &test.BridgeType{UUID: bridgeUUID} return c.Where(b).Update(b, &b.Mirrors) }, expectModels: []model.Model{ &test.BridgeType{UUID: bridgeUUID, Name: bridgeUUID, Ports: []string{port1UUID}}, &test.PortType{UUID: port1UUID, Name: port1UUID}, }, dontExpectModels: []model.Model{ // mirror should have been garbage collected &test.MirrorType{UUID: mirrorUUID}, }, }, { name: "adding non-root row that is not 
strongly reference is a noop", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // add a mirror m := &test.MirrorType{UUID: mirrorUUID, Name: mirrorUUID} return c.Create(m) }, expectModels: []model.Model{ &test.BridgeType{UUID: bridgeUUID, Name: bridgeUUID}, }, dontExpectModels: []model.Model{ // mirror should have not been added as is not referenced from anywhere &test.MirrorType{UUID: mirrorUUID}, }, }, { name: "adding non-existent strong reference fails", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // add a mirror b := &test.BridgeType{UUID: bridgeUUID, Mirrors: []string{mirrorUUID}} return c.Where(b).Update(b, &b.Mirrors) }, expectModels: []model.Model{ &test.BridgeType{UUID: bridgeUUID, Name: bridgeUUID}, }, expectErr: true, }, { name: "weak reference is garbage collected", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}, ovsdb.UUID{GoUUID: port2UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port2UUID, Row: ovsdb.Row{ "name": port2UUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}, ovsdb.UUID{GoUUID: port2UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // remove port1 p := &test.PortType{UUID: port1UUID} ops, err := c.Where(p).Delete() if err != nil { return nil, err } b := &test.BridgeType{UUID: bridgeUUID, Ports: []string{port2UUID}} op, err := c.Where(b).Update(b, &b.Ports) if err != nil { return nil, err } return append(ops, op...), nil }, expectModels: []model.Model{ &test.BridgeType{UUID: bridgeUUID, Name: bridgeUUID, Ports: []string{port2UUID}, Mirrors: []string{mirrorUUID}}, &test.PortType{UUID: port2UUID, Name: port2UUID}, // mirror reference to port1 should have been garbage collected &test.MirrorType{UUID: mirrorUUID, Name: mirrorUUID, SelectSrcPort: []string{port2UUID}}, }, dontExpectModels: []model.Model{ &test.PortType{UUID: port1UUID}, }, }, { name: "adding a weak reference to a non-existent row is a noop", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // add reference to non-existent port2 m := &test.MirrorType{UUID: mirrorUUID, SelectSrcPort: []string{port1UUID, port2UUID}} return c.Where(m).Update(m, &m.SelectSrcPort) }, expectModels: []model.Model{ 
&test.BridgeType{UUID: bridgeUUID, Name: bridgeUUID, Ports: []string{port1UUID}, Mirrors: []string{mirrorUUID}}, &test.PortType{UUID: port1UUID, Name: port1UUID}, // mirror reference to port2 should have been garbage collected resulting in noop &test.MirrorType{UUID: mirrorUUID, Name: mirrorUUID, SelectSrcPort: []string{port1UUID}}, }, }, { name: "garbage collecting a weak reference on a column lowering it below the min length fails", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // remove port 1 return c.Where(&test.PortType{UUID: port1UUID}).Delete() }, expectModels: []model.Model{ &test.BridgeType{UUID: bridgeUUID, Name: bridgeUUID, Ports: []string{port1UUID}, Mirrors: []string{mirrorUUID}}, &test.PortType{UUID: port1UUID, Name: port1UUID}, &test.MirrorType{UUID: mirrorUUID, Name: mirrorUUID, SelectSrcPort: []string{port1UUID}}, }, expectErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c, close := buildTestServerAndClient(t) defer close() _, err := c.MonitorAll(context.Background()) require.NoError(t, err) // add the bridge reference to the initial ops ops := append(tt.initialOps, ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: "Open_vSwitch", UUID: ovsUUID, Row: ovsdb.Row{ "bridges": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: bridgeUUID}}}, }, }) results, err := c.Transact(context.Background(), ops...) require.NoError(t, err) require.Len(t, results, len(ops)) errors, err := ovsdb.CheckOperationResults(results, ops) require.Nil(t, errors) require.NoError(t, err) ops, err = tt.testOps(c) require.NoError(t, err) results, err = c.Transact(context.Background(), ops...) require.NoError(t, err) errors, err = ovsdb.CheckOperationResults(results, ops) require.Nil(t, errors) if tt.expectErr { require.Error(t, err) } else { require.NoError(t, err) } for _, m := range tt.expectModels { actual := model.Clone(m) err := c.Get(context.Background(), actual) require.NoError(t, err, "when expecting model %v", m) require.Equal(t, m, actual) } for _, m := range tt.dontExpectModels { err := c.Get(context.Background(), m) require.ErrorIs(t, err, client.ErrNotFound, "when not expecting model %v", m) } ops = []ovsdb.Operation{} for _, m := range tt.expectModels { op, err := c.Where(m).Delete() require.NoError(t, err) require.Len(t, op, 1) ops = append(ops, op...) } // remove the bridge reference ops = append(ops, ovsdb.Operation{ Op: ovsdb.OperationDelete, Table: "Open_vSwitch", Where: []ovsdb.Condition{ { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: ovsUUID}, }, }, }) results, err = c.Transact(context.Background(), ops...) 
require.NoError(t, err) require.Len(t, results, len(ops)) errors, err = ovsdb.CheckOperationResults(results, ops) require.Nil(t, errors) require.NoError(t, err) }) } } golang-github-ovn-org-libovsdb-0.7.0/server/server_test.go000066400000000000000000000051421464501522100236400ustar00rootroot00000000000000package server import ( "encoding/json" "testing" "github.com/google/uuid" "github.com/ovn-org/libovsdb/database/inmemory" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" . "github.com/ovn-org/libovsdb/test" ) func TestOvsdbServerMonitor(t *testing.T) { dbModel, err := GetModel() require.NoError(t, err) ovsDB := inmemory.NewDatabase(map[string]model.ClientDBModel{"Open_vSwitch": dbModel.Client()}) schema := dbModel.Schema o, err := NewOvsdbServer(ovsDB, dbModel) require.Nil(t, err) requests := make(map[string]ovsdb.MonitorRequest) for table, tableSchema := range schema.Tables { var columns []string for column := range tableSchema.Columns { columns = append(columns, column) } requests[table] = ovsdb.MonitorRequest{ Columns: columns, Select: ovsdb.NewDefaultMonitorSelect(), } } fooUUID := uuid.NewString() barUUID := uuid.NewString() bazUUID := uuid.NewString() quuxUUID := uuid.NewString() operations := []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: fooUUID, Row: ovsdb.Row{"name": "foo"}, }, { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: barUUID, Row: ovsdb.Row{"name": "bar"}, }, { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bazUUID, Row: ovsdb.Row{"name": "baz"}, }, { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: quuxUUID, Row: ovsdb.Row{"name": "quux"}, }, } transaction := ovsDB.NewTransaction("Open_vSwitch") _, updates := transaction.Transact(operations...) 
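// Commit the updates produced by the in-memory transaction so that the Monitor call below sees the four inserted Bridge rows.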
err = o.db.Commit("Open_vSwitch", uuid.New(), updates) require.NoError(t, err) db, err := json.Marshal("Open_vSwitch") require.Nil(t, err) value, err := json.Marshal("foo") require.Nil(t, err) rJSON, err := json.Marshal(requests) require.Nil(t, err) args := []json.RawMessage{db, value, rJSON} reply := &ovsdb.TableUpdates{} err = o.Monitor(nil, args, reply) require.Nil(t, err) expected := &ovsdb.TableUpdates{ "Bridge": { fooUUID: &ovsdb.RowUpdate{ New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: fooUUID}, "name": "foo", }, }, barUUID: &ovsdb.RowUpdate{ New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: barUUID}, "name": "bar", }, }, bazUUID: &ovsdb.RowUpdate{ New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: bazUUID}, "name": "baz", }, }, quuxUUID: &ovsdb.RowUpdate{ New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: quuxUUID}, "name": "quux", }, }, }, } assert.Equal(t, expected, reply) } golang-github-ovn-org-libovsdb-0.7.0/test/000077500000000000000000000000001464501522100204135ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/test/ovs/000077500000000000000000000000001464501522100212225ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/test/ovs/ovs_integration_test.go000066400000000000000000001412541464501522100260310ustar00rootroot00000000000000package ovs import ( "context" "os" "reflect" "strings" "sync" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/google/uuid" "github.com/ory/dockertest/v3" "github.com/ory/dockertest/v3/docker" "github.com/ovn-org/libovsdb/cache" "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) // OVSIntegrationSuite runs tests against a real Open vSwitch instance type OVSIntegrationSuite struct { suite.Suite pool *dockertest.Pool resource *dockertest.Resource clientWithoutInactvityCheck client.Client clientWithInactivityCheck client.Client } func (suite *OVSIntegrationSuite) SetupSuite() { var err error suite.pool, err = dockertest.NewPool("") require.NoError(suite.T(), err) tag := os.Getenv("OVS_IMAGE_TAG") if tag == "" { tag = "2.15.0" } options := &dockertest.RunOptions{ Repository: "libovsdb/ovs", Tag: tag, ExposedPorts: []string{"6640/tcp"}, PortBindings: map[docker.Port][]docker.PortBinding{ "6640/tcp": {{HostPort: "56640"}}, }, Tty: true, } hostConfig := func(config *docker.HostConfig) { // set AutoRemove to true so that stopped container goes away by itself config.AutoRemove = true config.RestartPolicy = docker.RestartPolicy{ Name: "no", } } suite.resource, err = suite.pool.RunWithOptions(options, hostConfig) require.NoError(suite.T(), err) // set expiry to 90 seconds so containers are cleaned up on test panic err = suite.resource.Expire(90) require.NoError(suite.T(), err) // let the container start before we attempt connection time.Sleep(5 * time.Second) } func (suite *OVSIntegrationSuite) SetupTest() { if suite.clientWithoutInactvityCheck != nil { suite.clientWithoutInactvityCheck.Close() } if suite.clientWithInactivityCheck != nil { suite.clientWithInactivityCheck.Close() } var err error err = suite.pool.Retry(func() error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() endpoint := "tcp::56640" ovs, err := client.NewOVSDBClient( defDB, client.WithEndpoint(endpoint), client.WithLeaderOnly(true), ) if err != nil { return err } err = ovs.Connect(ctx) if err != nil { suite.T().Log(err) return err } 
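// Keep this first client, which has no inactivity check; a second client configured with WithInactivityCheck is created next so the tests can exercise both behaviours.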
suite.clientWithoutInactvityCheck = ovs ovs2, err := client.NewOVSDBClient( defDB, client.WithEndpoint(endpoint), client.WithInactivityCheck(2*time.Second, 1*time.Second, &backoff.ZeroBackOff{}), client.WithLeaderOnly(true), ) if err != nil { return err } err = ovs2.Connect(ctx) if err != nil { suite.T().Log(err) return err } suite.clientWithInactivityCheck = ovs2 return nil }) require.NoError(suite.T(), err) // give ovsdb-server some time to start up _, err = suite.clientWithoutInactvityCheck.Monitor(context.TODO(), suite.clientWithoutInactvityCheck.NewMonitor( client.WithTable(&ovsType{}), client.WithTable(&bridgeType{}), ), ) require.NoError(suite.T(), err) } func (suite *OVSIntegrationSuite) TearDownSuite() { if suite.clientWithoutInactvityCheck != nil { suite.clientWithoutInactvityCheck.Close() suite.clientWithoutInactvityCheck = nil } if suite.clientWithInactivityCheck != nil { suite.clientWithInactivityCheck.Close() suite.clientWithInactivityCheck = nil } err := suite.pool.Purge(suite.resource) require.NoError(suite.T(), err) } func TestOVSIntegrationTestSuite(t *testing.T) { if testing.Short() { t.Skip() } suite.Run(t, new(OVSIntegrationSuite)) } type BridgeFailMode = string var ( BridgeFailModeStandalone BridgeFailMode = "standalone" BridgeFailModeSecure BridgeFailMode = "secure" ) // bridgeType is the simplified ORM model of the Bridge table type bridgeType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` OtherConfig map[string]string `ovsdb:"other_config"` ExternalIds map[string]string `ovsdb:"external_ids"` Ports []string `ovsdb:"ports"` Status map[string]string `ovsdb:"status"` BridgeFailMode *BridgeFailMode `ovsdb:"fail_mode"` IPFIX *string `ovsdb:"ipfix"` DatapathID *string `ovsdb:"datapath_id"` Mirrors []string `ovsdb:"mirrors"` } // ovsType is the ORM model of the OVS table type ovsType struct { UUID string `ovsdb:"_uuid"` Bridges []string `ovsdb:"bridges"` CurCfg int `ovsdb:"cur_cfg"` DatapathTypes []string `ovsdb:"datapath_types"` Datapaths map[string]string `ovsdb:"datapaths"` DbVersion *string `ovsdb:"db_version"` DpdkInitialized bool `ovsdb:"dpdk_initialized"` DpdkVersion *string `ovsdb:"dpdk_version"` ExternalIDs map[string]string `ovsdb:"external_ids"` IfaceTypes []string `ovsdb:"iface_types"` ManagerOptions []string `ovsdb:"manager_options"` NextCfg int `ovsdb:"next_cfg"` OtherConfig map[string]string `ovsdb:"other_config"` OVSVersion *string `ovsdb:"ovs_version"` SSL *string `ovsdb:"ssl"` Statistics map[string]string `ovsdb:"statistics"` SystemType *string `ovsdb:"system_type"` SystemVersion *string `ovsdb:"system_version"` } // ipfixType is a simplified ORM model for the IPFIX table type ipfixType struct { UUID string `ovsdb:"_uuid"` Targets []string `ovsdb:"targets"` } // queueType is the simplified ORM model of the Queue table type queueType struct { UUID string `ovsdb:"_uuid"` DSCP *int `ovsdb:"dscp"` } type portType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` Interfaces []string `ovsdb:"interfaces"` } type interfaceType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` } type mirrorType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` SelectSrcPort []string `ovsdb:"select_src_port"` } var defDB, _ = model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ "Open_vSwitch": &ovsType{}, "Bridge": &bridgeType{}, "IPFIX": &ipfixType{}, "Queue": &queueType{}, "Port": &portType{}, "Mirror": &mirrorType{}, "Interface": &interfaceType{}, }) func (suite *OVSIntegrationSuite) TestConnectReconnect() { 
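// The steps below walk a full client life cycle: verify the connection with Echo, create a bridge and wait for the cache event handler to fire, call Connect on an already connected client (expected to be a no-op), then Disconnect and wait for the DisconnectNotify signal; after reconnecting, the cache should be empty until a new monitor is set up.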
assert.True(suite.T(), suite.clientWithoutInactvityCheck.Connected()) err := suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.NoError(suite.T(), err) bridgeName := "br-discoreco" brChan := make(chan *bridgeType) suite.clientWithoutInactvityCheck.Cache().AddEventHandler(&cache.EventHandlerFuncs{ AddFunc: func(table string, model model.Model) { br, ok := model.(*bridgeType) if !ok { return } if br.Name == bridgeName { brChan <- br } }, }) bridgeUUID, err := suite.createBridge(bridgeName) require.NoError(suite.T(), err) <-brChan // make another connect call, this should return without error as we're already connected ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() err = suite.clientWithoutInactvityCheck.Connect(ctx) require.NoError(suite.T(), err) disconnectNotification := suite.clientWithoutInactvityCheck.DisconnectNotify() notified := make(chan struct{}) ready := make(chan struct{}) go func() { ready <- struct{}{} <-disconnectNotification notified <- struct{}{} }() <-ready suite.clientWithoutInactvityCheck.Disconnect() select { case <-notified: // got notification case <-time.After(5 * time.Second): suite.T().Fatal("expected a disconnect notification but didn't receive one") } assert.Equal(suite.T(), false, suite.clientWithoutInactvityCheck.Connected()) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.EqualError(suite.T(), err, client.ErrNotConnected.Error()) ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) defer cancel() err = suite.clientWithoutInactvityCheck.Connect(ctx) require.NoError(suite.T(), err) br := &bridgeType{ UUID: bridgeUUID, } // assert cache has been purged err = suite.clientWithoutInactvityCheck.Get(ctx, br) require.Error(suite.T(), err, client.ErrNotFound) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) assert.NoError(suite.T(), err) _, err = suite.clientWithoutInactvityCheck.Monitor(context.TODO(), suite.clientWithoutInactvityCheck.NewMonitor( client.WithTable(&ovsType{}), client.WithTable(&bridgeType{}), ), ) require.NoError(suite.T(), err) // assert cache has been re-populated require.NoError(suite.T(), suite.clientWithoutInactvityCheck.Get(ctx, br)) } func (suite *OVSIntegrationSuite) TestWithInactivityCheck() { assert.Equal(suite.T(), true, suite.clientWithInactivityCheck.Connected()) err := suite.clientWithInactivityCheck.Echo(context.TODO()) require.NoError(suite.T(), err) // Disconnect client suite.clientWithInactivityCheck.Disconnect() // Ensure Disconnect doesn't have any impact to the connection. require.Eventually(suite.T(), func() bool { return suite.clientWithInactivityCheck.Connected() }, 5*time.Second, 1*time.Second) // Try to reconfigure client which already have an established connection. err = suite.clientWithInactivityCheck.SetOption( client.WithReconnect(2*time.Second, &backoff.ZeroBackOff{}), ) require.Error(suite.T(), err) // Ensure Connect doesn't purge the cache. 
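// Calling Connect on an already connected client should be a no-op and must not wipe the monitor-backed cache; the checks below verify the Bridge table still holds rows afterwards.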
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() err = suite.clientWithInactivityCheck.Connect(ctx) require.NoError(suite.T(), err) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.NoError(suite.T(), err) require.True(suite.T(), suite.clientWithoutInactvityCheck.Cache().Table("Bridge").Len() != 0) // set up a disconnect notification disconnectNotification := suite.clientWithoutInactvityCheck.DisconnectNotify() notified := make(chan struct{}) ready := make(chan struct{}) go func() { ready <- struct{}{} <-disconnectNotification notified <- struct{}{} }() <-ready // close the connection suite.clientWithoutInactvityCheck.Close() select { case <-notified: // got notification case <-time.After(5 * time.Second): suite.T().Fatal("expected a disconnect notification but didn't receive one") } assert.Equal(suite.T(), false, suite.clientWithoutInactvityCheck.Connected()) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.EqualError(suite.T(), err, client.ErrNotConnected.Error()) ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) defer cancel() err = suite.clientWithoutInactvityCheck.Connect(ctx) require.NoError(suite.T(), err) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) assert.NoError(suite.T(), err) _, err = suite.clientWithoutInactvityCheck.MonitorAll(context.TODO()) require.NoError(suite.T(), err) } func (suite *OVSIntegrationSuite) TestWithReconnect() { assert.Equal(suite.T(), true, suite.clientWithoutInactvityCheck.Connected()) err := suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.NoError(suite.T(), err) // Disconnect client suite.clientWithoutInactvityCheck.Disconnect() require.Eventually(suite.T(), func() bool { return !suite.clientWithoutInactvityCheck.Connected() }, 5*time.Second, 1*time.Second) // Reconfigure err = suite.clientWithoutInactvityCheck.SetOption( client.WithReconnect(2*time.Second, &backoff.ZeroBackOff{}), ) require.NoError(suite.T(), err) // Connect (again) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() err = suite.clientWithoutInactvityCheck.Connect(ctx) require.NoError(suite.T(), err) // make another connect call, this should return without error as we're already connected ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) defer cancel() err = suite.clientWithoutInactvityCheck.Connect(ctx) require.NoError(suite.T(), err) // check the connection is working err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.NoError(suite.T(), err) // check the cache is purged require.True(suite.T(), suite.clientWithoutInactvityCheck.Cache().Table("Bridge").Len() == 0) // set up the monitor again _, err = suite.clientWithoutInactvityCheck.MonitorAll(context.TODO()) require.NoError(suite.T(), err) // add a bridge and verify our handler gets called bridgeName := "recon-b4" brChan := make(chan *bridgeType) suite.clientWithoutInactvityCheck.Cache().AddEventHandler(&cache.EventHandlerFuncs{ AddFunc: func(table string, model model.Model) { br, ok := model.(*bridgeType) if !ok { return } if strings.HasPrefix(br.Name, "recon-") { brChan <- br } }, }) _, err = suite.createBridge(bridgeName) require.NoError(suite.T(), err) br := <-brChan require.Equal(suite.T(), bridgeName, br.Name) // trigger reconnect err = suite.pool.Client.RestartContainer(suite.resource.Container.ID, 0) require.NoError(suite.T(), err) // check that we are automatically reconnected require.Eventually(suite.T(), func() bool { 
return suite.clientWithoutInactvityCheck.Connected() }, 20*time.Second, 1*time.Second) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.NoError(suite.T(), err) // check our original bridge is in the cache err = suite.clientWithoutInactvityCheck.Get(ctx, br) require.NoError(suite.T(), err) // create a new bridge to ensure the monitor and cache handler is still working bridgeName = "recon-after" _, err = suite.createBridge(bridgeName) require.NoError(suite.T(), err) LOOP: for { select { case <-time.After(5 * time.Second): suite.T().Fatal("timed out waiting for bridge") case b := <-brChan: if b.Name == bridgeName { break LOOP } } } // set up a disconnect notification disconnectNotification := suite.clientWithoutInactvityCheck.DisconnectNotify() notified := make(chan struct{}) ready := make(chan struct{}) go func() { ready <- struct{}{} <-disconnectNotification notified <- struct{}{} }() <-ready // close the connection suite.clientWithoutInactvityCheck.Close() select { case <-notified: // got notification case <-time.After(5 * time.Second): suite.T().Fatal("expected a disconnect notification but didn't receive one") } assert.Equal(suite.T(), false, suite.clientWithoutInactvityCheck.Connected()) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) require.EqualError(suite.T(), err, client.ErrNotConnected.Error()) ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) defer cancel() err = suite.clientWithoutInactvityCheck.Connect(ctx) require.NoError(suite.T(), err) err = suite.clientWithoutInactvityCheck.Echo(context.TODO()) assert.NoError(suite.T(), err) _, err = suite.clientWithoutInactvityCheck.MonitorAll(context.TODO()) require.NoError(suite.T(), err) } func (suite *OVSIntegrationSuite) TestInsertTransactIntegration() { bridgeName := "gopher-br7" uuid, err := suite.createBridge(bridgeName) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: uuid} err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) } func (suite *OVSIntegrationSuite) TestMultipleOpsTransactIntegration() { bridgeName := "a_bridge_to_nowhere" uuid, err := suite.createBridge(bridgeName) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: uuid} err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) var operations []ovsdb.Operation ovsRow := bridgeType{} br := &bridgeType{UUID: uuid} op1, err := suite.clientWithoutInactvityCheck.Where(br). Mutate(&ovsRow, model.Mutation{ Field: &ovsRow.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"one": "1"}, }) require.NoError(suite.T(), err) operations = append(operations, op1...) op2Mutations := []model.Mutation{ { Field: &ovsRow.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"two": "2", "three": "3"}, }, { Field: &ovsRow.ExternalIds, Mutator: ovsdb.MutateOperationDelete, Value: []string{"docker"}, }, { Field: &ovsRow.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"podman": "made-for-each-other"}, }, } op2, err := suite.clientWithoutInactvityCheck.Where(br).Mutate(&ovsRow, op2Mutations...) require.NoError(suite.T(), err) operations = append(operations, op2...) 
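// A single Mutate call may carry several mutations on the same column; they are applied in order, so the "docker" key is removed and the "podman" key added within one operation.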
var op3Comment = "update external ids" op3 := ovsdb.Operation{Op: ovsdb.OperationComment, Comment: &op3Comment} operations = append(operations, op3) reply, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operations...) require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(reply, operations) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) expectedExternalIds := map[string]string{ "go": "awesome", "podman": "made-for-each-other", "one": "1", "two": "2", "three": "3", } require.Exactly(suite.T(), expectedExternalIds, br.ExternalIds) } func (suite *OVSIntegrationSuite) TestInsertAndDeleteTransactIntegration() { bridgeName := "gopher-br5" bridgeUUID, err := suite.createBridge(bridgeName) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: bridgeUUID} err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) deleteOp, err := suite.clientWithoutInactvityCheck.Where(&bridgeType{Name: bridgeName}).Delete() require.NoError(suite.T(), err) ovsRow := ovsType{} delMutateOp, err := suite.clientWithoutInactvityCheck.WhereCache(func(*ovsType) bool { return true }). Mutate(&ovsRow, model.Mutation{ Field: &ovsRow.Bridges, Mutator: ovsdb.MutateOperationDelete, Value: []string{bridgeUUID}, }) require.NoError(suite.T(), err) delOperations := append(deleteOp, delMutateOp...) delReply, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), delOperations...) require.NoError(suite.T(), err) delOperationErrs, err := ovsdb.CheckOperationResults(delReply, delOperations) if err != nil { for _, oe := range delOperationErrs { suite.T().Error(oe) } suite.T().Fatal(err) } require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: bridgeUUID} err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err != nil }, 2*time.Second, 500*time.Millisecond) } func (suite *OVSIntegrationSuite) TestTableSchemaValidationIntegration() { operation := ovsdb.Operation{ Op: "insert", Table: "InvalidTable", Row: ovsdb.Row(map[string]interface{}{"name": "docker-ovs"}), } _, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operation) assert.Error(suite.T(), err) } func (suite *OVSIntegrationSuite) TestColumnSchemaInRowValidationIntegration() { operation := ovsdb.Operation{ Op: "insert", Table: "Bridge", Row: ovsdb.Row(map[string]interface{}{"name": "docker-ovs", "invalid_column": "invalid_column"}), } _, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operation) assert.Error(suite.T(), err) } func (suite *OVSIntegrationSuite) TestColumnSchemaInMultipleRowsValidationIntegration() { invalidBridge := ovsdb.Row(map[string]interface{}{"invalid_column": "invalid_column"}) bridge := ovsdb.Row(map[string]interface{}{"name": "docker-ovs"}) rows := []ovsdb.Row{invalidBridge, bridge} operation := ovsdb.Operation{ Op: "insert", Table: "Bridge", Rows: rows, } _, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operation) assert.Error(suite.T(), err) } func (suite *OVSIntegrationSuite) TestColumnSchemaValidationIntegration() { operation := ovsdb.Operation{ Op: "select", Table: "Bridge", Columns: []string{"name", "invalidColumn"}, } _, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operation) assert.Error(suite.T(), err) } func (suite 
*OVSIntegrationSuite) TestMonitorCancelIntegration() { monitorID, err := suite.clientWithoutInactvityCheck.Monitor( context.TODO(), suite.clientWithoutInactvityCheck.NewMonitor( client.WithTable(&queueType{}), ), ) require.NoError(suite.T(), err) uuid, err := suite.createQueue("test1", 0) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { q := &queueType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), q) return err == nil }, 2*time.Second, 500*time.Millisecond) err = suite.clientWithoutInactvityCheck.MonitorCancel(context.TODO(), monitorID) assert.NoError(suite.T(), err) uuid, err = suite.createQueue("test2", 1) require.NoError(suite.T(), err) assert.Never(suite.T(), func() bool { q := &queueType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), q) return err == nil }, 2*time.Second, 500*time.Millisecond) } func (suite *OVSIntegrationSuite) TestMonitorConditionIntegration() { // Monitor table Queue rows with dscp == 1 or 2. queue := queueType{} dscp1 := 1 dscp2 := 2 conditions := []model.Condition{ { Field: &queue.DSCP, Function: ovsdb.ConditionEqual, Value: &dscp1, }, { Field: &queue.DSCP, Function: ovsdb.ConditionEqual, Value: &dscp2, }, } _, err := suite.clientWithoutInactvityCheck.Monitor( context.TODO(), suite.clientWithoutInactvityCheck.NewMonitor( client.WithConditionalTable(&queue, conditions), ), ) require.NoError(suite.T(), err) uuid, err := suite.createQueue("test1", 1) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { q := &queueType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), q) return err == nil }, 2*time.Second, 500*time.Millisecond) uuid, err = suite.createQueue("test2", 3) require.NoError(suite.T(), err) assert.Never(suite.T(), func() bool { q := &queueType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), q) return err == nil }, 2*time.Second, 500*time.Millisecond) uuid, err = suite.createQueue("test3", 2) require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { q := &queueType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), q) return err == nil }, 2*time.Second, 500*time.Millisecond) } func (suite *OVSIntegrationSuite) TestInsertDuplicateTransactIntegration() { uuid, err := suite.createBridge("br-dup") require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: uuid} err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) _, err = suite.createBridge("br-dup") assert.Error(suite.T(), err) assert.IsType(suite.T(), &ovsdb.ConstraintViolation{}, err) } func (suite *OVSIntegrationSuite) TestUpdate() { uuid, err := suite.createBridge("br-update") require.NoError(suite.T(), err) require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: uuid} err := suite.clientWithoutInactvityCheck.Get(context.Background(), br) return err == nil }, 2*time.Second, 500*time.Millisecond) bridgeRow := &bridgeType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), bridgeRow) require.NoError(suite.T(), err) // try to modify immutable field bridgeRow.Name = "br-update2" _, err = suite.clientWithoutInactvityCheck.Where(bridgeRow).Update(bridgeRow, &bridgeRow.Name) require.Error(suite.T(), err) bridgeRow.Name = "br-update" // update many fields bridgeRow.ExternalIds["baz"] = "foobar" bridgeRow.OtherConfig = map[string]string{"foo": "bar"} 
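// Update without explicit field pointers (next line) should include every updatable column of the model, so the ExternalIds and OtherConfig changes above are applied in a single operation.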
ops, err := suite.clientWithoutInactvityCheck.Where(bridgeRow).Update(bridgeRow) require.NoError(suite.T(), err) reply, err := suite.clientWithoutInactvityCheck.Transact(context.Background(), ops...) require.NoError(suite.T(), err) opErrs, err := ovsdb.CheckOperationResults(reply, ops) require.NoErrorf(suite.T(), err, "%+v", opErrs) require.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), br) if err != nil { return false } return reflect.DeepEqual(br, bridgeRow) }, 2*time.Second, 50*time.Millisecond) newExternalIds := map[string]string{"foo": "bar"} bridgeRow.ExternalIds = newExternalIds ops, err = suite.clientWithoutInactvityCheck.Where(bridgeRow).Update(bridgeRow, &bridgeRow.ExternalIds) require.NoError(suite.T(), err) reply, err = suite.clientWithoutInactvityCheck.Transact(context.Background(), ops...) require.NoError(suite.T(), err) opErr, err := ovsdb.CheckOperationResults(reply, ops) require.NoErrorf(suite.T(), err, "%+v", opErr) assert.Eventually(suite.T(), func() bool { br := &bridgeType{UUID: uuid} err = suite.clientWithoutInactvityCheck.Get(context.Background(), br) if err != nil { return false } return reflect.DeepEqual(br, bridgeRow) }, 2*time.Second, 500*time.Millisecond) } func (suite *OVSIntegrationSuite) createBridge(bridgeName string) (string, error) { // NamedUUID is used to add multiple related Operations in a single Transact operation namedUUID := "gopher" br := bridgeType{ UUID: namedUUID, Name: bridgeName, ExternalIds: map[string]string{ "go": "awesome", "docker": "made-for-each-other", }, BridgeFailMode: &BridgeFailModeSecure, } insertOp, err := suite.clientWithoutInactvityCheck.Create(&br) require.NoError(suite.T(), err) // Inserting a Bridge row in Bridge table requires mutating the open_vswitch table. ovsRow := ovsType{} mutateOp, err := suite.clientWithoutInactvityCheck.WhereCache(func(*ovsType) bool { return true }). Mutate(&ovsRow, model.Mutation{ Field: &ovsRow.Bridges, Mutator: ovsdb.MutateOperationInsert, Value: []string{namedUUID}, }) require.NoError(suite.T(), err) operations := append(insertOp, mutateOp...) reply, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operations...) require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(reply, operations) return reply[0].UUID.GoUUID, err } func (suite *OVSIntegrationSuite) TestCreateIPFIX() { // Create an IPFIX row and update the bridge in the same transaction uuid, err := suite.createBridge("br-ipfix") require.NoError(suite.T(), err) namedUUID := "gopher" ipfix := ipfixType{ UUID: namedUUID, Targets: []string{"127.0.0.1:6650"}, } insertOp, err := suite.clientWithoutInactvityCheck.Create(&ipfix) require.NoError(suite.T(), err) bridge := bridgeType{ UUID: uuid, IPFIX: &namedUUID, } updateOps, err := suite.clientWithoutInactvityCheck.Where(&bridge).Update(&bridge, &bridge.IPFIX) require.NoError(suite.T(), err) operations := append(insertOp, updateOps...) reply, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), operations...) require.NoError(suite.T(), err) opErrs, err := ovsdb.CheckOperationResults(reply, operations) if err != nil { for _, oe := range opErrs { suite.T().Error(oe) } } // Delete the IPFIX row by removing its strong reference bridge.IPFIX = nil updateOps, err = suite.clientWithoutInactvityCheck.Where(&bridge).Update(&bridge, &bridge.IPFIX) require.NoError(suite.T(), err) reply, err = suite.clientWithoutInactvityCheck.Transact(context.TODO(), updateOps...)
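// The IPFIX row is never deleted explicitly: IPFIX is not a root table, so the server garbage collects the row once the bridge's ipfix column no longer holds a strong reference to it, which the List call below confirms.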
require.NoError(suite.T(), err) opErrs, err = ovsdb.CheckOperationResults(reply, updateOps) if err != nil { for _, oe := range opErrs { suite.T().Error(oe) } } require.NoError(suite.T(), err) //Assert the IPFIX table is empty ipfixes := []ipfixType{} err = suite.clientWithoutInactvityCheck.List(context.Background(), &ipfixes) require.NoError(suite.T(), err) require.Empty(suite.T(), ipfixes) } func (suite *OVSIntegrationSuite) TestWait() { var err error brName := "br-wait-for-it" // Use Wait to ensure bridge does not exist yet bridgeRow := &bridgeType{ Name: brName, } conditions := []model.Condition{ { Field: &bridgeRow.Name, Function: ovsdb.ConditionEqual, Value: brName, }, } timeout := 0 ops, err := suite.clientWithoutInactvityCheck.WhereAny(bridgeRow, conditions...).Wait( ovsdb.WaitConditionNotEqual, &timeout, bridgeRow, &bridgeRow.Name) require.NoError(suite.T(), err) reply, err := suite.clientWithoutInactvityCheck.Transact(context.Background(), ops...) require.NoError(suite.T(), err) opErrs, err := ovsdb.CheckOperationResults(reply, ops) require.NoErrorf(suite.T(), err, "%+v", opErrs) // Now, create the bridge _, err = suite.createBridge(brName) require.NoError(suite.T(), err) // Use wait to verify bridge's existence bridgeRow = &bridgeType{ Name: brName, BridgeFailMode: &BridgeFailModeSecure, } conditions = []model.Condition{ { Field: &bridgeRow.BridgeFailMode, Function: ovsdb.ConditionEqual, Value: &BridgeFailModeSecure, }, } timeout = 2 * 1000 // 2 seconds (in milliseconds) ops, err = suite.clientWithoutInactvityCheck.WhereAny(bridgeRow, conditions...).Wait( ovsdb.WaitConditionEqual, &timeout, bridgeRow, &bridgeRow.BridgeFailMode) require.NoError(suite.T(), err) reply, err = suite.clientWithoutInactvityCheck.Transact(context.Background(), ops...) require.NoError(suite.T(), err) opErrs, err = ovsdb.CheckOperationResults(reply, ops) require.NoErrorf(suite.T(), err, "%+v", opErrs) // Use wait to get a txn error due to until condition that is not happening timeout = 222 // milliseconds ops, err = suite.clientWithoutInactvityCheck.WhereAny(bridgeRow, conditions...).Wait( ovsdb.WaitConditionNotEqual, &timeout, bridgeRow, &bridgeRow.BridgeFailMode) require.NoError(suite.T(), err) reply, err = suite.clientWithoutInactvityCheck.Transact(context.Background(), ops...) require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(reply, ops) assert.Error(suite.T(), err) } func (suite *OVSIntegrationSuite) createQueue(queueName string, dscp int) (string, error) { q := queueType{ DSCP: &dscp, } insertOp, err := suite.clientWithoutInactvityCheck.Create(&q) require.NoError(suite.T(), err) reply, err := suite.clientWithoutInactvityCheck.Transact(context.TODO(), insertOp...) 
require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(reply, insertOp) return reply[0].UUID.GoUUID, err } func (suite *OVSIntegrationSuite) TestOpsWaitForReconnect() { namedUUID := "trozet" ipfix := ipfixType{ UUID: namedUUID, Targets: []string{"127.0.0.1:6650"}, } // Shutdown client suite.clientWithoutInactvityCheck.Disconnect() require.Eventually(suite.T(), func() bool { return !suite.clientWithoutInactvityCheck.Connected() }, 5*time.Second, 1*time.Second) err := suite.clientWithoutInactvityCheck.SetOption( client.WithReconnect(2*time.Second, &backoff.ZeroBackOff{}), ) require.NoError(suite.T(), err) var insertOp []ovsdb.Operation insertOp, err = suite.clientWithoutInactvityCheck.Create(&ipfix) require.NoError(suite.T(), err) wg := sync.WaitGroup{} wg.Add(1) // delay reconnecting for 5 seconds go func() { time.Sleep(5 * time.Second) err := suite.clientWithoutInactvityCheck.Connect(context.Background()) require.NoError(suite.T(), err) wg.Done() }() // execute the transaction, should not fail and execute after reconnection ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() reply, err := suite.clientWithoutInactvityCheck.Transact(ctx, insertOp...) require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(reply, insertOp) require.NoError(suite.T(), err) wg.Wait() } func (suite *OVSIntegrationSuite) TestUnsetOptional() { // Create the default bridge which has an optional BridgeFailMode set uuid, err := suite.createBridge("br-with-optional-unset") require.NoError(suite.T(), err) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Second) defer cancel() br := bridgeType{ UUID: uuid, } // verify the bridge has BridgeFailMode set err = suite.clientWithoutInactvityCheck.Get(ctx, &br) require.NoError(suite.T(), err) require.NotNil(suite.T(), br.BridgeFailMode) // modify bridge to unset BridgeFailMode br.BridgeFailMode = nil ops, err := suite.clientWithoutInactvityCheck.Where(&br).Update(&br, &br.BridgeFailMode) require.NoError(suite.T(), err) r, err := suite.clientWithoutInactvityCheck.Transact(ctx, ops...) require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(r, ops) require.NoError(suite.T(), err) // verify the bridge has BridgeFailMode unset err = suite.clientWithoutInactvityCheck.Get(ctx, &br) require.NoError(suite.T(), err) require.Nil(suite.T(), br.BridgeFailMode) } func (suite *OVSIntegrationSuite) TestUpdateOptional() { // Create the default bridge which has an optional BridgeFailMode set uuid, err := suite.createBridge("br-with-optional-update") require.NoError(suite.T(), err) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Second) defer cancel() br := bridgeType{ UUID: uuid, } // verify the bridge has BridgeFailMode set err = suite.clientWithoutInactvityCheck.Get(ctx, &br) require.NoError(suite.T(), err) require.Equal(suite.T(), &BridgeFailModeSecure, br.BridgeFailMode) // modify bridge to update BridgeFailMode br.BridgeFailMode = &BridgeFailModeStandalone ops, err := suite.clientWithoutInactvityCheck.Where(&br).Update(&br, &br.BridgeFailMode) require.NoError(suite.T(), err) r, err := suite.clientWithoutInactvityCheck.Transact(ctx, ops...) 
require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(r, ops) require.NoError(suite.T(), err) // verify the bridge has BridgeFailMode updated err = suite.clientWithoutInactvityCheck.Get(ctx, &br) require.NoError(suite.T(), err) require.Equal(suite.T(), &BridgeFailModeStandalone, br.BridgeFailMode) } func (suite *OVSIntegrationSuite) TestMultipleOpsSameRow() { ctx, cancel := context.WithTimeout(context.Background(), 500*time.Second) defer cancel() var ops []ovsdb.Operation var op []ovsdb.Operation // Use raw ops for the tables we don't have in the model, they are not the // target of the test and are just used to comply with the schema // referential integrity iface1UUID := "iface1" op = []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Interface", UUIDName: iface1UUID, Row: ovsdb.Row{ "name": iface1UUID, }, }, } ops = append(ops, op...) port1InsertOp := len(ops) port1UUID := "port1" op = []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Port", UUIDName: port1UUID, Row: ovsdb.Row{ "name": port1UUID, "interfaces": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: iface1UUID}}}, }, }, } ops = append(ops, op...) iface10UUID := "iface10" op = []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Interface", UUIDName: iface10UUID, Row: ovsdb.Row{ "name": iface10UUID, }, }, } ops = append(ops, op...) port10InsertOp := len(ops) port10UUID := "port10" op = []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Port", UUIDName: port10UUID, Row: ovsdb.Row{ "name": port10UUID, "interfaces": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: iface10UUID}}}, }, }, } ops = append(ops, op...) // Insert a bridge and register it in the OVS table bridgeInsertOp := len(ops) bridgeUUID := "bridge_multiple_ops_same_row" datapathID := "datapathID" br := bridgeType{ UUID: bridgeUUID, Name: bridgeUUID, DatapathID: &datapathID, Ports: []string{port10UUID, port1UUID}, ExternalIds: map[string]string{"key1": "value1"}, } op, err := suite.clientWithoutInactvityCheck.Create(&br) require.NoError(suite.T(), err) ops = append(ops, op...) ovs := ovsType{} op, err = suite.clientWithoutInactvityCheck.WhereCache(func(*ovsType) bool { return true }).Mutate(&ovs, model.Mutation{ Field: &ovs.Bridges, Mutator: ovsdb.MutateOperationInsert, Value: []string{bridgeUUID}, }) require.NoError(suite.T(), err) ops = append(ops, op...) results, err := suite.clientWithoutInactvityCheck.Transact(ctx, ops...) require.NoError(suite.T(), err) _, err = ovsdb.CheckOperationResults(results, ops) require.NoError(suite.T(), err) // find out the real UUIDs port1UUID = results[port1InsertOp].UUID.GoUUID port10UUID = results[port10InsertOp].UUID.GoUUID bridgeUUID = results[bridgeInsertOp].UUID.GoUUID ops = []ovsdb.Operation{} // Do several ops with the bridge in the same transaction br.Ports = []string{port10UUID} br.ExternalIds = map[string]string{"key1": "value1", "key10": "value10"} op, err = suite.clientWithoutInactvityCheck.Where(&br).Update(&br, &br.Ports, &br.ExternalIds) require.NoError(suite.T(), err) ops = append(ops, op...) op, err = suite.clientWithoutInactvityCheck.Where(&br).Mutate(&br, model.Mutation{ Field: &br.ExternalIds, Mutator: ovsdb.MutateOperationInsert, Value: map[string]string{"keyA": "valueA"}, }, model.Mutation{ Field: &br.Ports, Mutator: ovsdb.MutateOperationInsert, Value: []string{port1UUID}, }, ) require.NoError(suite.T(), err) ops = append(ops, op...) 
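// Mutate the same row again within the same transaction, this time removing entries; the server applies the operations in order, so the assertions at the end see the net result of the update and both mutations.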
op, err = suite.clientWithoutInactvityCheck.Where(&br).Mutate(&br, model.Mutation{ Field: &br.ExternalIds, Mutator: ovsdb.MutateOperationDelete, Value: map[string]string{"key10": "value10"}, }, model.Mutation{ Field: &br.Ports, Mutator: ovsdb.MutateOperationDelete, Value: []string{port10UUID}, }, ) require.NoError(suite.T(), err) ops = append(ops, op...) datapathID = "datapathID_updated" op, err = suite.clientWithoutInactvityCheck.Where(&br).Update(&br, &br.DatapathID) require.NoError(suite.T(), err) ops = append(ops, op...) br.DatapathID = nil op, err = suite.clientWithoutInactvityCheck.Where(&br).Update(&br, &br.DatapathID) require.NoError(suite.T(), err) ops = append(ops, op...) results, err = suite.clientWithoutInactvityCheck.Transact(ctx, ops...) require.NoError(suite.T(), err) errors, err := ovsdb.CheckOperationResults(results, ops) require.NoError(suite.T(), err) require.Nil(suite.T(), errors) require.Len(suite.T(), results, len(ops)) br = bridgeType{ UUID: bridgeUUID, } err = suite.clientWithoutInactvityCheck.Get(ctx, &br) require.NoError(suite.T(), err) require.Equal(suite.T(), []string{port1UUID}, br.Ports) require.Equal(suite.T(), map[string]string{"key1": "value1", "keyA": "valueA"}, br.ExternalIds) require.Nil(suite.T(), br.DatapathID) } func (suite *OVSIntegrationSuite) TestReferentialIntegrity() { t := suite.Suite.T() ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() // fetch the OVS UUID var ovs []*ovsType err := suite.clientWithoutInactvityCheck.WhereCache(func(*ovsType) bool { return true }).List(ctx, &ovs) require.NoError(t, err) require.Len(t, ovs, 1) // UUIDs to use throughout the tests ovsUUID := ovs[0].UUID bridgeUUID := uuid.New().String() port1UUID := uuid.New().String() port2UUID := uuid.New().String() interfaceUUID := uuid.New().String() mirrorUUID := uuid.New().String() // monitor additional table specific to this test _, err = suite.clientWithoutInactvityCheck.Monitor(ctx, suite.clientWithoutInactvityCheck.NewMonitor( client.WithTable(&portType{}), client.WithTable(&interfaceType{}), client.WithTable(&mirrorType{}), ), ) require.NoError(t, err) // the test adds an additional op to initialOps to set a reference to // the bridge in OVS table // the test deletes expectModels at the end tests := []struct { name string initialOps []ovsdb.Operation testOps func(client.Client) ([]ovsdb.Operation, error) expectModels []model.Model dontExpectModels []model.Model expectErr bool }{ { name: "strong reference is garbage collected", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, "interfaces": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: interfaceUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Interface", UUID: interfaceUUID, Row: ovsdb.Row{ "name": interfaceUUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // remove the mirror reference b := &bridgeType{UUID: bridgeUUID} return c.Where(b).Update(b, &b.Mirrors) }, expectModels: []model.Model{ &bridgeType{UUID: bridgeUUID, Name: 
bridgeUUID, Ports: []string{port1UUID}}, &portType{UUID: port1UUID, Name: port1UUID, Interfaces: []string{interfaceUUID}}, &interfaceType{UUID: interfaceUUID, Name: interfaceUUID}, }, dontExpectModels: []model.Model{ // mirror should have been garbage collected &mirrorType{UUID: mirrorUUID}, }, }, { name: "adding non-root row that is not strongly reference is a noop", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // add a mirror m := &mirrorType{UUID: mirrorUUID, Name: mirrorUUID} return c.Create(m) }, expectModels: []model.Model{ &bridgeType{UUID: bridgeUUID, Name: bridgeUUID}, }, dontExpectModels: []model.Model{ // mirror should have not been added as is not referenced from anywhere &mirrorType{UUID: mirrorUUID}, }, }, { name: "adding non-existent strong reference fails", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // add a mirror b := &bridgeType{UUID: bridgeUUID, Mirrors: []string{mirrorUUID}} return c.Where(b).Update(b, &b.Mirrors) }, expectModels: []model.Model{ &bridgeType{UUID: bridgeUUID, Name: bridgeUUID}, }, expectErr: true, }, { name: "weak reference is garbage collected", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}, ovsdb.UUID{GoUUID: port2UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, "interfaces": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: interfaceUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port2UUID, Row: ovsdb.Row{ "name": port2UUID, "interfaces": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: interfaceUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Interface", UUID: interfaceUUID, Row: ovsdb.Row{ "name": interfaceUUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}, ovsdb.UUID{GoUUID: port2UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // remove port1 b := &bridgeType{UUID: bridgeUUID, Ports: []string{port2UUID}} return c.Where(b).Update(b, &b.Ports) }, expectModels: []model.Model{ &bridgeType{UUID: bridgeUUID, Name: bridgeUUID, Ports: []string{port2UUID}, Mirrors: []string{mirrorUUID}}, &portType{UUID: port2UUID, Name: port2UUID, Interfaces: []string{interfaceUUID}}, // mirror reference to port1 should have been garbage collected &mirrorType{UUID: mirrorUUID, Name: mirrorUUID, SelectSrcPort: []string{port2UUID}}, }, dontExpectModels: []model.Model{ &portType{UUID: port1UUID}, }, }, { name: "adding a weak reference to a non-existent row is a noop", initialOps: []ovsdb.Operation{ { Op: ovsdb.OperationInsert, Table: "Bridge", UUID: bridgeUUID, Row: ovsdb.Row{ "name": bridgeUUID, "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, "mirrors": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: mirrorUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Port", UUID: port1UUID, Row: ovsdb.Row{ "name": port1UUID, "interfaces": 
ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: interfaceUUID}}}, }, }, { Op: ovsdb.OperationInsert, Table: "Interface", UUID: interfaceUUID, Row: ovsdb.Row{ "name": interfaceUUID, }, }, { Op: ovsdb.OperationInsert, Table: "Mirror", UUID: mirrorUUID, Row: ovsdb.Row{ "name": mirrorUUID, "select_src_port": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: port1UUID}}}, }, }, }, testOps: func(c client.Client) ([]ovsdb.Operation, error) { // add reference to non-existent port2 m := &mirrorType{UUID: mirrorUUID, SelectSrcPort: []string{port1UUID, port2UUID}} return c.Where(m).Update(m, &m.SelectSrcPort) }, expectModels: []model.Model{ &bridgeType{UUID: bridgeUUID, Name: bridgeUUID, Ports: []string{port1UUID}, Mirrors: []string{mirrorUUID}}, &portType{UUID: port1UUID, Name: port1UUID, Interfaces: []string{interfaceUUID}}, &interfaceType{UUID: interfaceUUID, Name: interfaceUUID}, // mirror reference to port2 should have been garbage collected resulting in noop &mirrorType{UUID: mirrorUUID, Name: mirrorUUID, SelectSrcPort: []string{port1UUID}}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := suite.clientWithoutInactvityCheck // add the bridge reference to the initial ops ops := append(tt.initialOps, ovsdb.Operation{ Op: ovsdb.OperationMutate, Table: "Open_vSwitch", Mutations: []ovsdb.Mutation{ { Mutator: ovsdb.MutateOperationInsert, Column: "bridges", Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: bridgeUUID}}}, }, }, Where: []ovsdb.Condition{ { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: ovsUUID}, }, }, }) results, err := c.Transact(ctx, ops...) require.NoError(t, err) require.Len(t, results, len(ops)) errors, err := ovsdb.CheckOperationResults(results, ops) require.Nil(t, errors) require.NoError(t, err) ops, err = tt.testOps(c) require.NoError(t, err) results, err = c.Transact(ctx, ops...) require.NoError(t, err) errors, err = ovsdb.CheckOperationResults(results, ops) require.Nil(t, errors) if tt.expectErr { require.Error(t, err) } else { require.NoError(t, err) } for _, m := range tt.expectModels { actual := model.Clone(m) err := c.Get(ctx, actual) require.NoError(t, err, "when expecting model %v", m) require.Equal(t, m, actual) } for _, m := range tt.dontExpectModels { err := c.Get(ctx, m) require.ErrorIs(t, err, client.ErrNotFound, "when expecting model %v", m) } ops = []ovsdb.Operation{} for _, m := range tt.expectModels { op, err := c.Where(m).Delete() require.NoError(t, err) require.Len(t, op, 1) ops = append(ops, op...) } // remove the bridge reference ops = append(ops, ovsdb.Operation{ Op: ovsdb.OperationMutate, Table: "Open_vSwitch", Mutations: []ovsdb.Mutation{ { Mutator: ovsdb.MutateOperationDelete, Column: "bridges", Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: bridgeUUID}}}, }, }, Where: []ovsdb.Condition{ { Column: "_uuid", Function: ovsdb.ConditionEqual, Value: ovsdb.UUID{GoUUID: ovsUUID}, }, }, }) results, err = c.Transact(context.Background(), ops...) require.NoError(t, err) require.Len(t, results, len(ops)) errors, err = ovsdb.CheckOperationResults(results, ops) require.Nil(t, errors) require.NoError(t, err) }) } } golang-github-ovn-org-libovsdb-0.7.0/test/test_data.go000066400000000000000000000167501464501522100227230ustar00rootroot00000000000000package test import ( "encoding/json" "fmt" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // Note that this schema is not strictly a subset of the real OVS schema. 
It has // some small variations that allow to effectively test some OVSDB RFC features const schema = ` { "name": "Open_vSwitch", "version": "0.0.1", "tables": { "Open_vSwitch": { "columns": { "manager_options": { "type": { "key": { "type": "uuid", "refTable": "Manager" }, "min": 0, "max": "unlimited" } }, "bridges": { "type": { "key": { "type": "uuid" }, "min": 0, "max": "unlimited" } } }, "isRoot": true, "maxRows": 1 }, "Bridge": { "columns": { "name": { "type": "string", "mutable": false }, "datapath_type": { "type": "string" }, "datapath_id": { "type": { "key": "string", "min": 0, "max": 1 }, "ephemeral": true }, "ports": { "type": { "key": { "type": "uuid" }, "min": 0, "max": "unlimited" } }, "mirrors": { "type": { "key": { "type": "uuid", "refTable": "Mirror" }, "min": 0, "max": "unlimited" } }, "status": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" }, "ephemeral": true }, "other_config": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } }, "external_ids": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } } }, "isRoot": true, "indexes": [ [ "name" ] ] }, "Flow_Sample_Collector_Set": { "columns": { "id": { "type": { "key": { "type": "integer", "minInteger": 0, "maxInteger": 4294967295 }, "min": 1, "max": 1 } }, "bridge": { "type": { "key": { "type": "uuid" }, "min": 1, "max": 1 } }, "external_ids": { "type": { "key": "string", "value": "string", "min": 0, "max": "unlimited" } } }, "isRoot": true, "indexes": [ [ "id", "bridge" ] ] }, "Manager": { "columns": { "target": { "type": "string" } }, "indexes": [["target"]] }, "Mirror": { "columns": { "name": { "type": "string" }, "select_src_port": { "type": { "key": { "type": "uuid", "refTable": "Port", "refType": "weak" }, "min": 1, "max": "unlimited" } } } }, "Port": { "columns": { "name": { "type": "string", "mutable": false } }, "isRoot": true, "indexes": [["name"]] } } } ` // BridgeType is the simplified ORM model of the Bridge table type BridgeType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` DatapathType string `ovsdb:"datapath_type"` DatapathID *string `ovsdb:"datapath_id"` OtherConfig map[string]string `ovsdb:"other_config"` ExternalIds map[string]string `ovsdb:"external_ids"` Ports []string `ovsdb:"ports"` Status map[string]string `ovsdb:"status"` Mirrors []string `ovsdb:"mirrors"` } // OvsType is the simplified ORM model of the Bridge table type OvsType struct { UUID string `ovsdb:"_uuid"` Bridges []string `ovsdb:"bridges"` ManagerOptions []string `ovsdb:"manager_options"` } type FlowSampleCollectorSetType struct { UUID string `ovsdb:"_uuid"` Bridge string `ovsdb:"bridge"` ExternalIDs map[string]string `ovsdb:"external_ids"` ID int `ovsdb:"id"` IPFIX *string // `ovsdb:"ipfix"` } type ManagerType struct { UUID string `ovsdb:"_uuid"` Target string `ovsdb:"target"` } type PortType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` } type MirrorType struct { UUID string `ovsdb:"_uuid"` Name string `ovsdb:"name"` SelectSrcPort []string `ovsdb:"select_src_port"` } func GetModel() (model.DatabaseModel, error) { client, err := model.NewClientDBModel( "Open_vSwitch", map[string]model.Model{ "Open_vSwitch": &OvsType{}, "Bridge": &BridgeType{}, "Flow_Sample_Collector_Set": &FlowSampleCollectorSetType{}, "Manager": &ManagerType{}, "Mirror": &MirrorType{}, "Port": &PortType{}, }, ) if err != nil { return model.DatabaseModel{}, err } schema, err := GetSchema() if err != nil { return model.DatabaseModel{}, err } dbModel, errs 
:= model.NewDatabaseModel(schema, client) if len(errs) > 0 { return model.DatabaseModel{}, fmt.Errorf("errors build model: %v", errs) } return dbModel, nil } func GetSchema() (ovsdb.DatabaseSchema, error) { var dbSchema ovsdb.DatabaseSchema err := json.Unmarshal([]byte(schema), &dbSchema) return dbSchema, err } golang-github-ovn-org-libovsdb-0.7.0/updates/000077500000000000000000000000001464501522100211015ustar00rootroot00000000000000golang-github-ovn-org-libovsdb-0.7.0/updates/difference.go000066400000000000000000000170671464501522100235350ustar00rootroot00000000000000package updates import "reflect" // difference between value 'a' and value 'b'. // This difference is calculated as described in // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // The result is calculated in 'a' in-place and returned unless the // difference is 'b' in which case 'b' is returned unmodified. Also returns a // boolean indicating if there is an actual difference. func difference(a, b interface{}) (interface{}, bool) { return mergeDifference(nil, a, b) } // applyDifference returns the result of applying difference 'd' to value 'v' // along with a boolean indicating if 'v' was changed. func applyDifference(v, d interface{}) (interface{}, bool) { if d == nil { return v, false } // difference can be applied with the same algorithm used to calculate it // f(x,f(x,y))=y result, changed := difference(v, d) dv := reflect.ValueOf(d) switch dv.Kind() { case reflect.Slice: fallthrough case reflect.Map: // but we need to tweak the interpretation of change for map and slices: // when there is no difference between the value and non-empty delta, it // actually means the value needs to be emptied so there is actually a // change if !changed && dv.Len() > 0 { return result, true } // there are no changes when delta is empty return result, changed && dv.Len() > 0 } return result, changed } // mergeDifference, given an original value 'o' and two differences 'a' and 'b', // returns a new equivalent difference that when applied on 'o' it would have // the same result as applying 'a' and 'b' consecutively. // If 'o' is nil, returns the difference between 'a' and 'b'. // This difference is calculated as described in // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // The result is calculated in 'a' in-place and returned unless the result is // 'b' in which case 'b' is returned unmodified. Also returns a boolean // indicating if there is an actual difference. func mergeDifference(o, a, b interface{}) (interface{}, bool) { kind := reflect.ValueOf(b).Kind() if kind == reflect.Invalid { kind = reflect.ValueOf(a).Kind() } switch kind { case reflect.Invalid: return nil, false case reflect.Slice: // set differences are transitive return setDifference(a, b) case reflect.Map: return mergeMapDifference(o, a, b) case reflect.Array: panic("Not implemented") default: return mergeAtomicDifference(o, a, b) } } // setDifference calculates the difference between set 'a' and set 'b'. // This difference is calculated as described in // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // The result is calculated in 'a' in-place and returned unless the difference // is 'b' in which case 'b' is returned unmodified. Also returns a boolean // indicating if there is an actual difference. 
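// For example (an illustrative case, not taken from the OVSDB documentation): // setDifference([]string{"a", "b", "c"}, []string{"b", "c", "d"}) returns a set // containing "a" and "d" (in no particular order), i.e. the symmetric difference.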
func setDifference(a, b interface{}) (interface{}, bool) { av := reflect.ValueOf(a) bv := reflect.ValueOf(b) if !av.IsValid() && !bv.IsValid() { return nil, false } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { return b, bv.Len() != 0 } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { return a, av.Len() != 0 } // From https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // The difference between two sets is all elements that only belong to one // of the sets. difference := make(map[interface{}]struct{}, bv.Len()) for i := 0; i < bv.Len(); i++ { // supposedly we are working with comparable atomic types with no // pointers so we can use the values as map keys difference[bv.Index(i).Interface()] = struct{}{} } j := av.Len() for i := 0; i < j; { vv := av.Index(i) vi := vv.Interface() if _, ok := difference[vi]; ok { // this value of 'a' is in 'b', so remove it from 'a'; to do that, // overwrite it with the last value and re-evaluate vv.Set(av.Index(j - 1)) // shrink the window of remaining 'a' values by one j-- // remove from 'b' values delete(difference, vi) } else { // this value of 'a' is not in 'b', evaluate the next value i++ } } // trim the slice to the actual values held av = av.Slice(0, j) for item := range difference { // this value of 'b' is not in 'a', so add it av = reflect.Append(av, reflect.ValueOf(item)) } if av.Len() == 0 { return reflect.Zero(av.Type()).Interface(), false } return av.Interface(), true } // mergeMapDifference, given an original map 'o' and two differences 'a' and // 'b', returns a new equivalent difference that when applied on 'o' it would // have the same result as applying 'a' and 'b' consecutively. // If 'o' is nil, returns the difference between 'a' and 'b'. // This difference is calculated as described in // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // The result is calculated in 'a' in-place and returned unless the result is // 'b' in which case 'b' is returned unmodified. // Returns a boolean indicating if there is an actual difference. func mergeMapDifference(o, a, b interface{}) (interface{}, bool) { av := reflect.ValueOf(a) bv := reflect.ValueOf(b) if !av.IsValid() && !bv.IsValid() { return nil, false } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { return b, bv.Len() != 0 } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { return a, av.Len() != 0 } ov := reflect.ValueOf(o) if !ov.IsValid() { ov = reflect.Zero(av.Type()) } // From // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // The difference between two maps is all key-value pairs whose keys // appear in only one of the maps, plus the key-value pairs whose keys // appear in both maps but with different values. For the latter elements, // the value from the new column is included. // We can assume that difference is a transitive operation so we calculate // the difference between 'a' and 'b' but we need to handle exceptions when // the same key is present in all values.
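// For example (an illustrative trace, not from the OVSDB docs): with o = {"a": "1"}, // a = {"a": "2"} (value changed) and b = {"a": "1"} (value changed back), the merged // difference is empty, since applying it to 'o' must leave the map unchanged; whereas with // o = {"a": "1"}, a = {"a": "1"} (key deleted) and b = {"a": "2"} (key re-added), the // merged difference is {"a": "2"}.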
for i := bv.MapRange(); i.Next(); { kv := i.Key() bvv := i.Value() avv := av.MapIndex(kv) ovv := ov.MapIndex(kv) // supposedly we are working with comparable types with no pointers so // we can compare directly here switch { case ovv.IsValid() && avv.IsValid() && ovv.Interface() == bvv.Interface(): // key is present in the three values // final result would restore key to the original value, delete from 'a' av.SetMapIndex(kv, reflect.Value{}) case ovv.IsValid() && avv.IsValid() && avv.Interface() == bvv.Interface(): // key is present in the three values // final result would remove key, set in 'a' with 'o' value av.SetMapIndex(kv, ovv) case avv.IsValid() && avv.Interface() == bvv.Interface(): // key/value is in 'a' and 'b', delete from 'a' av.SetMapIndex(kv, reflect.Value{}) default: // key/value in 'b' is not in 'a', set in 'a' with 'b' value av.SetMapIndex(kv, bvv) } } if av.Len() == 0 { return reflect.Zero(av.Type()).Interface(), false } return av.Interface(), true } // mergeAtomicDifference, given an original atomic value 'o' and two differences // 'a' and 'b', returns a new equivalent difference that when applied on 'o' it // would have the same result as applying 'a' and 'b' consecutively. // If 'o' is nil, returns the difference between 'a' and 'b'. // This difference is calculated as described in // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification // Returns a boolean indicating if there is an actual difference. func mergeAtomicDifference(o, a, b interface{}) (interface{}, bool) { if o != nil { return b, !reflect.DeepEqual(o, b) } return b, !reflect.DeepEqual(a, b) } golang-github-ovn-org-libovsdb-0.7.0/updates/difference_test.go000066400000000000000000000164251464501522100245700ustar00rootroot00000000000000package updates import ( "fmt" "reflect" "testing" "github.com/stretchr/testify/assert" ) func TestDifference(t *testing.T) { foo := "foo" bar := "bar" var null *string var nilMap map[string]string var nilSet []string tests := []struct { name string a interface{} b interface{} expected interface{} }{ { "value, different", "foo", "bar", "bar", }, { "value, equal", "foo", "foo", "foo", }, { "pointer, different", &foo, &bar, &bar, }, { "pointer, equal", &foo, &foo, &foo, }, { "pointer, nil", &foo, null, null, }, { "set, single element, different", []string{"foo"}, []string{"bar"}, []string{"foo", "bar"}, }, { "set, single element, equal", []string{"foo"}, []string{"foo"}, nilSet, }, { "set, different last element", []string{"foo", "bar"}, []string{"foo", "foobar"}, []string{"bar", "foobar"}, }, { "set, different first element", []string{"foo", "bar"}, []string{"foobar", "bar"}, []string{"foo", "foobar"}, }, { "set, multiple elements different", []string{"foo", "bar", "foobar", "baz"}, []string{"qux", "foo", "quux", "baz", "waldo"}, []string{"bar", "foobar", "qux", "quux", "waldo"}, }, { "set, all elements different", []string{"foo", "bar", "foobar", "baz"}, []string{"qux", "quux", "fred", "waldo"}, []string{"foo", "bar", "foobar", "baz", "qux", "quux", "fred", "waldo"}, }, { "set, multiple elements equal", []string{"foo", "bar"}, []string{"foo", "bar"}, nilSet, }, { "map, different", map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo"}, map[string]string{"bar": "baz", "qux": "fred", "foobar": "foobar"}, map[string]string{"foo": "bar", "qux": "fred", "foobar": "foobar"}, }, { "map, equal", map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo"}, map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo"}, nilMap, }, } for _, tt :=
range tests { t.Run(tt.name, func(t *testing.T) { diff, _ := difference(tt.a, tt.b) switch v := tt.expected.(type) { case []string: if v != nil { assert.ElementsMatch(t, tt.expected, v) return } assert.Equal(t, tt.expected, diff) default: assert.Equal(t, tt.expected, diff) } }) } } func BenchmarkSetDifference(t *testing.B) { l := 57000 c, a := make([]string, l), make([]string, l) for i := 0; i < l; i++ { c[i] = fmt.Sprintf("foo%d", i) } b := []string{"bar", c[1000], c[20000], "foobar", c[55000], "baz"} t.StopTimer() t.ResetTimer() for n := 0; n < t.N; n++ { copy(a, c) t.StartTimer() setDifference(a, b) t.StopTimer() } } func Test_applyDifference(t *testing.T) { type args struct { v interface{} d interface{} } tests := []struct { name string args args expected interface{} changed bool }{ { name: "atomic, apply difference changes value", args: args{ v: "foo", d: "bar", }, expected: "bar", changed: true, }, { name: "atomic, apply difference does not change value", args: args{ v: "foo", d: "foo", }, expected: "foo", changed: false, }, { name: "set, apply difference changes value", args: args{ v: []string{"foo"}, d: []string{"bar"}, }, expected: []string{"foo", "bar"}, changed: true, }, { name: "set, apply difference empties value", args: args{ v: []string{"foo"}, d: []string{"foo"}, }, expected: reflect.Zero(reflect.TypeOf([]string{})).Interface(), changed: true, }, { name: "set, apply empty difference", args: args{ v: []string{"foo"}, d: []string{}, }, expected: []string{"foo"}, changed: false, }, { name: "map, apply difference changes value", args: args{ v: map[string]string{"foo": "bar"}, d: map[string]string{"fred": "waldo"}, }, expected: map[string]string{"foo": "bar", "fred": "waldo"}, changed: true, }, { name: "map, apply difference empties value", args: args{ v: map[string]string{"foo": "bar"}, d: map[string]string{"foo": "bar"}, }, expected: reflect.Zero(reflect.TypeOf(map[string]string{})).Interface(), changed: true, }, { name: "map, apply empty difference", args: args{ v: map[string]string{"foo": "bar"}, d: map[string]string{}, }, expected: map[string]string{"foo": "bar"}, changed: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, changed := applyDifference(tt.args.v, tt.args.d) switch tt.expected.(type) { case []string: assert.ElementsMatch(t, tt.expected, result) default: assert.Equal(t, tt.expected, result) } assert.Equal(t, tt.changed, changed) }) } } func Test_mergeMapDifference(t *testing.T) { type args struct { o interface{} a interface{} b interface{} } tests := []struct { name string args args expected interface{} changed bool }{ { name: "original nil", args: args{ a: map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo"}, b: map[string]string{"bar": "baz", "qux": "fred", "foobar": "foobar"}, }, expected: map[string]string{"foo": "bar", "qux": "fred", "foobar": "foobar"}, changed: true, }, { name: "original empty", args: args{ o: map[string]string{}, a: map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo"}, b: map[string]string{"bar": "baz", "qux": "fred", "foobar": "foobar"}, }, expected: map[string]string{"foo": "bar", "qux": "fred", "foobar": "foobar"}, changed: true, }, { name: "key value updated back to the original value", args: args{ o: map[string]string{"foobar": "foobar"}, a: map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo", "foobar": "bar"}, b: map[string]string{"bar": "baz", "qux": "fred", "foobar": "foobar"}, }, expected: map[string]string{"foo": "bar", "qux": "fred"}, changed: true, }, { name: "key 
value updated and then removed", args: args{ o: map[string]string{"foobar": "foobar"}, a: map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo", "foobar": "bar"}, b: map[string]string{"bar": "baz", "qux": "fred", "foobar": "bar"}, }, expected: map[string]string{"foo": "bar", "qux": "fred", "foobar": "foobar"}, changed: true, }, { name: "key value removed and then added to the original value", args: args{ o: map[string]string{"foobar": "foobar"}, a: map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo", "foobar": "foobar"}, b: map[string]string{"bar": "baz", "qux": "fred", "foobar": "foobar"}, }, expected: map[string]string{"foo": "bar", "qux": "fred"}, changed: true, }, { name: "key removed and then added to a different value", args: args{ o: map[string]string{"foobar": "foobar"}, a: map[string]string{"foo": "bar", "bar": "baz", "qux": "waldo", "foobar": "foobar"}, b: map[string]string{"bar": "baz", "qux": "fred", "foobar": "bar"}, }, expected: map[string]string{"foo": "bar", "qux": "fred", "foobar": "bar"}, changed: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, changed := mergeMapDifference(tt.args.o, tt.args.a, tt.args.b) assert.Equal(t, tt.expected, result) assert.Equal(t, tt.changed, changed) }) } } golang-github-ovn-org-libovsdb-0.7.0/updates/doc.go000066400000000000000000000011371464501522100221770ustar00rootroot00000000000000/* Package updates provides a utility to perform and aggregate model updates. As input, it supports OVSDB Operations, RowUpdate or RowUpdate2 notations via the corresponding Add methods. As output, it supports both OVSDB RowUpdate2 and model notation via the corresponding ForEach iterative methods. Several updates can be added and will be merged with any previous updates even if they are for the same model. If several updates for the same model are aggregated, the user is responsible for ensuring that the provided model to be updated matches the updated model of the previous update.
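A minimal usage sketch (illustrative only; dbModel, uuid, cachedModel and op
are placeholders, and the method signatures shown are the ones used elsewhere
in this repository):

	var u ModelUpdates
	// aggregate an OVSDB operation into the set of updates
	if err := u.AddOperation(dbModel, "Bridge", uuid, cachedModel, &op); err != nil {
		return err
	}
	// iterate the aggregated updates in RowUpdate2 notation
	err := u.ForEachRowUpdate("Bridge", func(uuid string, row ovsdb.RowUpdate2) error {
		// row.Insert, row.Modify and row.Delete carry the aggregated change
		return nil
	})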
*/ package updates golang-github-ovn-org-libovsdb-0.7.0/updates/merge.go000066400000000000000000000103341464501522100225300ustar00rootroot00000000000000package updates import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/ovsdb" ) func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) { // handle model update switch { case b.old == nil && b.new == nil: // noop case a.old == nil && a.new == nil: // first op a.old = b.old a.new = b.new case a.new != nil && b.old != nil && b.new != nil: // update after an insert or an update a.new = b.new case b.old != nil && b.new == nil: // a final delete a.new = nil default: return modelUpdate{}, fmt.Errorf("sequence of updates not supported") } // handle row update ru2, err := mergeRowUpdate(ts, a.rowUpdate2, b.rowUpdate2) if err != nil { return modelUpdate{}, err } if ru2 == nil { return modelUpdate{}, nil } a.rowUpdate2 = ru2 return a, nil } func mergeRowUpdate(ts *ovsdb.TableSchema, a, b *rowUpdate2) (*rowUpdate2, error) { switch { case b == nil: // noop case a == nil: // first op a = b case a.Insert != nil && b.Modify != nil: // update after an insert a.New = b.New a.Insert = b.New case a.Modify != nil && b.Modify != nil: // update after update a.New = b.New a.Modify = mergeModifyRow(ts, a.Old, a.Modify, b.Modify) if a.Modify == nil { // we merged two modifications that brought back the row to its // original value which is a no op a = nil } case a.Insert != nil && b.Delete != nil: // delete after insert a = nil case b.Delete != nil: // a final delete a.Initial = nil a.Insert = nil a.Modify = nil a.New = nil a.Delete = b.Delete default: return &rowUpdate2{}, fmt.Errorf("sequence of updates not supported") } return a, nil } // mergeModifyRow merges two modification rows 'a' and 'b' with respect an // original row 'o'. Two modifications that restore the original value cancel // each other and won't be included in the result. Returns nil if there are no // resulting modifications. func mergeModifyRow(ts *ovsdb.TableSchema, o, a, b *ovsdb.Row) *ovsdb.Row { original := *o aMod := *a bMod := *b for k, v := range bMod { if _, ok := aMod[k]; !ok { aMod[k] = v continue } var result interface{} var changed bool // handle maps or sets first switch v.(type) { // difference only supports set or map values that are comparable with // no pointers. This should be currently fine because the set or map // values should only be non pointer atomic types or the UUID struct. 
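// For example (see the merge tests in this package): a Modify that changes
// 'ports' from {port1, port2} to {port1, port3} merged with a later Modify
// that reverts the change yields an empty set difference, so the column is
// dropped from the merged Modify and the two updates cancel out.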
case ovsdb.OvsSet: aSet := aMod[k].(ovsdb.OvsSet) bSet := v.(ovsdb.OvsSet) // handle sets of multiple values, single value sets are handled as // atomic values if ts.Column(k).TypeObj.Max() != 1 { // set difference is a fully transitive operation so we dont // need to do anything special to merge two differences result, changed = setDifference(aSet.GoSet, bSet.GoSet) result = ovsdb.OvsSet{GoSet: result.([]interface{})} } case ovsdb.OvsMap: aMap := aMod[k].(ovsdb.OvsMap) bMap := v.(ovsdb.OvsMap) var originalMap ovsdb.OvsMap if v, ok := original[k]; ok { originalMap = v.(ovsdb.OvsMap) } // map difference is not transitive with respect to the original // value so we have to take the original value into account when // merging result, changed = mergeMapDifference(originalMap.GoMap, aMap.GoMap, bMap.GoMap) result = ovsdb.OvsMap{GoMap: result.(map[interface{}]interface{})} } // was neither a map nor a set if result == nil { // atomic difference is not transitive with respect to the original // value so we have to take the original value into account when // merging o := original[k] if o == nil { // assume zero value if original does not have the column o = reflect.Zero(reflect.TypeOf(v)).Interface() } if set, ok := o.(ovsdb.OvsSet); ok { // atomic optional values are cleared out with an empty set // if the original value was also cleared out, use an empty set // instead of a nil set so that mergeAtomicDifference notices // that we are returning to the original value if set.GoSet == nil { set.GoSet = []interface{}{} } o = set } result, changed = mergeAtomicDifference(o, aMod[k], v) } if !changed { delete(aMod, k) continue } aMod[k] = result } if len(aMod) == 0 { return nil } return a } golang-github-ovn-org-libovsdb-0.7.0/updates/merge_test.go000066400000000000000000000624331464501522100235760ustar00rootroot00000000000000package updates import ( "testing" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func Test_merge(t *testing.T) { oldDatapathID := "old" newDatapathID := "new" type args struct { a modelUpdate b modelUpdate } tests := []struct { name string args args want modelUpdate wantErr bool }{ { name: "no op", }, { name: "insert", args: args{ b: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, want: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, { name: "update", args: args{ b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, }, { name: "delete", args: args{ b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, { name: "no op 
after insert", args: args{ a: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, want: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, { name: "no op after update", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, }, { name: "no op after delete", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, { name: "insert after insert fails", args: args{ a: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, b: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, wantErr: true, }, { name: "insert after update fails", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, b: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, wantErr: true, }, { name: "insert after delete fails", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, b: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, wantErr: true, }, { name: "update after insert", args: args{ a: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, }, want: modelUpdate{ new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge2", }, New: &ovsdb.Row{ "name": "bridge2", }, }, }, }, { name: "update after update", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: 
&ovsdb.Row{ "name": "bridge2", }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge2", }, new: &test.BridgeType{ Name: "bridge3", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge2", }, New: &ovsdb.Row{ "name": "bridge3", }, Modify: &ovsdb.Row{ "name": "bridge3", }, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge3", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge3", }, Modify: &ovsdb.Row{ "name": "bridge3", }, }, }, }, { name: "update after delete fails", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge2", }, new: &test.BridgeType{ Name: "bridge3", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge2", }, New: &ovsdb.Row{ "name": "bridge3", }, Modify: &ovsdb.Row{ "name": "bridge3", }, }, }, }, wantErr: true, }, { name: "delete after insert", args: args{ a: modelUpdate{ new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, }, { name: "delete after update", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge2", }, Delete: &ovsdb.Row{}, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, { name: "delete after delete", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, { name: "update atomic field to original value after update results in no op", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge2", DatapathType: "type", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", "datapath_type": "type", }, Modify: &ovsdb.Row{ "name": "bridge2", "datapath_type": "type", }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge2", DatapathType: "type", }, new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge2", "datapath_type": "type", }, New: &ovsdb.Row{ "name": "bridge", }, Modify: &ovsdb.Row{ "name": "bridge", "datapath_type": "", }, }, }, }, }, { name: "update atomic field to same updated value after update results in original update", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge", DatapathType: "type", }, rowUpdate2: 
&ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, Modify: &ovsdb.Row{ "datapath_type": "type", }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathType: "type", }, new: &test.BridgeType{ Name: "bridge", DatapathType: "type", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, Modify: &ovsdb.Row{ "datapath_type": "type", }, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge", DatapathType: "type", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, Modify: &ovsdb.Row{ "datapath_type": "type", }, }, }, }, { name: "update optional field to same value after update results in original update", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, new: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, }, { name: "update optional field to original value after update results in no op", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &oldDatapathID, }, new: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, new: &test.BridgeType{ Name: "bridge", DatapathID: &oldDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, }, }, }, }, { name: "update optional field to original empty value after update results in no op", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", }, new: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: 
&ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{}}, }, }, }, }, }, { name: "update optional field to empty value after update", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &oldDatapathID, }, new: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &newDatapathID, }, new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{}}, }, }, }, }, want: modelUpdate{ old: &test.BridgeType{ Name: "bridge", DatapathID: &oldDatapathID, }, new: &test.BridgeType{ Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge", }, Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{}}, }, }, }, }, { name: "update set field to original value after update results in no op", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", Ports: []string{"port1", "port2"}, }, new: &test.BridgeType{ Name: "bridge", Ports: []string{"port1", "port3"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port2"}}, }, New: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port3"}}, }, Modify: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{"port2", "port3"}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", Ports: []string{"port1", "port3"}, }, new: &test.BridgeType{ Name: "bridge", Ports: []string{"port1", "port2"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port3"}}, }, New: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port2"}}, }, Modify: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{"port2", "port3"}}, }, }, }, }, }, { name: "update map field to original value after update results in no op", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", ExternalIds: map[string]string{"key": "value", "key1": "value1", "key2": "value2"}, }, new: &test.BridgeType{ Name: "bridge", ExternalIds: map[string]string{"key": "value1", "key1": "value1", "key3": "value3"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key1": "value1", "key2": "value2"}}, }, New: &ovsdb.Row{ "external_ids": 
ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key1": "value1", "key3": "value3"}}, }, Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key2": "value2", "key3": "value3"}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge", ExternalIds: map[string]string{"key": "value1", "key1": "value1", "key3": "value3"}, }, new: &test.BridgeType{ Name: "bridge", ExternalIds: map[string]string{"key": "value", "key1": "value1", "key2": "value2"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key1": "value1", "key3": "value3"}}, }, New: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key1": "value1", "key2": "value2"}}, }, Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key2": "value2", "key3": "value3"}}, }, }, }, }, }, { name: "update multiple fields to original value after update results in no op", args: args{ a: modelUpdate{ old: &test.BridgeType{ Name: "bridge", Ports: []string{"port1", "port2"}, ExternalIds: map[string]string{"key": "value", "key1": "value1", "key2": "value2"}, DatapathID: &oldDatapathID, }, new: &test.BridgeType{ Name: "bridge2", Ports: []string{"port1", "port3"}, ExternalIds: map[string]string{"key": "value1", "key1": "value1", "key3": "value3"}, DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port2"}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key1": "value1", "key2": "value2"}}, "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, New: &ovsdb.Row{ "name": "bridge2", "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port3"}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key1": "value1", "key3": "value3"}}, "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, Modify: &ovsdb.Row{ "name": "bridge2", "ports": ovsdb.OvsSet{GoSet: []interface{}{"port2", "port3"}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key2": "value2", "key3": "value3"}}, "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, b: modelUpdate{ old: &test.BridgeType{ Name: "bridge2", Ports: []string{"port1", "port3"}, ExternalIds: map[string]string{"key": "value1", "key1": "value1", "key3": "value3"}, DatapathID: &newDatapathID, }, new: &test.BridgeType{ Name: "bridge", Ports: []string{"port1", "port2"}, ExternalIds: map[string]string{"key": "value", "key1": "value1", "key2": "value2"}, DatapathID: &oldDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge2", "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port3"}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key1": "value1", "key3": "value3"}}, }, New: &ovsdb.Row{ "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{"port1", "port2"}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key1": "value1", "key2": "value2"}}, }, Modify: &ovsdb.Row{ "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{"port2", "port3"}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key2": "value2", "key3": "value3"}}, "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, }, }, }, }, }, } for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { dbModel, err := test.GetModel() require.NoError(t, err) ts := dbModel.Schema.Table("Bridge") got, err := merge(ts, tt.args.a, tt.args.b) if tt.wantErr { assert.Error(t, err) return } assert.NoError(t, err) assert.Equal(t, tt.want, got) }) } } golang-github-ovn-org-libovsdb-0.7.0/updates/mutate.go000066400000000000000000000147471464501522100227440ustar00rootroot00000000000000package updates import ( "reflect" "github.com/ovn-org/libovsdb/ovsdb" ) func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) { for i := 0; i < a.Len(); i++ { if a.Index(i).Interface() == b.Interface() { v := reflect.AppendSlice(a.Slice(0, i), a.Slice(i+1, a.Len())) return v, true } } return a, false } func insertToSlice(a, b reflect.Value) (reflect.Value, bool) { for i := 0; i < a.Len(); i++ { if a.Index(i).Interface() == b.Interface() { return a, false } } return reflect.Append(a, b), true } func mutate(current interface{}, mutator ovsdb.Mutator, value interface{}) (interface{}, interface{}) { switch current.(type) { case bool, string: return current, value } switch mutator { case ovsdb.MutateOperationInsert: // for insert, the delta will be the new value added return mutateInsert(current, value) case ovsdb.MutateOperationDelete: return mutateDelete(current, value) case ovsdb.MutateOperationAdd: // for add, the delta is the new value new := mutateAdd(current, value) return new, new case ovsdb.MutateOperationSubtract: // for subtract, the delta is the new value new := mutateSubtract(current, value) return new, new case ovsdb.MutateOperationMultiply: new := mutateMultiply(current, value) return new, new case ovsdb.MutateOperationDivide: new := mutateDivide(current, value) return new, new case ovsdb.MutateOperationModulo: new := mutateModulo(current, value) return new, new } return current, value } func mutateInsert(current, value interface{}) (interface{}, interface{}) { switch current.(type) { case int, float64: return current, current } vc := reflect.ValueOf(current) vv := reflect.ValueOf(value) if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { v, ok := insertToSlice(vc, vv) var diff interface{} if ok { diff = value } return v.Interface(), diff } if !vc.IsValid() { if vv.IsValid() { return vv.Interface(), vv.Interface() } return nil, nil } if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { v := vc diff := reflect.Indirect(reflect.New(vv.Type())) for i := 0; i < vv.Len(); i++ { var ok bool v, ok = insertToSlice(v, vv.Index(i)) if ok { diff = reflect.Append(diff, vv.Index(i)) } } if diff.Len() > 0 { return v.Interface(), diff.Interface() } return v.Interface(), nil } if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { if vc.IsNil() && vv.Len() > 0 { return value, value } diff := reflect.MakeMap(vc.Type()) iter := vv.MapRange() for iter.Next() { k := iter.Key() if !vc.MapIndex(k).IsValid() { vc.SetMapIndex(k, iter.Value()) diff.SetMapIndex(k, iter.Value()) } } if diff.Len() > 0 { return current, diff.Interface() } return current, nil } return current, nil } func mutateDelete(current, value interface{}) (interface{}, interface{}) { switch current.(type) { case int, float64: return current, nil } vc := reflect.ValueOf(current) vv := reflect.ValueOf(value) if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { v, ok := removeFromSlice(vc, vv) diff := value if !ok { diff = nil } return v.Interface(), diff } if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { v := vc diff := 
reflect.Indirect(reflect.New(vv.Type())) for i := 0; i < vv.Len(); i++ { var ok bool v, ok = removeFromSlice(v, vv.Index(i)) if ok { diff = reflect.Append(diff, vv.Index(i)) } } if diff.Len() > 0 { return v.Interface(), diff.Interface() } return v.Interface(), nil } if vc.Kind() == reflect.Map && vv.Type() == reflect.SliceOf(vc.Type().Key()) { diff := reflect.MakeMap(vc.Type()) for i := 0; i < vv.Len(); i++ { if vc.MapIndex(vv.Index(i)).IsValid() { diff.SetMapIndex(vv.Index(i), vc.MapIndex(vv.Index(i))) vc.SetMapIndex(vv.Index(i), reflect.Value{}) } } if diff.Len() > 0 { return current, diff.Interface() } return current, nil } if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { diff := reflect.MakeMap(vc.Type()) iter := vv.MapRange() for iter.Next() { vvk := iter.Key() vvv := iter.Value() vcv := vc.MapIndex(vvk) if vcv.IsValid() && reflect.DeepEqual(vcv.Interface(), vvv.Interface()) { diff.SetMapIndex(vvk, vcv) vc.SetMapIndex(vvk, reflect.Value{}) } } if diff.Len() > 0 { return current, diff.Interface() } return current, nil } return current, nil } func mutateAdd(current, value interface{}) interface{} { if i, ok := current.(int); ok { v := value.(int) return i + v } if i, ok := current.(float64); ok { v := value.(float64) return i + v } if is, ok := current.([]int); ok { v := value.(int) for i, j := range is { is[i] = j + v } return is } if is, ok := current.([]float64); ok { v := value.(float64) for i, j := range is { is[i] = j + v } return is } return current } func mutateSubtract(current, value interface{}) interface{} { if i, ok := current.(int); ok { v := value.(int) return i - v } if i, ok := current.(float64); ok { v := value.(float64) return i - v } if is, ok := current.([]int); ok { v := value.(int) for i, j := range is { is[i] = j - v } return is } if is, ok := current.([]float64); ok { v := value.(float64) for i, j := range is { is[i] = j - v } return is } return current } func mutateMultiply(current, value interface{}) interface{} { if i, ok := current.(int); ok { v := value.(int) return i * v } if i, ok := current.(float64); ok { v := value.(float64) return i * v } if is, ok := current.([]int); ok { v := value.(int) for i, j := range is { is[i] = j * v } return is } if is, ok := current.([]float64); ok { v := value.(float64) for i, j := range is { is[i] = j * v } return is } return current } func mutateDivide(current, value interface{}) interface{} { if i, ok := current.(int); ok { v := value.(int) return i / v } if i, ok := current.(float64); ok { v := value.(float64) return i / v } if is, ok := current.([]int); ok { v := value.(int) for i, j := range is { is[i] = j / v } return is } if is, ok := current.([]float64); ok { v := value.(float64) for i, j := range is { is[i] = j / v } return is } return current } func mutateModulo(current, value interface{}) interface{} { if i, ok := current.(int); ok { v := value.(int) return i % v } if is, ok := current.([]int); ok { v := value.(int) for i, j := range is { is[i] = j % v } return is } return current } golang-github-ovn-org-libovsdb-0.7.0/updates/mutate_test.go000066400000000000000000000161551464501522100237760ustar00rootroot00000000000000package updates import ( "testing" "github.com/ovn-org/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) func TestMutateAdd(t *testing.T) { tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} }{ { "add int", 1, ovsdb.MutateOperationAdd, 1, 2, }, { "add float", 1.0, ovsdb.MutateOperationAdd, 1.0, 2.0, }, { "add float set", 
[]float64{1.0, 2.0, 3.0}, ovsdb.MutateOperationAdd, 1.0, []float64{2.0, 3.0, 4.0}, }, { "add int set float", []int{1, 2, 3}, ovsdb.MutateOperationAdd, 1, []int{2, 3, 4}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.want, diff) }) } } func TestMutateSubtract(t *testing.T) { tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} }{ { "subtract int", 1, ovsdb.MutateOperationSubtract, 1, 0, }, { "subtract float", 1.0, ovsdb.MutateOperationSubtract, 1.0, 0.0, }, { "subtract float set", []float64{1.0, 2.0, 3.0}, ovsdb.MutateOperationSubtract, 1.0, []float64{0.0, 1.0, 2.0}, }, { "subtract int set", []int{1, 2, 3}, ovsdb.MutateOperationSubtract, 1, []int{0, 1, 2}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.want, diff) }) } } func TestMutateMultiply(t *testing.T) { tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} }{ { "multiply int", 1, ovsdb.MutateOperationMultiply, 2, 2, }, { "multiply float", 1.0, ovsdb.MutateOperationMultiply, 2.0, 2.0, }, { "multiply float set", []float64{1.0, 2.0, 3.0}, ovsdb.MutateOperationMultiply, 2.0, []float64{2.0, 4.0, 6.0}, }, { "multiply int set", []int{1, 2, 3}, ovsdb.MutateOperationMultiply, 2, []int{2, 4, 6}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.want, diff) }) } } func TestMutateDivide(t *testing.T) { tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} }{ { "divide int", 10, ovsdb.MutateOperationDivide, 2, 5, }, { "divide float", 1.0, ovsdb.MutateOperationDivide, 2.0, 0.5, }, { "divide float set", []float64{1.0, 2.0, 4.0}, ovsdb.MutateOperationDivide, 2.0, []float64{0.5, 1.0, 2.0}, }, { "divide int set", []int{10, 20, 30}, ovsdb.MutateOperationDivide, 5, []int{2, 4, 6}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.want, diff) }) } } func TestMutateModulo(t *testing.T) { tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} }{ { "modulo int", 3, ovsdb.MutateOperationModulo, 2, 1, }, { "modulo int set", []int{3, 5, 7}, ovsdb.MutateOperationModulo, 2, []int{1, 1, 1}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.want, diff) }) } } func TestMutateInsert(t *testing.T) { var nilSlice []string var nilMap map[string]string tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} diff interface{} }{ { "insert single string", []string{"foo", "bar"}, ovsdb.MutateOperationInsert, "baz", []string{"foo", "bar", "baz"}, "baz", }, { "insert in to nil value", nil, ovsdb.MutateOperationInsert, []string{"foo"}, []string{"foo"}, []string{"foo"}, }, { "insert in to nil slice", nilSlice, ovsdb.MutateOperationInsert, []string{"foo"}, []string{"foo"}, []string{"foo"}, }, { "insert existing string", []string{"foo", "bar", "baz"}, ovsdb.MutateOperationInsert, "baz", []string{"foo", "bar", 
"baz"}, nil, }, { "insert multiple string", []string{"foo", "bar"}, ovsdb.MutateOperationInsert, []string{"baz", "quux", "foo"}, []string{"foo", "bar", "baz", "quux"}, []string{"baz", "quux"}, }, { "insert key value pairs", map[string]string{ "foo": "bar", }, ovsdb.MutateOperationInsert, map[string]string{ "foo": "ignored", "baz": "quux", }, map[string]string{ "foo": "bar", "baz": "quux", }, map[string]string{ "baz": "quux", }, }, { "insert key value pairs on nil value", nil, ovsdb.MutateOperationInsert, map[string]string{ "foo": "bar", }, map[string]string{ "foo": "bar", }, map[string]string{ "foo": "bar", }, }, { "insert key value pairs on nil map", nilMap, ovsdb.MutateOperationInsert, map[string]string{ "foo": "bar", }, map[string]string{ "foo": "bar", }, map[string]string{ "foo": "bar", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.diff, diff) }) } } func TestMutateDelete(t *testing.T) { tests := []struct { name string current interface{} mutator ovsdb.Mutator value interface{} want interface{} diff interface{} }{ { "delete single string", []string{"foo", "bar"}, ovsdb.MutateOperationDelete, "bar", []string{"foo"}, "bar", }, { "delete multiple string", []string{"foo", "bar", "baz"}, ovsdb.MutateOperationDelete, []string{"bar", "baz"}, []string{"foo"}, []string{"bar", "baz"}, }, { "delete key value pairs", map[string]string{ "foo": "bar", "baz": "quux", }, ovsdb.MutateOperationDelete, map[string]string{ "foo": "ignored", "baz": "quux", }, map[string]string{ "foo": "bar", }, map[string]string{ "baz": "quux", }, }, { "delete non-existent key value pairs", map[string]string{ "foo": "bar", "baz": "quux", }, ovsdb.MutateOperationDelete, map[string]string{ "key": "value", }, map[string]string{ "foo": "bar", "baz": "quux", }, nil, }, { "delete keys", map[string]string{ "foo": "bar", "baz": "quux", }, ovsdb.MutateOperationDelete, []string{"foo"}, map[string]string{ "baz": "quux", }, map[string]string{ "foo": "bar", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, diff := mutate(tt.current, tt.mutator, tt.value) assert.Equal(t, tt.want, got) assert.Equal(t, tt.diff, diff) }) } } golang-github-ovn-org-libovsdb-0.7.0/updates/references.go000066400000000000000000000565441464501522100235670ustar00rootroot00000000000000package updates import ( "fmt" "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) // ReferenceProvider should be implemented by a database that tracks references type ReferenceProvider interface { // GetReferences provides the references to the provided row GetReferences(database, table, uuid string) (database.References, error) // Get provides the corresponding model Get(database, table string, uuid string) (model.Model, error) } // DatabaseUpdate bundles updates together with the updated // reference information type DatabaseUpdate struct { ModelUpdates referenceUpdates database.References } func (u DatabaseUpdate) ForReferenceUpdates(do func(references database.References) error) error { refsCopy := database.References{} // since refsCopy is empty, this will just copy everything applyReferenceModifications(refsCopy, u.referenceUpdates) return do(refsCopy) } func NewDatabaseUpdate(updates ModelUpdates, references database.References) DatabaseUpdate { return DatabaseUpdate{ ModelUpdates: updates, referenceUpdates: references, } } // ProcessReferences tracks referential 
integrity for the provided set of // updates. It returns an updated set of updates which includes additional // updates and updated references as a result of the reference garbage // collection described in RFC7047. These additional updates resulting from the // reference garbage collection are also returned separately. Any constraint or // referential integrity violation is returned as an error. func ProcessReferences(dbModel model.DatabaseModel, provider ReferenceProvider, updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { referenceTracker := newReferenceTracker(dbModel, provider) return referenceTracker.processReferences(updates) } type referenceTracker struct { dbModel model.DatabaseModel provider ReferenceProvider // updates that are being processed updates ModelUpdates // references are the updated references by the set of updates processed references database.References // helper maps to track the rows that we are processing and their tables tracked map[string]string added map[string]string deleted map[string]string } func newReferenceTracker(dbModel model.DatabaseModel, provider ReferenceProvider) *referenceTracker { return &referenceTracker{ dbModel: dbModel, provider: provider, } } func (rt *referenceTracker) processReferences(updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { rt.updates = updates rt.tracked = make(map[string]string) rt.added = make(map[string]string) rt.deleted = make(map[string]string) rt.references = make(database.References) referenceUpdates, err := rt.processReferencesLoop(updates) if err != nil { return ModelUpdates{}, ModelUpdates{}, nil, err } // merge the updates generated from reference tracking into the main updates err = updates.Merge(rt.dbModel, referenceUpdates) if err != nil { return ModelUpdates{}, ModelUpdates{}, nil, err } return updates, referenceUpdates, rt.references, nil } func (rt *referenceTracker) processReferencesLoop(updates ModelUpdates) (ModelUpdates, error) { referenceUpdates := ModelUpdates{} // references can be transitive and deleting them can lead to further // references having to be removed so loop until there are no updates to be // made for len(updates.updates) > 0 { // update the references from the updates err := rt.processModelUpdates(updates) if err != nil { return ModelUpdates{}, err } // process strong reference integrity updates, err = rt.processStrongReferences() if err != nil { return ModelUpdates{}, err } // process weak reference integrity weakUpdates, err := rt.processWeakReferences() if err != nil { return ModelUpdates{}, err } // merge strong and weak reference updates err = updates.Merge(rt.dbModel, weakUpdates) if err != nil { return ModelUpdates{}, err } // merge updates from this iteration to the overall reference updates err = referenceUpdates.Merge(rt.dbModel, updates) if err != nil { return ModelUpdates{}, err } } return referenceUpdates, nil } // processModelUpdates keeps track of the updated references by a set of updates func (rt *referenceTracker) processModelUpdates(updates ModelUpdates) error { tables := updates.GetUpdatedTables() for _, table := range tables { err := updates.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error { return rt.processRowUpdate(table, uuid, &row) }) if err != nil { return err } } return nil } // processRowUpdate keeps track of the updated references by a given row update func (rt *referenceTracker) processRowUpdate(table, uuid string, row *ovsdb.RowUpdate2) error { // getReferencesFromRowModify 
extracts updated references from the // modifications. Following the same strategy as the modify field of Update2 // notification, it will extract a difference, that is, both old removed // references and new added references are extracted. This difference will // then be applied to currently tracked references to come up with the // updated references. // For more info on the modify field of Update2 notification and the // strategy used to apply differences, check // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification var updateRefs database.References switch { case row.Delete != nil: rt.deleted[uuid] = table updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Old, row.Old) case row.Modify != nil: updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Modify, row.Old) case row.Insert != nil: if !isRoot(&rt.dbModel, table) { // track rows added that are not part of the root set, we might need // to delete those later rt.added[uuid] = table rt.tracked[uuid] = table } updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Insert, nil) } // (lazy) initialize existing references to the same rows from the database for spec, refs := range updateRefs { for to := range refs { err := rt.initReferences(spec.ToTable, to) if err != nil { return err } } } // apply the reference modifications to the initialized references applyReferenceModifications(rt.references, updateRefs) return nil } // processStrongReferences adds delete operations for rows that are not part of // the root set and are no longer strongly referenced. Returns a referential // integrity violation if a nonexistent row is strongly referenced or a strongly // referenced row has been deleted. func (rt *referenceTracker) processStrongReferences() (ModelUpdates, error) { // make sure that we are tracking the references to the deleted rows err := rt.initReferencesOfDeletedRows() if err != nil { return ModelUpdates{}, err } // track if rows are referenced or not isReferenced := map[string]bool{} // go over the updated references for spec, refs := range rt.references { // we only care about strong references if !isStrong(&rt.dbModel, spec) { continue } for to, from := range refs { // check if the referenced row exists exists, err := rt.rowExists(spec.ToTable, to) if err != nil { return ModelUpdates{}, err } if !exists { for _, uuid := range from { // strong reference to a row that does not exist return ModelUpdates{}, ovsdb.NewReferentialIntegrityViolation(fmt.Sprintf( "Table %s column %s row %s references nonexistent or deleted row %s in table %s", spec.FromTable, spec.FromColumn, uuid, to, spec.ToTable)) } // we deleted the row ourselves on a previous loop continue } // track if this row is referenced from this location spec isReferenced[to] = isReferenced[to] || len(from) > 0 } } // inserted rows that are unreferenced and not part of the root set will // silently be dropped from the updates for uuid := range rt.added { if isReferenced[uuid] { continue } isReferenced[uuid] = false } // delete rows that are not referenced updates := ModelUpdates{} for uuid, isReferenced := range isReferenced { if isReferenced { // row is still referenced, ignore continue } if rt.deleted[uuid] != "" { // already deleted, ignore continue } table := rt.tracked[uuid] if isRoot(&rt.dbModel, table) { // table is part of the root set, ignore continue } // delete row that is not part of the root set and is no longer // referenced update, err := rt.deleteRow(table, uuid) if 
err != nil { return ModelUpdates{}, err } tables := map[string]string{} originalRows := map[string]ovsdb.Row{} updatedRows := map[string]ovsdb.Row{} for spec, refs := range rt.references { // fetch some reference information from the schema extendedType, minLenAllowed, refType, _ := refInfo(&rt.dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) isEmptyAllowed := minLenAllowed == 0 if refType != ovsdb.Weak { // we only care about weak references continue } for to, from := range refs { if len(from) == 0 { // not referenced from anywhere, ignore continue } // check if the referenced row exists exists, err := rt.rowExists(spec.ToTable, to) if err != nil { return ModelUpdates{}, err } if exists { // we only care about rows that have been deleted or otherwise // don't exist continue } // generate the updates to remove the references to deleted rows for _, uuid := range from { if _, ok := updatedRows[uuid]; !ok { updatedRows[uuid] = ovsdb.NewRow() } if rt.deleted[uuid] != "" { // already deleted, ignore continue } // fetch the original rows if originalRows[uuid] == nil { originalRow, err := rt.getRow(spec.FromTable, uuid) if err != nil { return ModelUpdates{}, err } if originalRow == nil { return ModelUpdates{}, fmt.Errorf("reference from non-existent model with uuid %s", uuid) } originalRows[uuid] = *originalRow } var becomesLen int switch extendedType { case ovsdb.TypeMap: // a map referencing the row // generate the mutation to remove the entry from the map originalMap := originalRows[uuid][spec.FromColumn].(ovsdb.OvsMap).GoMap var mutationMap map[interface{}]interface{} value, ok := updatedRows[uuid][spec.FromColumn] if !ok { mutationMap = map[interface{}]interface{}{} } else { mutationMap = value.(ovsdb.OvsMap).GoMap } // copy the map entries referencing the row from the original map mutationMap = copyMapKeyValues(originalMap, mutationMap, !spec.FromValue, ovsdb.UUID{GoUUID: to}) // track the new length of the map if !isEmptyAllowed { becomesLen = len(originalMap) - len(mutationMap) } updatedRows[uuid][spec.FromColumn] = ovsdb.OvsMap{GoMap: mutationMap} case ovsdb.TypeSet: // a set referencing the row // generate the mutation to remove the entry from the set var mutationSet []interface{} value, ok := updatedRows[uuid][spec.FromColumn] if !ok { mutationSet = []interface{}{} } else { mutationSet = value.(ovsdb.OvsSet).GoSet } mutationSet = append(mutationSet, ovsdb.UUID{GoUUID: to}) // track the new length of the set if !isEmptyAllowed { originalSet := originalRows[uuid][spec.FromColumn].(ovsdb.OvsSet).GoSet becomesLen = len(originalSet) - len(mutationSet) } updatedRows[uuid][spec.FromColumn] = ovsdb.OvsSet{GoSet: mutationSet} case ovsdb.TypeUUID: // this is an atomic UUID value that needs to be cleared updatedRows[uuid][spec.FromColumn] = nil becomesLen = 0 } if becomesLen < minLenAllowed { return ModelUpdates{}, ovsdb.NewConstraintViolation(fmt.Sprintf( "Deletion of a weak reference to a deleted (or never-existing) row from column %s in
table %s "+ "row %s caused this column to have an invalid length.", spec.FromColumn, spec.FromTable, uuid)) } // track the table of the row we are going to update tables[uuid] = spec.FromTable } } } // process the updates updates := ModelUpdates{} for uuid, rowUpdate := range updatedRows { update, err := rt.updateRow(tables[uuid], uuid, rowUpdate) if err != nil { return ModelUpdates{}, err } err = updates.Merge(rt.dbModel, update) if err != nil { return ModelUpdates{}, err } } return updates, nil } func copyMapKeyValues(from, to map[interface{}]interface{}, isKey bool, keyValue ovsdb.UUID) map[interface{}]interface{} { if isKey { to[keyValue] = from[keyValue] return to } for key, value := range from { if value.(ovsdb.UUID) == keyValue { to[key] = from[key] } } return to } // initReferences initializes the references to the provided row from the // database func (rt *referenceTracker) initReferences(table, uuid string) error { if _, ok := rt.tracked[uuid]; ok { // already initialized return nil } existingRefs, err := rt.provider.GetReferences(rt.dbModel.Client().Name(), table, uuid) if err != nil { return err } rt.references.UpdateReferences(existingRefs) rt.tracked[uuid] = table return nil } func (rt *referenceTracker) initReferencesOfDeletedRows() error { for uuid, table := range rt.deleted { err := rt.initReferences(table, uuid) if err != nil { return err } } return nil } // deleteRow adds an update to delete the provided row. func (rt *referenceTracker) deleteRow(table, uuid string) (ModelUpdates, error) { model, err := rt.getModel(table, uuid) if err != nil { return ModelUpdates{}, err } row, err := rt.getRow(table, uuid) if err != nil { return ModelUpdates{}, err } updates := ModelUpdates{} update := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}, Old: row} err = updates.AddRowUpdate2(rt.dbModel, table, uuid, model, update) rt.deleted[uuid] = table return updates, err } // updateRow generates updates for the provided row func (rt *referenceTracker) updateRow(table, uuid string, row ovsdb.Row) (ModelUpdates, error) { model, err := rt.getModel(table, uuid) if err != nil { return ModelUpdates{}, err } // In agreement with processWeakReferences, columns with values are assumed // to be values of sets or maps that need to be mutated for deletion. // Columns with no values are assumed to be atomic optional values that need // to be cleared with an update. 
mutations := make([]ovsdb.Mutation, 0, len(row)) update := ovsdb.Row{} for column, value := range row { if value != nil { mutations = append(mutations, *ovsdb.NewMutation(column, ovsdb.MutateOperationDelete, value)) continue } update[column] = ovsdb.OvsSet{GoSet: []interface{}{}} } updates := ModelUpdates{} if len(mutations) > 0 { err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ Op: ovsdb.OperationMutate, Table: table, Mutations: mutations, Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, }) if err != nil { return ModelUpdates{}, err } } if len(update) > 0 { err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Table: table, Row: update, Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, }) if err != nil { return ModelUpdates{}, err } } return updates, nil } // getModel gets the model from the updates or the database func (rt *referenceTracker) getModel(table, uuid string) (model.Model, error) { if _, deleted := rt.deleted[uuid]; deleted { // model has been deleted return nil, nil } // look for the model in the updates model := rt.updates.GetModel(table, uuid) if model != nil { return model, nil } // look for the model in the database model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) if err != nil { return nil, err } return model, nil } // getRow gets the row from the updates or the database func (rt *referenceTracker) getRow(table, uuid string) (*ovsdb.Row, error) { if _, deleted := rt.deleted[uuid]; deleted { // row has been deleted return nil, nil } // look for the row in the updates row := rt.updates.GetRow(table, uuid) if row != nil { return row, nil } // look for the model in the database and build the row model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) if err != nil { return nil, err } info, err := rt.dbModel.NewModelInfo(model) if err != nil { return nil, err } newRow, err := rt.dbModel.Mapper.NewRow(info) if err != nil { return nil, err } return &newRow, nil } // rowExists returns whether the row exists either in the updates or the database func (rt *referenceTracker) rowExists(table, uuid string) (bool, error) { model, err := rt.getModel(table, uuid) return model != nil, err } func getReferenceModificationsFromRow(dbModel *model.DatabaseModel, table, uuid string, modify, old *ovsdb.Row) database.References { refs := database.References{} for column, value := range *modify { var oldValue interface{} if old != nil { oldValue = (*old)[column] } crefs := getReferenceModificationsFromColumn(dbModel, table, uuid, column, value, oldValue) refs.UpdateReferences(crefs) } return refs } func getReferenceModificationsFromColumn(dbModel *model.DatabaseModel, table, uuid, column string, modify, old interface{}) database.References { switch v := modify.(type) { case ovsdb.UUID: var oldUUID ovsdb.UUID if old != nil { oldUUID = old.(ovsdb.UUID) } return getReferenceModificationsFromAtom(dbModel, table, uuid, column, v, oldUUID) case ovsdb.OvsSet: var oldSet ovsdb.OvsSet if old != nil { oldSet = old.(ovsdb.OvsSet) } return getReferenceModificationsFromSet(dbModel, table, uuid, column, v, oldSet) case ovsdb.OvsMap: return getReferenceModificationsFromMap(dbModel, table, uuid, column, v) } return nil } func getReferenceModificationsFromMap(dbModel *model.DatabaseModel, table, uuid, column string, value ovsdb.OvsMap) database.References { if len(value.GoMap) == 0 { return nil 
} // get the referenced table keyRefTable := refTable(dbModel, table, column, false) valueRefTable := refTable(dbModel, table, column, true) if keyRefTable == "" && valueRefTable == "" { return nil } from := uuid keySpec := database.ReferenceSpec{ToTable: keyRefTable, FromTable: table, FromColumn: column, FromValue: false} valueSpec := database.ReferenceSpec{ToTable: valueRefTable, FromTable: table, FromColumn: column, FromValue: true} refs := database.References{} for k, v := range value.GoMap { if keyRefTable != "" { switch to := k.(type) { case ovsdb.UUID: if _, ok := refs[keySpec]; !ok { refs[keySpec] = database.Reference{to.GoUUID: []string{from}} } else if _, ok := refs[keySpec][to.GoUUID]; !ok { refs[keySpec][to.GoUUID] = append(refs[keySpec][to.GoUUID], from) } } } if valueRefTable != "" { switch to := v.(type) { case ovsdb.UUID: if _, ok := refs[valueSpec]; !ok { refs[valueSpec] = database.Reference{to.GoUUID: []string{from}} } else if _, ok := refs[valueSpec][to.GoUUID]; !ok { refs[valueSpec][to.GoUUID] = append(refs[valueSpec][to.GoUUID], from) } } } } return refs } func getReferenceModificationsFromSet(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.OvsSet) database.References { // if the modify set is empty, it means the op is clearing an atomic value // so pick the old value instead value := modify if len(modify.GoSet) == 0 { value = old } if len(value.GoSet) == 0 { return nil } // get the referenced table refTable := refTable(dbModel, table, column, false) if refTable == "" { return nil } spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} from := uuid refs := database.References{spec: database.Reference{}} for _, v := range value.GoSet { switch to := v.(type) { case ovsdb.UUID: refs[spec][to.GoUUID] = append(refs[spec][to.GoUUID], from) } } return refs } func getReferenceModificationsFromAtom(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.UUID) database.References { // get the referenced table refTable := refTable(dbModel, table, column, false) if refTable == "" { return nil } spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} from := uuid to := modify.GoUUID refs := database.References{spec: {to: {from}}} if old.GoUUID != "" { // extract the old value as well refs[spec][old.GoUUID] = []string{from} } return refs } // applyReferenceModifications updates references in 'a' from those in 'b' func applyReferenceModifications(a, b database.References) { for spec, bv := range b { for to, bfrom := range bv { if av, ok := a[spec]; ok { if afrom, ok := av[to]; ok { r, _ := applyDifference(afrom, bfrom) av[to] = r.([]string) } else { // this reference is not in 'a', so add it av[to] = bfrom } } else { // this reference is not in 'a', so add it a[spec] = database.Reference{to: bfrom} } } } } func refInfo(dbModel *model.DatabaseModel, table, column string, mapValue bool) (ovsdb.ExtendedType, int, ovsdb.RefType, string) { tSchema := dbModel.Schema.Table(table) if tSchema == nil { panic(fmt.Sprintf("unexpected schema error: no schema for table %s", table)) } cSchema := tSchema.Column(column) if cSchema == nil { panic(fmt.Sprintf("unexpected schema error: no schema for column %s", column)) } cType := cSchema.TypeObj if cType == nil { // this is not a reference return "", 0, "", "" } var bType *ovsdb.BaseType switch { case !mapValue && cType.Key != nil: bType = cType.Key case mapValue && cType.Value != nil: bType = cType.Value default: 
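// the schema defines no base type for the requested key/value position of
// this column, which indicates a schema/model mismatch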
panic(fmt.Sprintf("unexpected schema error: no schema for map value on column %s", column)) } if bType.Type != ovsdb.TypeUUID { // this is not a reference return "", 0, "", "" } // treat optional values represented with sets as atomic UUIDs extendedType := cSchema.Type if extendedType == ovsdb.TypeSet && cType.Min() == 0 && cType.Max() == 1 { extendedType = ovsdb.TypeUUID } rType, err := bType.RefType() if err != nil { panic(fmt.Sprintf("unexpected schema error: %v", err)) } rTable, err := bType.RefTable() if err != nil { panic(fmt.Sprintf("unexpected schema error: %v", err)) } return extendedType, cType.Min(), rType, rTable } func refTable(dbModel *model.DatabaseModel, table, column string, mapValue bool) ovsdb.RefType { _, _, _, refTable := refInfo(dbModel, table, column, mapValue) return refTable } func isRoot(dbModel *model.DatabaseModel, table string) bool { isRoot, err := dbModel.Schema.IsRoot(table) if err != nil { panic(fmt.Sprintf("unexpected schema error: %v", err)) } return isRoot } func isStrong(dbModel *model.DatabaseModel, spec database.ReferenceSpec) bool { _, _, refType, _ := refInfo(dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) return refType == ovsdb.Strong } golang-github-ovn-org-libovsdb-0.7.0/updates/references_test.go000066400000000000000000001657111464501522100246230ustar00rootroot00000000000000package updates import ( "encoding/json" "fmt" "reflect" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ovn-org/libovsdb/database" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) const referencesTestSchema = ` { "name": "References_Test", "version": "0.0.1", "tables": { "Parent": { "columns": { "strong_atomic_required_reference": { "type": { "key": { "type": "uuid", "refTable": "Child" }, "min": 1, "max": 1 } }, "strong_atomic_optional_reference": { "type": { "key": { "type": "uuid", "refTable": "Child" }, "min": 0, "max": 1 } }, "strong_set_reference": { "type": { "key": { "type": "uuid", "refTable": "Child" }, "min": 0, "max": "unlimited" } }, "strong_map_key_reference": { "type": { "key": { "type": "uuid", "refTable": "Child" }, "value": { "type": "string" }, "min": 0, "max": "unlimited" } }, "strong_map_value_reference": { "type": { "key": { "type": "string" }, "value": { "type": "uuid", "refTable": "Child" }, "min": 1, "max": "unlimited" } }, "weak_atomic_required_reference": { "type": { "key": { "type": "uuid", "refTable": "Child", "refType": "weak" }, "min": 1, "max": 1 } }, "weak_atomic_optional_reference": { "type": { "key": { "type": "uuid", "refTable": "Child", "refType": "weak" }, "min": 0, "max": 1 } }, "weak_set_reference": { "type": { "key": { "type": "uuid", "refTable": "Child", "refType": "weak" }, "min": 2, "max": "unlimited" } }, "weak_map_key_reference": { "type": { "key": { "type": "uuid", "refTable": "Child", "refType": "weak" }, "value": { "type": "string" }, "min": 1, "max": "unlimited" } }, "weak_map_value_reference": { "type": { "key": { "type": "string" }, "value": { "type": "uuid", "refTable": "Child", "refType": "weak" }, "min": 1, "max": "unlimited" } }, "map_key_value_reference": { "type": { "key": { "type": "uuid", "refTable": "Child", "refType": "weak" }, "value": { "type": "uuid", "refTable": "Child", "refType": "strong" }, "min": 0, "max": "unlimited" } } }, "isRoot": true }, "Child": { "columns": { "name": { "type": "string", "mutable": false }, "strong_atomic_optional_reference": { "type": { "key": { "type": "uuid", "refTable": "Grandchild" }, "min": 0, 
"max": 1 } }, "weak_atomic_optional_reference": { "type": { "key": { "type": "uuid", "refTable": "Grandchild", "refType": "weak" }, "min": 0, "max": 1 } } }, "indexes": [ [ "name" ] ] }, "Grandchild": { "columns": { "name": { "type": "string", "mutable": false } }, "indexes": [ [ "name" ] ] } } } ` type Parent struct { UUID string `ovsdb:"_uuid"` StrongAtomicRequiredReference string `ovsdb:"strong_atomic_required_reference"` StrongAtomicOptionalReference *string `ovsdb:"strong_atomic_optional_reference"` StrongSetReference []string `ovsdb:"strong_set_reference"` StrongMapKeyReference map[string]string `ovsdb:"strong_map_key_reference"` StrongMapValueReference map[string]string `ovsdb:"strong_map_value_reference"` WeakAtomicRequiredReference string `ovsdb:"weak_atomic_required_reference"` WeakAtomicOptionalReference *string `ovsdb:"weak_atomic_optional_reference"` WeakSetReference []string `ovsdb:"weak_set_reference"` WeakMapKeyReference map[string]string `ovsdb:"weak_map_key_reference"` WeakMapValueReference map[string]string `ovsdb:"weak_map_value_reference"` MapKeyValueReference map[string]string `ovsdb:"map_key_value_reference"` } type Child struct { UUID string `ovsdb:"_uuid"` StrongAtomicOptionalReference *string `ovsdb:"strong_atomic_optional_reference"` WeakAtomicOptionalReference *string `ovsdb:"weak_atomic_optional_reference"` } type Grandchild struct { UUID string `ovsdb:"_uuid"` } func getReferencesTestDBModel() (model.DatabaseModel, error) { client, err := model.NewClientDBModel( "References_Test", map[string]model.Model{ "Parent": &Parent{}, "Child": &Child{}, "Grandchild": &Grandchild{}, }, ) if err != nil { return model.DatabaseModel{}, err } schema, err := getReferencesTestSchema() if err != nil { return model.DatabaseModel{}, err } dbModel, errs := model.NewDatabaseModel(schema, client) if len(errs) > 0 { return model.DatabaseModel{}, fmt.Errorf("errors build model: %v", errs) } return dbModel, nil } func getReferencesTestSchema() (ovsdb.DatabaseSchema, error) { var dbSchema ovsdb.DatabaseSchema err := json.Unmarshal([]byte(referencesTestSchema), &dbSchema) return dbSchema, err } type testReferenceProvider struct { models map[string]model.Model references database.References } func (rp *testReferenceProvider) GetReferences(database, table, uuid string) (database.References, error) { return rp.references.GetReferences(table, uuid), nil } func (rp *testReferenceProvider) Get(database, table string, uuid string) (model.Model, error) { return rp.models[uuid], nil } var ( referencesTestDBModel model.DatabaseModel ) func ptr(s string) *string { return &s } type testData struct { existingModels []model.Model updatedModels []model.Model finalModels []model.Model existingReferences database.References wantUpdatedReferences database.References } func TestProcessReferences(t *testing.T) { var err error referencesTestDBModel, err = getReferencesTestDBModel() if err != nil { t.Errorf("error building DB model: %v", err) } tests := []struct { name string testData testData wantErr bool }{ { // when a strong reference is replaced with another in a required atomic // field, the referenced row should be deleted name: "strong atomic required reference garbage collected when replaced", testData: strongAtomicRequiredReferenceTestData(), }, { // attempting to delete a row that is strongly referenced from a // required atomic field should fail name: "constraint violation when strongly referenced row from required field deleted", testData: 
strongAtomicRequiredReferenceDeleteConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to add a required strong reference to a nonexistent row should // fail name: "constraint violation when strong required reference to nonexistent row added", testData: strongAtomicRequiredReferenceAddConstraintViolationErrorTestData(), wantErr: true, }, { // when a strong reference is removed from an optional atomic field, the // referenced row should be deleted name: "strong atomic optional reference garbage collected when removed", testData: strongAtomicOptionalReferenceTestData(), }, { // attempting to delete a row that is strongly referenced from an // optional atomic field should fail name: "constraint violation when strongly referenced row from optional field deleted", testData: strongAtomicOptionalReferenceDeleteConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to add a optional strong reference to a nonexistent // row should fail name: "constraint violation when strong optional reference to nonexistent row added", testData: strongAtomicOptionalReferenceAddConstraintViolationErrorTestData(), wantErr: true, }, { // when a strong reference is removed from a set, the referenced row should // be deleted name: "strong reference garbage collected when removed from set", testData: strongSetReferenceTestData(), }, { // attempting to remove a row that is still strongly referenced in a set should fail name: "strong set reference constraint violation when row deleted error", testData: strongSetReferenceDeleteConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to add strong set reference to non existent row should fail name: "strong set reference constraint violation when nonexistent reference added error", testData: strongSetReferenceAddConstraintViolationErrorTestData(), wantErr: true, }, { // when a strong reference is removed from a map key, the // referenced row should be deleted name: "strong reference garbage collected when removed from map key", testData: strongMapKeyReferenceTestData(), }, { // attempting to remove a row that is still strongly referenced in a // map key should fail name: "strong map key reference constraint violation when row deleted error", testData: strongMapKeyReferenceDeleteConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to add strong map key reference to non existent row should fail name: "strong map key reference constraint violation when nonexistent reference added error", testData: strongMapKeyReferenceAddConstraintViolationErrorTestData(), wantErr: true, }, { // when a strong reference is removed from a map value, the // referenced row should be deleted name: "strong reference garbage collected when removed from map value", testData: strongMapValueReferenceTestData(), }, { // attempting to remove a row that is still strongly referenced in a // map value should fail name: "strong map value reference constraint violation when row deleted error", testData: strongMapValueReferenceDeleteConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to add strong map value reference to non existent row should fail name: "strong map value reference constraint violation when nonexistent reference added error", testData: strongMapValueReferenceAddConstraintViolationErrorTestData(), wantErr: true, }, { // when a weak referenced row is deleted, the reference on an atomic // optional field is also deleted name: "weak atomic optional reference deleted when row deleted", testData: 
weakAtomicOptionalReferenceTestData(), }, { // when a weak referenced row is deleted, the reference on an set is // also deleted name: "weak reference deleted from set when row deleted", testData: weakSetReferenceTestData(), }, { // when a weak referenced row is deleted, the reference on a map // key is also deleted name: "weak reference deleted from map key when row deleted", testData: weakMapKeyReferenceTestData(), }, { // when a weak referenced row is deleted, the reference on a map // value is also deleted name: "weak reference deleted from map value when row deleted", testData: weakMapValueReferenceTestData(), }, { // attempting to delete a weak referenced row when it is referenced // from an atomic required field will fail name: "weak reference constraint violation in required atomic field when row deleted error", testData: weakAtomicReferenceConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to delete a weak referenced row when it is referenced // from an set that then becomes smaller than the minimum allowed // will fail name: "weak reference constraint violation in set becoming smaller than allowed error", testData: weakSetReferenceConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to delete a weak referenced row when it is referenced // from a map key that then becomes smaller than the minimum // allowed will fail name: "weak reference constraint violation in map key field becoming smaller than allowed error", testData: weakMapKeyReferenceConstraintViolationErrorTestData(), wantErr: true, }, { // attempting to delete a weak referenced row when it is referenced // from a map value that then becomes smaller than the minimum // allowed will fail name: "weak reference constraint violation in map value field becoming smaller than allowed error", testData: weakMapValueReferenceConstraintViolationErrorTestData(), wantErr: true, }, { // testing behavior with multiple combinations of references name: "multiple strong and weak reference changes", testData: multipleReferencesTestData(), }, { // corner case // inserting a row in a table that is not part of the root set and // is not strongly referenced is a noop name: "insert unreferenced row in non root set table is a noop", testData: insertNoRootUnreferencedRowTestData(), }, { // corner case // adding a weak reference to a nonexistent row is a noop name: "insert weak reference to nonexistent row is a noop", testData: weakReferenceToNonExistentRowTestData(), }, { // corner case // for a map holding weak key references to strong value references, when // the weak reference row is deleted, the map entry and the strongly // referenced row is also deleted name: "map with key weak reference and value strong reference, weak reference and strong referenced row deleted", testData: mapKeyValueReferenceTestData(), wantErr: false, }, { // corner case // when a weak referenced row is deleted, multiple references on a map // value are also deleted name: "multiple weak references deleted from map value when row deleted", testData: multipleWeakMapValueReferenceTestData(), wantErr: false, }, { // corner case when multiple rows are transitively & strongly // referenced, garbage collection happens transitively as well name: "transitive strong references garbage collected when removed", testData: transitiveStrongReferenceTestData(), }, { // corner case // when a strong referenced is removed, an unreferenced row will be // garbage collected and weak references to it removed name: "transitive strong and weak references 
garbage collected when removed", testData: transitiveStrongAndWeakReferenceTestData(), }, { // corner case // a row needs to have a weak reference garbage collected and // at the same time that row itself is garbage collected due to not // being strongly referenced name: "strong and weak garbage collection over the same row doesn't fail", testData: sameRowStrongAndWeakReferenceTestData(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { td := tt.testData rp := testReferenceProvider{ models: indexModels(td.existingModels), references: td.existingReferences, } onUpdates, err := getUpdates(td.existingModels, td.updatedModels) require.NoError(t, err, "failed to build updates from existing and updated models") // need a copy easiest way to have it is generating the updates all // over again onUpdatesCopy, err := getUpdates(td.existingModels, td.updatedModels) require.NoError(t, err, "failed to build updates copy from existing and updated models") gotModelUpdates, gotReferenceModelUpdates, gotReferenceUpdates, err := ProcessReferences(referencesTestDBModel, &rp, onUpdates) if tt.wantErr { assert.NotNil(t, err, "expected an error but got none") return } assert.NoError(t, err, "got a different error than expected") //gotModelUpdates := gotUpdates.(modelUpdatesWithReferences).ModelUpdates wantModelUpdates, err := getUpdates(td.existingModels, td.finalModels) require.NoError(t, err, "failed to build updates from existing and final models") assert.Equal(t, wantModelUpdates, gotModelUpdates, "got different updates than expected") //gotUpdatedReferences := gotUpdates.(modelUpdatesWithReferences).references assert.Equal(t, td.wantUpdatedReferences, gotReferenceUpdates, "got different reference updates than expected") gotMergedModelUpdates := onUpdatesCopy err = gotMergedModelUpdates.Merge(referencesTestDBModel, gotReferenceModelUpdates) require.NoError(t, err) assert.Equal(t, gotModelUpdates, gotMergedModelUpdates, "the updates are not a result of merging the initial updates with the reference updates") }) } } func getUUID(model model.Model) string { return reflect.ValueOf(model).Elem().FieldByName("UUID").Interface().(string) } func indexModels(models []model.Model) map[string]model.Model { indexed := map[string]model.Model{} for _, model := range models { indexed[getUUID(model)] = model } return indexed } // getUpdates returns the updates needed to go from existing to updated func getUpdates(existing, updated []model.Model) (ModelUpdates, error) { // index the models by uuid existingModels := indexModels(existing) updatedModels := indexModels(updated) // helpers tables := map[string]string{} getRow := func(model model.Model, fields ...interface{}) (ovsdb.Row, error) { info, err := referencesTestDBModel.NewModelInfo(model) if err != nil { return nil, err } row, err := referencesTestDBModel.Mapper.NewRow(info, fields...) 
if err != nil { return nil, err } tables[getUUID(model)] = info.Metadata.TableName return row, nil } getUpdateOp := func(old, new model.Model) (ovsdb.Operation, error) { var err error var row ovsdb.Row // insert if old == nil { row, err := getRow(new) return ovsdb.Operation{ Op: ovsdb.OperationInsert, Table: tables[getUUID(new)], Row: row, }, err } // delete if new == nil { // lazy, just to cache the table of the row _, err := getRow(old) return ovsdb.Operation{ Op: ovsdb.OperationDelete, Table: tables[getUUID(old)], Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: getUUID(old)})}, }, err } // update, just with the fields that have been changed fields := []interface{}{} xv := reflect.ValueOf(new).Elem() xt := xv.Type() for i := 0; i < xt.NumField(); i++ { if !reflect.DeepEqual(xv.Field(i).Interface(), reflect.ValueOf(old).Elem().Field(i).Interface()) { fields = append(fields, xv.Field(i).Addr().Interface()) } } row, err = getRow(new, fields...) return ovsdb.Operation{ Op: ovsdb.OperationUpdate, Table: tables[getUUID(new)], Row: row, Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: getUUID(new)})}, }, err } // generate updates updates := ModelUpdates{} for uuid, updatedModel := range updatedModels { op, err := getUpdateOp(existingModels[uuid], updatedModel) if err != nil { return updates, err } err = updates.AddOperation(referencesTestDBModel, tables[uuid], uuid, existingModels[uuid], &op) if err != nil { return updates, err } } // deletes for uuid := range existingModels { if updatedModels[uuid] != nil { continue } op, err := getUpdateOp(existingModels[uuid], nil) if err != nil { return updates, err } err = updates.AddOperation(referencesTestDBModel, tables[uuid], uuid, existingModels[uuid], &op) if err != nil { return updates, err } } return updates, nil } func strongAtomicRequiredReferenceTestData() testData { // when a strong reference is replaced with another in a required atomic // field, the referenced row should be deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicRequiredReference: "child", }, &Child{ UUID: "child", }, }, // newChild is added and parent reference is replaced with newChild updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicRequiredReference: "newChild", }, &Child{ UUID: "child", }, &Child{ UUID: "newChild", }, }, // child model should be deleted as it is no longer referenced finalModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicRequiredReference: "newChild", }, &Child{ UUID: "newChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_required_reference", }: database.Reference{ "child": []string{"parent"}, }, }, // child model is no longer referenced, newChild is wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_required_reference", }: database.Reference{ "child": nil, "newChild": []string{"parent"}, }, }, } } func strongAtomicRequiredReferenceDeleteConstraintViolationErrorTestData() testData { // attempting to delete a row that is strongly referenced from a required // atomic field should fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicRequiredReference: "child", }, &Child{ UUID: "child", }, }, // child is removed but will fail as it is still referenced updatedModels: []model.Model{ &Parent{ UUID: 
"parent", StrongAtomicRequiredReference: "child", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_required_reference", }: database.Reference{ "child": []string{"parent"}, }, }, } } func strongAtomicRequiredReferenceAddConstraintViolationErrorTestData() testData { // attempting to add a required strong reference to a nonexistent row should // fail return testData{ updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicRequiredReference: "child", }, }, } } func strongAtomicOptionalReferenceTestData() testData { // when a strong reference is removed from an optional atomic field, the // referenced row should be deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", }, }, // parent reference to child is removed updatedModels: []model.Model{ &Parent{ UUID: "parent", }, &Child{ UUID: "child", }, }, // child model should be deleted as it is no longer referenced finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": nil, }, }, } } func strongAtomicOptionalReferenceDeleteConstraintViolationErrorTestData() testData { // attempting to delete a row that is strongly referenced from an optional // atomic field should fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", }, }, // child is removed but will fail as it is still referenced updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, }, } } func strongAtomicOptionalReferenceAddConstraintViolationErrorTestData() testData { // attempting to add a optional strong reference to a nonexistent row should // fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", }, }, // add reference to child but will fail as it does not exist updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), }, }, } } func strongSetReferenceTestData() testData { // when a strong reference is removed from a set, the referenced row should // be deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"child", "otherChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child reference is removed from the set updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"otherChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child model should be deleted as it is no longer referenced finalModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"otherChild"}, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: 
"strong_set_reference", }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_set_reference", }: database.Reference{ "child": nil, }, }, } } func strongMapKeyReferenceTestData() testData { // when a strong reference is removed from a map key, the referenced row // should be deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongMapKeyReference: map[string]string{ "child": "value", }, }, &Child{ UUID: "child", }, }, // child reference is removed from the map updatedModels: []model.Model{ &Parent{ UUID: "parent", }, &Child{ UUID: "child", }, }, // child model should be deleted as it is no longer referenced finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_map_key_reference", FromValue: false, }: database.Reference{ "child": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_map_key_reference", FromValue: false, }: database.Reference{ "child": nil, }, }, } } func strongMapKeyReferenceDeleteConstraintViolationErrorTestData() testData { // attempting to remove a row that is still strongly referenced in a map key // should fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongMapKeyReference: map[string]string{ "child": "value", }, }, &Child{ UUID: "child", }, }, // child is removed but will fail as it is still referenced updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongMapKeyReference: map[string]string{ "child": "value", }, }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_map_key_reference", FromValue: false, }: database.Reference{ "child": []string{"parent"}, }, }, } } func strongMapKeyReferenceAddConstraintViolationErrorTestData() testData { // attempting to add a map key strong reference to a nonexistent row should // fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", }, }, // child reference is added to the map but wil fail as child does not // exist updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongMapKeyReference: map[string]string{"child": "value"}, }, }, } } func strongMapValueReferenceTestData() testData { // when a strong reference is removed from a map value, the referenced row // should be deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongMapValueReference: map[string]string{ "key1": "child", "key2": "otherChild", }, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child reference is removed from the map updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongMapValueReference: map[string]string{ "key2": "otherChild", }, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child model should be deleted as it is no longer referenced finalModels: []model.Model{ &Parent{ UUID: "parent", StrongMapValueReference: map[string]string{ "key2": "otherChild", }, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_map_value_reference", FromValue: true, }: 
database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_map_value_reference", FromValue: true, }: database.Reference{ "child": nil, }, }, } } func strongMapValueReferenceDeleteConstraintViolationErrorTestData() testData { // attempting to remove a row that is still strongly referenced in a map value // should fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongMapKeyReference: map[string]string{ "key": "child", }, }, &Child{ UUID: "child", }, }, // child is removed but will fail as it is still referenced updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongMapKeyReference: map[string]string{ "key": "child", }, }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_map_value_reference", FromValue: true, }: database.Reference{ "child": []string{"parent"}, }, }, } } func strongMapValueReferenceAddConstraintViolationErrorTestData() testData { // attempting to add a map key strong reference to a nonexistent row should // fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", }, }, // child reference is added to the map but wil fail as is it doesn't exist updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongMapValueReference: map[string]string{"key": "child"}, }, }, } } func strongSetReferenceDeleteConstraintViolationErrorTestData() testData { // attempting to remove a row that is still strongly referenced should fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"child", "otherChild"}, }, &Parent{ UUID: "otherParent", StrongSetReference: []string{"child"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child is deleted from parent but will fail as it is still referenced // from other parent updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"otherChild"}, }, &Parent{ UUID: "otherParent", StrongSetReference: []string{"child"}, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_set_reference", }: database.Reference{ "child": []string{"parent", "otherParent"}, "otherChild": []string{"parent"}, }, }, } } func strongSetReferenceAddConstraintViolationErrorTestData() testData { // attempting to add strong reference to non existent row should fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"child"}, }, &Child{ UUID: "child", }, }, // otherChild reference is added to parent but will fail as otherChild // does not exist updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"child", "otherChild"}, }, &Child{ UUID: "child", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_set_reference", }: database.Reference{ "child": []string{"parent"}, }, }, } } func weakAtomicOptionalReferenceTestData() testData { // when a weak referenced row is deleted, the reference on an atomic // optional field is also deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", }, }, // child is deleted updatedModels: 
[]model.Model{ &Parent{ UUID: "parent", WeakAtomicOptionalReference: ptr("child"), }, }, // the reference to child should be removed from parent finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "child": nil, }, }, } } func weakAtomicReferenceConstraintViolationErrorTestData() testData { // an attempt to delete a weak referenced row when it is referenced from an // atomic required field will fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakAtomicRequiredReference: "child", }, &Child{ UUID: "child", }, }, // child is deleted, but will fail because that would leave a mandatory // field empty updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakAtomicRequiredReference: "child", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_atomic_required_reference", }: database.Reference{ "child": []string{"parent"}, }, }, } } func weakSetReferenceTestData() testData { // when a weak referenced row is deleted, the reference on an set is also // deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakSetReference: []string{"child", "otherChild", "thirdChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, &Child{ UUID: "thirdChild", }, }, // child is deleted updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakSetReference: []string{"child", "otherChild", "thirdChild"}, }, &Child{ UUID: "otherChild", }, &Child{ UUID: "thirdChild", }, }, // the reference to child should be removed from parent finalModels: []model.Model{ &Parent{ UUID: "parent", WeakSetReference: []string{"otherChild", "thirdChild"}, }, &Child{ UUID: "otherChild", }, &Child{ UUID: "thirdChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_set_reference", }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, "thirdChild": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_set_reference", }: database.Reference{ "child": nil, }, }, } } func weakSetReferenceConstraintViolationErrorTestData() testData { // an attempt to delete a weak referenced row when it is referenced from a // set that then becomes smaller than the minimum allowed will fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakSetReference: []string{"child", "otherChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child is deleted but will fail because the set becomes empty and // that is not allowed by the schema updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakSetReference: []string{"child", "otherChild"}, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_set_reference", }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, }, }, } } func 
weakMapKeyReferenceTestData() testData { // when a weak referenced row is deleted, the reference on a map // value is also deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakMapKeyReference: map[string]string{ "child": "value1", "otherChild": "value2", }, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child is deleted updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakMapKeyReference: map[string]string{ "child": "value1", "otherChild": "value2", }, }, &Child{ UUID: "otherChild", }, }, // the reference to child should be removed from parent finalModels: []model.Model{ &Parent{ UUID: "parent", WeakMapKeyReference: map[string]string{ "otherChild": "value2", }, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_key_reference", }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_key_reference", }: database.Reference{ "child": nil, }, }, } } func weakMapValueReferenceTestData() testData { // when a weak referenced row is deleted, the reference on a map // value is also deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key1": "child", "key2": "otherChild", }, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child is deleted updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key1": "child", "key2": "otherChild", }, }, &Child{ UUID: "otherChild", }, }, // the reference to child should be removed from parent finalModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key2": "otherChild", }, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, }, }, // child model is no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "child": nil, }, }, } } func weakMapKeyReferenceConstraintViolationErrorTestData() testData { // an attempt to delete a weak referenced row when it is referenced from a // map key that then becomes smaller than the minimum allowed will fail return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakMapKeyReference: map[string]string{ "child": "value", }, }, &Child{ UUID: "child", }, }, // child is deleted but will fail because the map becomes empty and // that is not allowed by the schema updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakMapKeyReference: map[string]string{ "child": "value", }, }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_key_reference", }: database.Reference{ "child": []string{"parent"}, }, }, } } func weakMapValueReferenceConstraintViolationErrorTestData() testData { // an attempt to delete a weak referenced row when it is referenced from a // map value that then becomes smaller than the minimum allowed will fail return testData{ 
existingModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key1": "child", }, }, &Child{ UUID: "child", }, }, // child is deleted but will fail because the map becomes empty and // that is not allowed by the schema updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key1": "child", }, }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "child": []string{"parent"}, }, }, } } func mapKeyValueReferenceTestData() testData { // for a map holding weak key references to strong value references, when // the weak reference row is deleted, the map entry and the strongly // referenced row is also deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", MapKeyValueReference: map[string]string{ "weakChild": "strongChild", }, }, &Child{ UUID: "weakChild", }, &Child{ UUID: "strongChild", }, }, // weak child is deleted updatedModels: []model.Model{ &Parent{ UUID: "parent", MapKeyValueReference: map[string]string{ "weakChild": "strongChild", }, }, &Child{ UUID: "strongChild", }, }, // the reference to weak child should be removed from parent // and strong child should be deleted finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "map_key_value_reference", FromValue: false, }: database.Reference{ "weakChild": []string{"parent"}, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "map_key_value_reference", FromValue: true, }: database.Reference{ "strongChild": []string{"parent"}, }, }, // neither weak or strong child are referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "map_key_value_reference", FromValue: false, }: database.Reference{ "weakChild": nil, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "map_key_value_reference", FromValue: true, }: database.Reference{ "strongChild": nil, }, }, } } func multipleWeakMapValueReferenceTestData() testData { // when a weak referenced row is deleted, multiple references on a map // value are also deleted return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key1": "child", "key2": "otherChild", "key3": "child", }, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, }, // child is deleted updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key1": "child", "key2": "otherChild", "key3": "child", }, }, &Child{ UUID: "otherChild", }, }, // the reference to child should be removed from parent finalModels: []model.Model{ &Parent{ UUID: "parent", WeakMapValueReference: map[string]string{ "key2": "otherChild", }, }, &Child{ UUID: "otherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"parent"}, }, }, // child model is no longer referenced, newChild is wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "child": nil, }, }, } } func 
transitiveStrongReferenceTestData() testData { // when multiple rows are transitively referenced, garbage collection // happens transitively as well return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", StrongAtomicOptionalReference: ptr("grandchild"), }, &Grandchild{ UUID: "grandchild", }, }, // parent reference to child is removed updatedModels: []model.Model{ &Parent{ UUID: "parent", }, &Child{ UUID: "child", StrongAtomicOptionalReference: ptr("grandchild"), }, &Grandchild{ UUID: "grandchild", }, }, // child and grandchild models should be deleted as it is no longer referenced finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, database.ReferenceSpec{ ToTable: "Grandchild", FromTable: "Child", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "grandchild": []string{"child"}, }, }, // child and grandchild models are no longer referenced wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": nil, }, database.ReferenceSpec{ ToTable: "Grandchild", FromTable: "Child", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "grandchild": nil, }, }, } } func transitiveStrongAndWeakReferenceTestData() testData { // when a strong referenced is removed, an unreferenced row will be garbage // collected and transitively, weak references to it removed return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), WeakAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", }, }, // parent strong reference to child is removed updatedModels: []model.Model{ &Parent{ UUID: "parent", WeakAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", }, }, // as a result, child and and the weak reference to it is removed finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, }, // child is no longer referenced at all wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": nil, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "child": nil, }, }, } } func insertNoRootUnreferencedRowTestData() testData { return testData{ // new child is inserted updatedModels: []model.Model{ &Child{ UUID: "newChild", }, }, // but is removed since is not referenced from anywhere and the table is // not part of the root set finalModels: nil, wantUpdatedReferences: database.References{}, } } func weakReferenceToNonExistentRowTestData() testData { return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", }, }, // a weak reference is added no nonexistent row updatedModels: []model.Model{ &Parent{ UUID: 
"parent", WeakAtomicOptionalReference: ptr("child"), }, }, // but is removed since the row does not exist finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "child": nil, }, }, } } func sameRowStrongAndWeakReferenceTestData() testData { // a row needs to have a weak reference garbage collected and // at the same time that row itself is garbage collected due to not // being strongly referenced return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), }, &Child{ UUID: "child", WeakAtomicOptionalReference: ptr("grandchild"), }, &Grandchild{ UUID: "grandchild", }, }, // parent strong reference to child is removed // grand child is removed as well updatedModels: []model.Model{ &Parent{ UUID: "parent", }, &Child{ UUID: "child", WeakAtomicOptionalReference: ptr("grandchild"), }, }, // as a result, child is removed finalModels: []model.Model{ &Parent{ UUID: "parent", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, database.ReferenceSpec{ ToTable: "Grandchild", FromTable: "Child", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "grandchild": []string{"child"}, }, }, // neither child nor grandchild are referenced at all wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": nil, }, database.ReferenceSpec{ ToTable: "Grandchild", FromTable: "Child", FromColumn: "weak_atomic_optional_reference", }: database.Reference{ "grandchild": nil, }, }, } } func multipleReferencesTestData() testData { // testing behavior with multiple combinations of references return testData{ existingModels: []model.Model{ &Parent{ UUID: "parent", StrongSetReference: []string{"child"}, StrongAtomicOptionalReference: ptr("child"), WeakMapValueReference: map[string]string{"key1": "yetAnotherChild", "key2": "otherChild"}, }, &Parent{ UUID: "otherParent", StrongAtomicOptionalReference: ptr("child"), StrongSetReference: []string{"otherChild"}, WeakSetReference: []string{"otherChild", "child", "yetAnotherChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, &Child{ UUID: "yetAnotherChild", }, }, // all strong references to child except one are removed // single strong reference to otherChild is removed updatedModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), WeakMapValueReference: map[string]string{"key1": "yetAnotherChild", "key2": "otherChild"}, }, &Parent{ UUID: "otherParent", WeakSetReference: []string{"otherChild", "child", "yetAnotherChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "otherChild", }, &Child{ UUID: "yetAnotherChild", }, }, // otherChild is garbage collected and all weak references to it removed finalModels: []model.Model{ &Parent{ UUID: "parent", StrongAtomicOptionalReference: ptr("child"), WeakMapValueReference: map[string]string{"key1": "yetAnotherChild"}, }, &Parent{ UUID: "otherParent", WeakSetReference: []string{"child", "yetAnotherChild"}, }, &Child{ UUID: "child", }, &Child{ UUID: "yetAnotherChild", }, }, existingReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", 
FromTable: "Parent", FromColumn: "strong_set_reference", }: database.Reference{ "child": []string{"parent"}, "otherChild": []string{"otherParent"}, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent", "otherParent"}, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "yetAnotherChild": []string{"parent"}, "otherChild": []string{"parent"}, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_set_reference", }: database.Reference{ "otherChild": []string{"otherParent"}, "child": []string{"otherParent"}, "yetAnotherChild": []string{"otherParent"}, }, }, // all strong references to child except one are removed // all references to otherChild are removed // references to yetAnotherChild are unchanged wantUpdatedReferences: database.References{ database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_set_reference", }: database.Reference{ "child": nil, "otherChild": nil, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "strong_atomic_optional_reference", }: database.Reference{ "child": []string{"parent"}, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_map_value_reference", FromValue: true, }: database.Reference{ "otherChild": nil, }, database.ReferenceSpec{ ToTable: "Child", FromTable: "Parent", FromColumn: "weak_set_reference", }: database.Reference{ "child": []string{"otherParent"}, // this reference is read by the reference tracker, but not changed "otherChild": nil, }, }, } } golang-github-ovn-org-libovsdb-0.7.0/updates/updates.go000066400000000000000000000322521464501522100231010ustar00rootroot00000000000000package updates import ( "fmt" "reflect" "github.com/ovn-org/libovsdb/mapper" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" ) type rowUpdate2 = ovsdb.RowUpdate2 // modelUpdate contains an update in model and OVSDB RowUpdate2 notation type modelUpdate struct { rowUpdate2 *rowUpdate2 old model.Model new model.Model } // isEmpty returns whether this update is empty func (mu modelUpdate) isEmpty() bool { return mu == modelUpdate{} } // ModelUpdates contains updates indexed by table and uuid type ModelUpdates struct { updates map[string]map[string]modelUpdate } // GetUpdatedTables returns the tables that have updates func (u ModelUpdates) GetUpdatedTables() []string { tables := make([]string, 0, len(u.updates)) for table, updates := range u.updates { if len(updates) > 0 { tables = append(tables, table) } } return tables } // ForEachModelUpdate processes each row update of a given table in model // notation func (u ModelUpdates) ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error { models := u.updates[table] for uuid, model := range models { err := do(uuid, model.old, model.new) if err != nil { return err } } return nil } // ForEachRowUpdate processes each row update of a given table in OVSDB // RowUpdate2 notation func (u ModelUpdates) ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error { rows := u.updates[table] for uuid, row := range rows { err := do(uuid, *row.rowUpdate2) if err != nil { return err } } return nil } // GetModel returns the last known state of the requested model. If the model is // unknown or has been deleted, returns nil. 
func (u ModelUpdates) GetModel(table, uuid string) model.Model {
	if u.updates == nil {
		return nil
	}
	if t, found := u.updates[table]; found {
		if update, found := t[uuid]; found {
			return update.new
		}
	}
	return nil
}

// GetRow returns the last known state of the requested row. If the row is
// unknown or has been deleted, returns nil.
func (u ModelUpdates) GetRow(table, uuid string) *ovsdb.Row {
	if u.updates == nil {
		return nil
	}
	if t, found := u.updates[table]; found {
		if update, found := t[uuid]; found {
			return update.rowUpdate2.New
		}
	}
	return nil
}

// Merge a set of updates with an earlier set of updates
func (u *ModelUpdates) Merge(dbModel model.DatabaseModel, new ModelUpdates) error {
	for table, models := range new.updates {
		for uuid, update := range models {
			err := u.addUpdate(dbModel, table, uuid, update)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// AddOperation adds an update for a model from an OVSDB Operation. If several
// updates for the same model are aggregated, the caller is responsible for
// ensuring that the provided model matches the updated model of the previous
// update.
func (u *ModelUpdates) AddOperation(dbModel model.DatabaseModel, table, uuid string, current model.Model, op *ovsdb.Operation) error {
	switch op.Op {
	case ovsdb.OperationInsert:
		return u.addInsertOperation(dbModel, table, uuid, op)
	case ovsdb.OperationUpdate:
		return u.addUpdateOperation(dbModel, table, uuid, current, op)
	case ovsdb.OperationMutate:
		return u.addMutateOperation(dbModel, table, uuid, current, op)
	case ovsdb.OperationDelete:
		return u.addDeleteOperation(dbModel, table, uuid, current, op)
	default:
		return fmt.Errorf("database update from operation %#v not supported", op.Op)
	}
}

// AddRowUpdate adds an update for a model from an OVSDB RowUpdate. If several
// updates for the same model are aggregated, the caller is responsible for
// ensuring that the provided model matches the updated model of the previous
// update.
func (u *ModelUpdates) AddRowUpdate(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru ovsdb.RowUpdate) error {
	switch {
	case ru.Old == nil && ru.New != nil:
		new, err := model.CreateModel(dbModel, table, ru.New, uuid)
		if err != nil {
			return err
		}
		err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &rowUpdate2{New: ru.New}})
		if err != nil {
			return err
		}
	case ru.Old != nil && ru.New != nil:
		old := current
		new := model.Clone(current)
		info, err := dbModel.NewModelInfo(new)
		if err != nil {
			return err
		}
		changed, err := updateModel(dbModel, table, info, ru.New, nil)
		if !changed || err != nil {
			return err
		}
		err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &rowUpdate2{Old: ru.Old, New: ru.New}})
		if err != nil {
			return err
		}
	case ru.New == nil:
		old := current
		err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &rowUpdate2{Old: ru.Old}})
		if err != nil {
			return err
		}
	}
	return nil
}

// AddRowUpdate2 adds an update for a model from an OVSDB RowUpdate2. If several
// updates for the same model are aggregated, the caller is responsible for
// ensuring that the provided model matches the updated model of the previous
// update.
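//
// For illustration only (the table, uuid and column values here are
// hypothetical), applying a Modify notification on top of the currently
// cached model could look like:
//
//	err := u.AddRowUpdate2(dbModel, "Bridge", uuid, cachedModel, ovsdb.RowUpdate2{
//		Modify: &ovsdb.Row{"datapath_type": "netdev"},
//	})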
func (u *ModelUpdates) AddRowUpdate2(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru2 ovsdb.RowUpdate2) error { switch { case ru2.Initial != nil: ru2.Insert = ru2.Initial fallthrough case ru2.Insert != nil: new, err := model.CreateModel(dbModel, table, ru2.Insert, uuid) if err != nil { return err } err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &ru2}) if err != nil { return err } case ru2.Modify != nil: old := current new := model.Clone(current) info, err := dbModel.NewModelInfo(new) if err != nil { return err } changed, err := modifyModel(dbModel, table, info, ru2.Modify) if !changed || err != nil { return err } err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &ru2}) if err != nil { return err } default: old := current err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &ru2}) if err != nil { return err } } return nil } func (u *ModelUpdates) addUpdate(dbModel model.DatabaseModel, table, uuid string, update modelUpdate) error { if u.updates == nil { u.updates = map[string]map[string]modelUpdate{} } if _, ok := u.updates[table]; !ok { u.updates[table] = make(map[string]modelUpdate) } ts := dbModel.Schema.Table(table) update, err := merge(ts, u.updates[table][uuid], update) if err != nil { return err } if !update.isEmpty() { u.updates[table][uuid] = update return nil } // If after the merge this amounts to no update, remove it from the list and // clean up delete(u.updates[table], uuid) if len(u.updates[table]) == 0 { delete(u.updates, table) } if len(u.updates) == 0 { u.updates = nil } return nil } func (u *ModelUpdates) addInsertOperation(dbModel model.DatabaseModel, table, uuid string, op *ovsdb.Operation) error { m := dbModel.Mapper model, err := dbModel.NewModel(table) if err != nil { return err } mapperInfo, err := dbModel.NewModelInfo(model) if err != nil { return err } err = m.GetRowData(&op.Row, mapperInfo) if err != nil { return err } err = mapperInfo.SetField("_uuid", uuid) if err != nil { return err } resultRow, err := m.NewRow(mapperInfo) if err != nil { return err } err = u.addUpdate(dbModel, table, uuid, modelUpdate{ old: nil, new: model, rowUpdate2: &rowUpdate2{ Insert: &resultRow, New: &resultRow, Old: nil, }, }, ) return err } func (u *ModelUpdates) addUpdateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { m := dbModel.Mapper oldInfo, err := dbModel.NewModelInfo(old) if err != nil { return err } oldRow, err := m.NewRow(oldInfo) if err != nil { return err } new := model.Clone(old) newInfo, err := dbModel.NewModelInfo(new) if err != nil { return err } delta := ovsdb.NewRow() changed, err := updateModel(dbModel, table, newInfo, &op.Row, &delta) if err != nil { return err } if !changed { return nil } newRow, err := m.NewRow(newInfo) if err != nil { return err } err = u.addUpdate(dbModel, table, uuid, modelUpdate{ old: old, new: new, rowUpdate2: &rowUpdate2{ Modify: &delta, Old: &oldRow, New: &newRow, }, }, ) return err } func (u *ModelUpdates) addMutateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { m := dbModel.Mapper schema := dbModel.Schema.Table(table) oldInfo, err := dbModel.NewModelInfo(old) if err != nil { return err } oldRow, err := m.NewRow(oldInfo) if err != nil { return err } new := model.Clone(old) newInfo, err := dbModel.NewModelInfo(new) if err != nil { return err } differences := make(map[string]interface{}) for _, mutation := range 
op.Mutations { column := schema.Column(mutation.Column) if column == nil { continue } var nativeValue interface{} // Usually a mutation value is of the same type of the value being mutated // except for delete mutation of maps where it can also be a list of same type of // keys (rfc7047 5.1). Handle this special case here. if mutation.Mutator == "delete" && column.Type == ovsdb.TypeMap && reflect.TypeOf(mutation.Value) != reflect.TypeOf(ovsdb.OvsMap{}) { nativeValue, err = ovsdb.OvsToNativeSlice(column.TypeObj.Key.Type, mutation.Value) if err != nil { return err } } else { nativeValue, err = ovsdb.OvsToNative(column, mutation.Value) if err != nil { return err } } if err := ovsdb.ValidateMutation(column, mutation.Mutator, nativeValue); err != nil { return err } current, err := newInfo.FieldByColumn(mutation.Column) if err != nil { return err } newValue, diff := mutate(current, mutation.Mutator, nativeValue) if err := newInfo.SetField(mutation.Column, newValue); err != nil { return err } old, err := oldInfo.FieldByColumn(mutation.Column) if err != nil { return err } diff, changed := mergeDifference(old, differences[mutation.Column], diff) if changed { differences[mutation.Column] = diff } else { delete(differences, mutation.Column) } } if len(differences) == 0 { return nil } delta := ovsdb.NewRow() for column, diff := range differences { colSchema := schema.Column(column) diffOvs, err := ovsdb.NativeToOvs(colSchema, diff) if err != nil { return err } delta[column] = diffOvs } newRow, err := m.NewRow(newInfo) if err != nil { return err } err = u.addUpdate(dbModel, table, uuid, modelUpdate{ old: old, new: new, rowUpdate2: &rowUpdate2{ Modify: &delta, Old: &oldRow, New: &newRow, }, }, ) return err } func (u *ModelUpdates) addDeleteOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { m := dbModel.Mapper info, err := dbModel.NewModelInfo(old) if err != nil { return err } oldRow, err := m.NewRow(info) if err != nil { return err } err = u.addUpdate(dbModel, table, uuid, modelUpdate{ old: old, new: nil, rowUpdate2: &rowUpdate2{ Delete: &ovsdb.Row{}, Old: &oldRow, }, }, ) return err } func updateModel(dbModel model.DatabaseModel, table string, info *mapper.Info, update, modify *ovsdb.Row) (bool, error) { return updateOrModifyModel(dbModel, table, info, update, modify, false) } func modifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, modify *ovsdb.Row) (bool, error) { return updateOrModifyModel(dbModel, table, info, modify, nil, true) } // updateOrModifyModel updates info about a model with a given row containing // the change. The change row itself can be interpreted as an update or a // modify. If the change is an update and a modify row is provided, it will be // filled with the modify data. 
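//
// As a hypothetical example: if a map column currently holds {"a": "1", "b": "2"}
// and the change row, interpreted as an update, carries {"a": "1", "c": "3"}, the
// computed modify row carries the difference {"b": "2", "c": "3"}. Interpreted as
// a modify, the same change row would instead be applied as a difference on top
// of the current value.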
func updateOrModifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, changeRow, modifyRow *ovsdb.Row, isModify bool) (bool, error) { schema := dbModel.Schema.Table(table) var changed bool for column, updateOvs := range *changeRow { colSchema := schema.Column(column) if colSchema == nil { // ignore columns we don't know about in our schema continue } currentNative, err := info.FieldByColumn(column) if err != nil { return false, err } updateNative, err := ovsdb.OvsToNative(colSchema, updateOvs) if err != nil { return false, err } if isModify { differenceNative, isDifferent := applyDifference(currentNative, updateNative) if isDifferent && !colSchema.Mutable() { return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) } changed = changed || isDifferent err = info.SetField(column, differenceNative) if err != nil { return false, err } } else { differenceNative, isDifferent := difference(currentNative, updateNative) if isDifferent && !colSchema.Mutable() { return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) } changed = changed || isDifferent if isDifferent && modifyRow != nil { deltaOvs, err := ovsdb.NativeToOvs(colSchema, differenceNative) if err != nil { return false, err } (*modifyRow)[column] = deltaOvs } err = info.SetField(column, updateNative) if err != nil { return false, err } } } return changed, nil } golang-github-ovn-org-libovsdb-0.7.0/updates/updates_test.go000066400000000000000000001200161464501522100241340ustar00rootroot00000000000000package updates import ( "testing" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/libovsdb/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestUpdates_AddOperation(t *testing.T) { dbModel, err := test.GetModel() require.NoError(t, err) type fields struct { updates map[string]map[string]modelUpdate } type args struct { dbModel model.DatabaseModel table string uuid string current model.Model op *ovsdb.Operation } tests := []struct { name string fields fields args args expected fields wantErr bool }{ { name: "insert", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationInsert, Row: ovsdb.Row{ "name": "bridge", }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", }, }, }, }, }, }, }, { name: "insert after insert fails", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationInsert, Row: ovsdb.Row{ "name": "bridge", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, wantErr: true, }, { name: "insert after update fails", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationInsert, Row: ovsdb.Row{ "name": "bridge", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: 
"bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, Modify: &ovsdb.Row{ "datapath_type": "type", }, }, }, }, }, }, wantErr: true, }, { name: "insert after delete fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationInsert, Row: ovsdb.Row{ "name": "bridge", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Delete: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, wantErr: true, }, { name: "insert ignores unknown columns", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationInsert, Row: ovsdb.Row{ "unknown": "unknown", }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, }, }, }, }, }, }, }, { name: "insert with bad column type fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationInsert, Row: ovsdb.Row{ "datapath_type": 0, }, }, }, wantErr: true, }, { name: "update", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key": "value", "key1": "value1"}, }, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "datapath_type": "type", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key2": "value2"}}, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key": "value", "key1": "value1"}, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", ExternalIds: map[string]string{"key": "value1", "key2": "value2"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key1": "value1"}}, }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "datapath_type": "type", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key2": "value2"}}, }, Modify: &ovsdb.Row{ "datapath_type": "type", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value1", "key1": "value1", "key2": "value2"}}, }, }, }, }, }, }, }, { name: "update no op", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key": "value", "key1": "value1"}, }, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value", "key1": "value1"}}, }, }, }, }, { name: "update after insert", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "datapath_type": "type", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ New: &ovsdb.Row{ "name": "bridge", }, Insert: &ovsdb.Row{ 
"name": "bridge", }, }, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", }, rowUpdate2: &ovsdb.RowUpdate2{ New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "datapath_type": "type", }, Insert: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "datapath_type": "type", }, }, }, }, }, }, }, { name: "update after update", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "datapath_type": "new", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "old", }, Modify: &ovsdb.Row{ "datapath_type": "old", }, }, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "datapath_type": "new", }, Modify: &ovsdb.Row{ "datapath_type": "new", }, }, }, }, }, }, }, { name: "update after update results in no op", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", }, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "datapath_type": "", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, Modify: &ovsdb.Row{ "datapath_type": "type", }, }, }, }, }, }, expected: fields{ updates: nil, }, }, { name: "update after delete fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "datapath_type": "type", }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, Delete: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, wantErr: true, }, { name: "update nil model fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "name": "bridge", }, }, }, wantErr: true, }, { name: "update different type of model fails", args: args{ table: "Bridge", uuid: "uuid", current: &test.OvsType{}, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "name": "bridge", }, }, }, wantErr: true, }, { name: "update an inmutable column fails", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "name": "bridge2", }, }, }, wantErr: true, }, { name: "update unknown column ignored", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, 
op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "unknown": "bridge", }, }, }, }, { name: "update with bad column type fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationUpdate, Row: ovsdb.Row{ "datapath_type": 0, }, }, }, wantErr: true, }, { name: "mutate map multiple times", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key1": "value1", "key2": "value2"}, }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "external_ids", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key3": "value3", "key1": "value2"}}, }, { Column: "external_ids", Mutator: ovsdb.MutateOperationDelete, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key2": "value2"}}, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key1": "value1", "key2": "value2"}, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key1": "value1", "key3": "value3"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1", "key2": "value2"}}, }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1", "key3": "value3"}}, }, Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key2": "value2", "key3": "value3"}}, }, }, }, }, }, }, }, { name: "mutate set multiple times", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"uuid1", "uuid2"}, }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "ports", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid1"}, ovsdb.UUID{GoUUID: "uuid3"}}}, }, { Column: "ports", Mutator: ovsdb.MutateOperationDelete, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid3"}, ovsdb.UUID{GoUUID: "uuid1"}}}, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"uuid1", "uuid2"}, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"uuid2"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid1"}, ovsdb.UUID{GoUUID: "uuid2"}}}, }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid2"}}}, }, Modify: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid1"}}}, }, }, }, }, }, }, }, { name: "mutate can result in no op", args: args{ table: "Flow_Sample_Collector_Set", uuid: "uuid", current: &test.FlowSampleCollectorSetType{ UUID: "uuid", ID: 1, ExternalIDs: map[string]string{"key": "value"}, }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "id", Mutator: ovsdb.MutateOperationAdd, Value: 1, }, { Column: "id", Mutator: ovsdb.MutateOperationSubtract, Value: 1, }, { Column: "external_ids", Mutator: 
ovsdb.MutateOperationDelete, Value: ovsdb.OvsSet{GoSet: []interface{}{"key"}}, }, { Column: "external_ids", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key": "value"}}, }, }, }, }, }, { name: "mutate after insert", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "ports", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid"}}}, }, }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"uuid"}, }, rowUpdate2: &ovsdb.RowUpdate2{ New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid"}}}, }, Insert: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid"}}}, }, }, }, }, }, }, }, { name: "mutate after update", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge2", }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "ports", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid"}}}, }, }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge2", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge2", }, Modify: &ovsdb.Row{ "name": "bridge2", }, }, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge2", Ports: []string{"uuid"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge2", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid"}}}, }, Modify: &ovsdb.Row{ "name": "bridge2", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid"}}}, }, }, }, }, }, }, }, { name: "mutate after mutate", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key1": "value1"}, }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "external_ids", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key2": "value2"}}, }, }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key1": "value1"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1"}}, }, Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: 
map[interface{}]interface{}{"key1": "value1"}}, }, }, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"key1": "value1", "key2": "value2"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1", "key2": "value2"}}, }, Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"key1": "value1", "key2": "value2"}}, }, }, }, }, }, }, }, { name: "mutate after delete fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "ports", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid-2"}}}, }, }, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"uuid-1"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid-1"}}}, }, Delete: &ovsdb.Row{ "name": "bridge", "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid-1"}}}, }, }, }, }, }, }, wantErr: true, }, { name: "mutate nil model fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "ports", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid-2"}}}, }, }, }, }, wantErr: true, }, { name: "mutate different type of model fails", args: args{ table: "Bridge", uuid: "uuid", current: &test.OvsType{}, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "ports", Mutator: ovsdb.MutateOperationInsert, Value: ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "uuid-2"}}}, }, }, }, }, wantErr: true, }, { name: "mutate an inmmutable column fails", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "name", Mutator: ovsdb.MutateOperationInsert, Value: "bridge2", }, }, }, }, wantErr: true, }, { name: "mutate with bad column type fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationMutate, Mutations: []ovsdb.Mutation{ { Column: "datapath_type", Mutator: ovsdb.MutateOperationInsert, Value: 0, }, }, }, }, wantErr: true, }, { name: "delete", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationDelete, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "_uuid": ovsdb.UUID{GoUUID: "uuid"}, "name": "bridge", }, Delete: &ovsdb.Row{}, }, }, }, }, }, }, { name: "delete after insert", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, op: &ovsdb.Operation{ Op: ovsdb.OperationDelete, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: 
&ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, New: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, expected: fields{ updates: nil, }, }, { name: "delete after update", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, op: &ovsdb.Operation{ Op: ovsdb.OperationDelete, }, }, fields: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_type": "old", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "new", }, Modify: &ovsdb.Row{ "datapath_type": "new", }, }, }, }, }, }, expected: fields{ updates: map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_type": "old", }, Delete: &ovsdb.Row{}, }, }, }, }, }, }, { name: "delete nil model fails", args: args{ table: "Bridge", uuid: "uuid", op: &ovsdb.Operation{ Op: ovsdb.OperationDelete, }, }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { u := &ModelUpdates{ updates: tt.fields.updates, } tt.args.dbModel = dbModel err := u.AddOperation(tt.args.dbModel, tt.args.table, tt.args.uuid, tt.args.current, tt.args.op) if tt.wantErr { assert.Error(t, err) return } assert.NoError(t, err) assert.Equal(t, tt.expected.updates, u.updates) }) } } func TestModelUpdates_AddRowUpdate2(t *testing.T) { dbModel, err := test.GetModel() require.NoError(t, err) oldDatapathID := "old" newDatapathID := "new" type fields struct { updates map[string]map[string]modelUpdate } type args struct { dbModel model.DatabaseModel table string uuid string current model.Model ru2 ovsdb.RowUpdate2 } tests := []struct { name string fields fields args args expected fields wantErr bool }{ { name: "insert", args: args{ table: "Bridge", uuid: "uuid", ru2: ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Insert: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, }, { name: "modify", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_type": "new", }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_type": "new", }, }, }, }, }, }, }, { name: "modify, add and remove from set", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"foo"}, }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "ports": ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "foo"}, ovsdb.UUID{GoUUID: "bar"}}}, }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"foo"}, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", Ports: []string{"bar"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "ports": 
ovsdb.OvsSet{GoSet: []interface{}{ovsdb.UUID{GoUUID: "foo"}, ovsdb.UUID{GoUUID: "bar"}}}, }, }, }, }, }, }, }, { name: "modify, add, update and remove from map", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"foo": "bar", "baz": "qux"}, }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar", "bar": "baz", "baz": "quux"}}, }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"foo": "bar", "baz": "qux"}, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", ExternalIds: map[string]string{"bar": "baz", "baz": "quux"}, }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{"foo": "bar", "bar": "baz", "baz": "quux"}}, }, }, }, }, }, }, }, { name: "modify optional", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathID: &oldDatapathID, }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathID: &oldDatapathID, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, }, }, }, }, { name: "modify add optional", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathID: &newDatapathID, }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{newDatapathID}}, }, }, }, }, }, }, }, { name: "modify remove optional", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathID: &oldDatapathID, }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{}}, }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathID: &oldDatapathID, }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{}}, }, }, }, }, }, }, }, { name: "modify no op", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", DatapathID: &oldDatapathID, Ports: []string{"foo", "bar"}, ExternalIds: map[string]string{"foo": "bar", "baz": "qux"}, }, ru2: ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_type": "type", "datapath_id": ovsdb.OvsSet{GoSet: []interface{}{oldDatapathID}}, "ports": ovsdb.OvsSet{GoSet: []interface{}{}}, "external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{}}, }, }, }, }, { name: "modify unknown colum", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, ru2: ovsdb.RowUpdate2{ 
Modify: &ovsdb.Row{ "datapath_type": "new", "unknown": "column", }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, rowUpdate2: &ovsdb.RowUpdate2{ Modify: &ovsdb.Row{ "datapath_type": "new", "unknown": "column", }, }, }, }, }, }, }, { name: "delete", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{}, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { u := &ModelUpdates{ updates: tt.fields.updates, } tt.args.dbModel = dbModel err := u.AddRowUpdate2(tt.args.dbModel, tt.args.table, tt.args.uuid, tt.args.current, tt.args.ru2) if tt.wantErr { assert.Error(t, err) return } assert.NoError(t, err) assert.Equal(t, tt.expected.updates, u.updates) }) } } func TestModelUpdates_AddRowUpdate(t *testing.T) { dbModel, err := test.GetModel() require.NoError(t, err) type fields struct { updates map[string]map[string]modelUpdate } type args struct { dbModel model.DatabaseModel table string uuid string current model.Model ru ovsdb.RowUpdate } tests := []struct { name string fields fields args args expected fields wantErr bool }{ { name: "insert", args: args{ table: "Bridge", uuid: "uuid", ru: ovsdb.RowUpdate{ New: &ovsdb.Row{ "name": "bridge", }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { new: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{ New: &ovsdb.Row{ "name": "bridge", }, }, }, }, }, }, }, { name: "update", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, ru: ovsdb.RowUpdate{ Old: &ovsdb.Row{ "name": "bridge", "datapath_type": "old", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "new", }, }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "old", }, new: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "new", }, rowUpdate2: &ovsdb.RowUpdate2{ Old: &ovsdb.Row{ "name": "bridge", "datapath_type": "old", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "new", }, }, }, }, }, }, }, { name: "update no op", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", DatapathType: "type", }, ru: ovsdb.RowUpdate{ Old: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, New: &ovsdb.Row{ "name": "bridge", "datapath_type": "type", }, }, }, }, { name: "delete", args: args{ table: "Bridge", uuid: "uuid", current: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, }, expected: fields{ map[string]map[string]modelUpdate{ "Bridge": { "uuid": { old: &test.BridgeType{ UUID: "uuid", Name: "bridge", }, rowUpdate2: &ovsdb.RowUpdate2{}, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { u := &ModelUpdates{ updates: tt.fields.updates, } tt.args.dbModel = dbModel err := u.AddRowUpdate(tt.args.dbModel, tt.args.table, tt.args.uuid, tt.args.current, tt.args.ru) if tt.wantErr { assert.Error(t, err) return } assert.NoError(t, err) assert.Equal(t, tt.expected.updates, u.updates) }) } }
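
// TestModelUpdates_AddOperationAggregationSketch is an illustrative sketch, not
// part of the original suite: it shows how successive operations on the same row
// are aggregated, with an insert followed by an update collapsing into a single
// update whose model reflects both changes. The table, column and uuid values
// are arbitrary examples.
func TestModelUpdates_AddOperationAggregationSketch(t *testing.T) {
	dbModel, err := test.GetModel()
	require.NoError(t, err)

	u := ModelUpdates{}

	// insert a new bridge row
	err = u.AddOperation(dbModel, "Bridge", "uuid", nil, &ovsdb.Operation{
		Op:  ovsdb.OperationInsert,
		Row: ovsdb.Row{"name": "bridge"},
	})
	require.NoError(t, err)

	// update the same row, providing the model as known after the previous update
	current := u.GetModel("Bridge", "uuid")
	require.NotNil(t, current)
	err = u.AddOperation(dbModel, "Bridge", "uuid", current, &ovsdb.Operation{
		Op:  ovsdb.OperationUpdate,
		Row: ovsdb.Row{"datapath_type": "type"},
	})
	require.NoError(t, err)

	// the aggregated state reflects both operations
	assert.Equal(t, []string{"Bridge"}, u.GetUpdatedTables())
	bridge, ok := u.GetModel("Bridge", "uuid").(*test.BridgeType)
	require.True(t, ok)
	assert.Equal(t, "bridge", bridge.Name)
	assert.Equal(t, "type", bridge.DatapathType)
}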