pax_global_header00006660000000000000000000000064140664315400014515gustar00rootroot0000000000000052 comment=49d1d02c49a783de548d1ba8ae8fde466a20b9e6 go-immutable-radix-1.3.1/000077500000000000000000000000001406643154000152065ustar00rootroot00000000000000go-immutable-radix-1.3.1/.circleci/000077500000000000000000000000001406643154000170415ustar00rootroot00000000000000go-immutable-radix-1.3.1/.circleci/config.yml000066400000000000000000000034661406643154000210420ustar00rootroot00000000000000version: 2.1 references: images: go: &GOLANG_IMAGE docker.mirror.hashicorp.services/circleci/golang:1.15.3 # reusable 'executor' object for jobs executors: go: docker: - image: *GOLANG_IMAGE environment: - TEST_RESULTS: /tmp/test-results # path to where test results are saved jobs: go-fmt-and-vet: executor: go steps: - checkout # Restore go module cache if there is one - restore_cache: keys: - go-immutable-radix-modcache-v1-{{ checksum "go.mod" }} - run: go mod download # Save go module cache if the go.mod file has changed - save_cache: key: go-immutable-radix-modcache-v1-{{ checksum "go.mod" }} paths: - "/go/pkg/mod" # check go fmt output because it does not report non-zero when there are fmt changes - run: name: check go fmt command: | files=$(go fmt ./...) if [ -n "$files" ]; then echo "The following file(s) do not conform to go fmt:" echo "$files" exit 1 fi - run: go vet ./... go-test: executor: go steps: - checkout - run: mkdir -p $TEST_RESULTS - restore_cache: # restore cache from dev-build job keys: - go-immutable-radix-modcache-v1-{{ checksum "go.mod" }} # run go tests with gotestsum - run: | PACKAGE_NAMES=$(go list ./...) 
gotestsum --format=short-verbose --junitfile $TEST_RESULTS/gotestsum-report.xml -- $PACKAGE_NAMES - store_test_results: path: /tmp/test-results - store_artifacts: path: /tmp/test-results workflows: version: 2 test-and-build: jobs: - go-fmt-and-vet - go-test: requires: - go-fmt-and-vet go-immutable-radix-1.3.1/.gitignore000066400000000000000000000004121406643154000171730ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof go-immutable-radix-1.3.1/CHANGELOG.md000066400000000000000000000010721406643154000170170ustar00rootroot00000000000000# UNRELEASED # 1.3.0 (September 17th, 2020) FEATURES * Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] # 1.2.0 (March 18th, 2020) FEATURES * Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] # 1.1.0 (May 22nd, 2019) FEATURES * Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] # 1.0.0 (August 30th, 2018) * go mod adopted go-immutable-radix-1.3.1/LICENSE000066400000000000000000000370621406643154000162230ustar00rootroot00000000000000Mozilla Public License, version 2.0 1. Definitions 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. 
"Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. 
"You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or c. 
under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and b. 
You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. 
Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. 
In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. Disclaimer of Warranty Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. 
Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. 
Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. go-immutable-radix-1.3.1/README.md000066400000000000000000000036321406643154000164710ustar00rootroot00000000000000go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) ========= Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). The package only provides a single `Tree` implementation, optimized for sparse nodes. As a radix tree, it provides the following: * O(k) operations. In many cases, this can be faster than a hash table since the hash function is an O(k) operation, and hash tables have very poor cache locality. * Minimum / Maximum value lookups * Ordered iteration A tree supports using a transaction to batch multiple updates (insert, delete) in a more efficient manner than performing each operation one at a time. For a mutable variant, see [go-radix](https://github.com/armon/go-radix). Documentation ============= The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). 
Example ======= Below is a simple example of usage ```go // Create a tree r := iradix.New() r, _, _ = r.Insert([]byte("foo"), 1) r, _, _ = r.Insert([]byte("bar"), 2) r, _, _ = r.Insert([]byte("foobar"), 2) // Find the longest prefix match m, _, _ := r.Root().LongestPrefix([]byte("foozip")) if string(m) != "foo" { panic("should be foo") } ``` Here is an example of performing a range scan of the keys. ```go // Create a tree r := iradix.New() r, _, _ = r.Insert([]byte("001"), 1) r, _, _ = r.Insert([]byte("002"), 2) r, _, _ = r.Insert([]byte("005"), 5) r, _, _ = r.Insert([]byte("010"), 10) r, _, _ = r.Insert([]byte("100"), 10) // Range scan over the keys that sort lexicographically between [003, 050) it := r.Root().Iterator() it.SeekLowerBound([]byte("003")) for key, _, ok := it.Next(); ok; key, _, ok = it.Next() { if key >= "050" { break } fmt.Println(key) } // Output: // 005 // 010 ``` go-immutable-radix-1.3.1/edges.go000066400000000000000000000004131406643154000166220ustar00rootroot00000000000000package iradix import "sort" type edges []edge func (e edges) Len() int { return len(e) } func (e edges) Less(i, j int) bool { return e[i].label < e[j].label } func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e edges) Sort() { sort.Sort(e) } go-immutable-radix-1.3.1/go.mod000066400000000000000000000002111406643154000163060ustar00rootroot00000000000000module github.com/hashicorp/go-immutable-radix require ( github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/golang-lru v0.5.0 ) go-immutable-radix-1.3.1/go.sum000066400000000000000000000005441406643154000163440ustar00rootroot00000000000000github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
go-immutable-radix-1.3.1/iradix.go000066400000000000000000000452441406643154000170260ustar00rootroot00000000000000package iradix import ( "bytes" "strings" "github.com/hashicorp/golang-lru/simplelru" ) const ( // defaultModifiedCache is the default size of the modified node // cache used per transaction. This is used to cache the updates // to the nodes near the root, while the leaves do not need to be // cached. This is important for very large transactions to prevent // the modified cache from growing to be enormous. This is also used // to set the max size of the mutation notify maps since those should // also be bounded in a similar way. defaultModifiedCache = 8192 ) // Tree implements an immutable radix tree. This can be treated as a // Dictionary abstract data type. The main advantage over a standard // hash map is prefix-based lookups and ordered iteration. The immutability // means that it is safe to concurrently read from a Tree without any // coordination. type Tree struct { root *Node size int } // New returns an empty Tree func New() *Tree { t := &Tree{ root: &Node{ mutateCh: make(chan struct{}), }, } return t } // Len is used to return the number of elements in the tree func (t *Tree) Len() int { return t.size } // Txn is a transaction on the tree. This transaction is applied // atomically and returns a new tree when committed. A transaction // is not thread safe, and should only be used by a single goroutine. type Txn struct { // root is the modified root for the transaction. root *Node // snap is a snapshot of the root node for use if we have to run the // slow notify algorithm. snap *Node // size tracks the size of the tree as it is modified during the // transaction. size int // writable is a cache of writable nodes that have been created during // the course of the transaction. This allows us to re-use the same // nodes for further writes and avoid unnecessary copies of nodes that // have never been exposed outside the transaction. 
This will only hold // up to defaultModifiedCache number of entries. writable *simplelru.LRU // trackChannels is used to hold channels that need to be notified to // signal mutation of the tree. This will only hold up to // defaultModifiedCache number of entries, after which we will set the // trackOverflow flag, which will cause us to use a more expensive // algorithm to perform the notifications. Mutation tracking is only // performed if trackMutate is true. trackChannels map[chan struct{}]struct{} trackOverflow bool trackMutate bool } // Txn starts a new transaction that can be used to mutate the tree func (t *Tree) Txn() *Txn { txn := &Txn{ root: t.root, snap: t.root, size: t.size, } return txn } // Clone makes an independent copy of the transaction. The new transaction // does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently however each transaction may only be mutated in a single thread. func (t *Txn) Clone() *Txn { // reset the writable node cache to avoid leaking future writes into the clone t.writable = nil txn := &Txn{ root: t.root, snap: t.snap, size: t.size, } return txn } // TrackMutate can be used to toggle if mutations are tracked. If this is enabled // then notifications will be issued for affected internal nodes and leaves when // the transaction is committed. func (t *Txn) TrackMutate(track bool) { t.trackMutate = track } // trackChannel safely attempts to track the given mutation channel, setting the // overflow flag if we can no longer track any more. This limits the amount of // state that will accumulate during a transaction and we have a slower algorithm // to switch to if we overflow. 
func (t *Txn) trackChannel(ch chan struct{}) { // In overflow, make sure we don't store any more objects. if t.trackOverflow { return } // If this would overflow the state we reject it and set the flag (since // we aren't tracking everything that's required any longer). if len(t.trackChannels) >= defaultModifiedCache { // Mark that we are in the overflow state t.trackOverflow = true // Clear the map so that the channels can be garbage collected. It is // safe to do this since we have already overflowed and will be using // the slow notify algorithm. t.trackChannels = nil return } // Create the map on the fly when we need it. if t.trackChannels == nil { t.trackChannels = make(map[chan struct{}]struct{}) } // Otherwise we are good to track it. t.trackChannels[ch] = struct{}{} } // writeNode returns a node to be modified, if the current node has already been // modified during the course of the transaction, it is used in-place. Set // forLeafUpdate to true if you are getting a write node to update the leaf, // which will set leaf mutation tracking appropriately as well. func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { // Ensure the writable set exists. if t.writable == nil { lru, err := simplelru.NewLRU(defaultModifiedCache, nil) if err != nil { panic(err) } t.writable = lru } // If this node has already been modified, we can continue to use it // during this transaction. We know that we don't need to track it for // a node update since the node is writable, but if this is for a leaf // update we track it, in case the initial write to this node didn't // update the leaf. if _, ok := t.writable.Get(n); ok { if t.trackMutate && forLeafUpdate && n.leaf != nil { t.trackChannel(n.leaf.mutateCh) } return n } // Mark this node as being mutated. if t.trackMutate { t.trackChannel(n.mutateCh) } // Mark its leaf as being mutated, if appropriate. if t.trackMutate && forLeafUpdate && n.leaf != nil { t.trackChannel(n.leaf.mutateCh) } // Copy the existing node. 
If you have set forLeafUpdate it will be // safe to replace this leaf with another after you get your node for // writing. You MUST replace it, because the channel associated with // this leaf will be closed when this transaction is committed. nc := &Node{ mutateCh: make(chan struct{}), leaf: n.leaf, } if n.prefix != nil { nc.prefix = make([]byte, len(n.prefix)) copy(nc.prefix, n.prefix) } if len(n.edges) != 0 { nc.edges = make([]edge, len(n.edges)) copy(nc.edges, n.edges) } // Mark this node as writable. t.writable.Add(nc, nil) return nc } // Visit all the nodes in the tree under n, and add their mutateChannels to the transaction // Returns the size of the subtree visited func (t *Txn) trackChannelsAndCount(n *Node) int { // Count only leaf nodes leaves := 0 if n.leaf != nil { leaves = 1 } // Mark this node as being mutated. if t.trackMutate { t.trackChannel(n.mutateCh) } // Mark its leaf as being mutated, if appropriate. if t.trackMutate && n.leaf != nil { t.trackChannel(n.leaf.mutateCh) } // Recurse on the children for _, e := range n.edges { leaves += t.trackChannelsAndCount(e.node) } return leaves } // mergeChild is called to collapse the given node with its child. This is only // called when the given node is not a leaf and has a single edge. func (t *Txn) mergeChild(n *Node) { // Mark the child node as being mutated since we are about to abandon // it. We don't need to mark the leaf since we are retaining it if it // is there. e := n.edges[0] child := e.node if t.trackMutate { t.trackChannel(child.mutateCh) } // Merge the nodes. 
n.prefix = concat(n.prefix, child.prefix) n.leaf = child.leaf if len(child.edges) != 0 { n.edges = make([]edge, len(child.edges)) copy(n.edges, child.edges) } else { n.edges = nil } } // insert does a recursive insertion func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { // Handle key exhaustion if len(search) == 0 { var oldVal interface{} didUpdate := false if n.isLeaf() { oldVal = n.leaf.val didUpdate = true } nc := t.writeNode(n, true) nc.leaf = &leafNode{ mutateCh: make(chan struct{}), key: k, val: v, } return nc, oldVal, didUpdate } // Look for the edge idx, child := n.getEdge(search[0]) // No edge, create one if child == nil { e := edge{ label: search[0], node: &Node{ mutateCh: make(chan struct{}), leaf: &leafNode{ mutateCh: make(chan struct{}), key: k, val: v, }, prefix: search, }, } nc := t.writeNode(n, false) nc.addEdge(e) return nc, nil, false } // Determine longest prefix of the search key on match commonPrefix := longestPrefix(search, child.prefix) if commonPrefix == len(child.prefix) { search = search[commonPrefix:] newChild, oldVal, didUpdate := t.insert(child, k, search, v) if newChild != nil { nc := t.writeNode(n, false) nc.edges[idx].node = newChild return nc, oldVal, didUpdate } return nil, oldVal, didUpdate } // Split the node nc := t.writeNode(n, false) splitNode := &Node{ mutateCh: make(chan struct{}), prefix: search[:commonPrefix], } nc.replaceEdge(edge{ label: search[0], node: splitNode, }) // Restore the existing child node modChild := t.writeNode(child, false) splitNode.addEdge(edge{ label: modChild.prefix[commonPrefix], node: modChild, }) modChild.prefix = modChild.prefix[commonPrefix:] // Create a new leaf node leaf := &leafNode{ mutateCh: make(chan struct{}), key: k, val: v, } // If the new key is a subset, add to to this node search = search[commonPrefix:] if len(search) == 0 { splitNode.leaf = leaf return nc, nil, false } // Create a new edge for the node splitNode.addEdge(edge{ label: 
search[0], node: &Node{ mutateCh: make(chan struct{}), leaf: leaf, prefix: search, }, }) return nc, nil, false } // delete does a recursive deletion func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { // Check for key exhaustion if len(search) == 0 { if !n.isLeaf() { return nil, nil } // Copy the pointer in case we are in a transaction that already // modified this node since the node will be reused. Any changes // made to the node will not affect returning the original leaf // value. oldLeaf := n.leaf // Remove the leaf node nc := t.writeNode(n, true) nc.leaf = nil // Check if this node should be merged if n != t.root && len(nc.edges) == 1 { t.mergeChild(nc) } return nc, oldLeaf } // Look for an edge label := search[0] idx, child := n.getEdge(label) if child == nil || !bytes.HasPrefix(search, child.prefix) { return nil, nil } // Consume the search prefix search = search[len(child.prefix):] newChild, leaf := t.delete(n, child, search) if newChild == nil { return nil, nil } // Copy this node. WATCH OUT - it's safe to pass "false" here because we // will only ADD a leaf via nc.mergeChild() if there isn't one due to // the !nc.isLeaf() check in the logic just below. This is pretty subtle, // so be careful if you change any of the logic here. 
nc := t.writeNode(n, false) // Delete the edge if the node has no edges if newChild.leaf == nil && len(newChild.edges) == 0 { nc.delEdge(label) if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { t.mergeChild(nc) } } else { nc.edges[idx].node = newChild } return nc, leaf } // delete does a recursive deletion func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { // Check for key exhaustion if len(search) == 0 { nc := t.writeNode(n, true) if n.isLeaf() { nc.leaf = nil } nc.edges = nil return nc, t.trackChannelsAndCount(n) } // Look for an edge label := search[0] idx, child := n.getEdge(label) // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix // Need to do both so that we can delete prefixes that don't correspond to any node in the tree if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { return nil, 0 } // Consume the search prefix if len(child.prefix) > len(search) { search = []byte("") } else { search = search[len(child.prefix):] } newChild, numDeletions := t.deletePrefix(n, child, search) if newChild == nil { return nil, 0 } // Copy this node. WATCH OUT - it's safe to pass "false" here because we // will only ADD a leaf via nc.mergeChild() if there isn't one due to // the !nc.isLeaf() check in the logic just below. This is pretty subtle, // so be careful if you change any of the logic here. nc := t.writeNode(n, false) // Delete the edge if the node has no edges if newChild.leaf == nil && len(newChild.edges) == 0 { nc.delEdge(label) if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { t.mergeChild(nc) } } else { nc.edges[idx].node = newChild } return nc, numDeletions } // Insert is used to add or update a given key. The return provides // the previous value and a bool indicating if any was set. 
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) if newRoot != nil { t.root = newRoot } if !didUpdate { t.size++ } return oldVal, didUpdate } // Delete is used to delete a given key. Returns the old value if any, // and a bool indicating if the key was set. func (t *Txn) Delete(k []byte) (interface{}, bool) { newRoot, leaf := t.delete(nil, t.root, k) if newRoot != nil { t.root = newRoot } if leaf != nil { t.size-- return leaf.val, true } return nil, false } // DeletePrefix is used to delete an entire subtree that matches the prefix // This will delete all nodes under that prefix func (t *Txn) DeletePrefix(prefix []byte) bool { newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) if newRoot != nil { t.root = newRoot t.size = t.size - numDeletions return true } return false } // Root returns the current root of the radix tree within this // transaction. The root is not safe across insert and delete operations, // but can be used to read the current state during a transaction. func (t *Txn) Root() *Node { return t.root } // Get is used to lookup a specific key, returning // the value and if it was found func (t *Txn) Get(k []byte) (interface{}, bool) { return t.root.Get(k) } // GetWatch is used to lookup a specific key, returning // the watch channel, value and if it was found func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { return t.root.GetWatch(k) } // Commit is used to finalize the transaction and return a new tree. If mutation // tracking is turned on then notifications will also be issued. func (t *Txn) Commit() *Tree { nt := t.CommitOnly() if t.trackMutate { t.Notify() } return nt } // CommitOnly is used to finalize the transaction and return a new tree, but // does not issue any notifications until Notify is called. 
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
	nt := &Tree{t.root, t.size}
	// Clear the writable set so that later mutations through this Txn must
	// re-copy nodes instead of editing ones now shared with the committed
	// tree (see the copy-on-write notes on writeNode usage elsewhere).
	t.writable = nil
	return nt
}

// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
//
// Both raw iterators yield nodes in path order, so this is effectively a
// sorted-merge of the snapshot tree and the committed tree.
func (t *Txn) slowNotify() {
	snapIter := t.snap.rawIterator()
	rootIter := t.root.rawIterator()
	for snapIter.Front() != nil || rootIter.Front() != nil {
		// If we've exhausted the nodes in the old snapshot, we know
		// there's nothing remaining to notify.
		if snapIter.Front() == nil {
			return
		}
		snapElem := snapIter.Front()

		// If we've exhausted the nodes in the new root, we know we need
		// to invalidate everything that remains in the old snapshot. We
		// know from the loop condition there's something in the old
		// snapshot.
		if rootIter.Front() == nil {
			close(snapElem.mutateCh)
			if snapElem.isLeaf() {
				close(snapElem.leaf.mutateCh)
			}
			snapIter.Next()
			continue
		}

		// Do one string compare so we can check the various conditions
		// below without repeating the compare.
		cmp := strings.Compare(snapIter.Path(), rootIter.Path())

		// If the snapshot is behind the root, then we must have deleted
		// this node during the transaction.
		if cmp < 0 {
			close(snapElem.mutateCh)
			if snapElem.isLeaf() {
				close(snapElem.leaf.mutateCh)
			}
			snapIter.Next()
			continue
		}

		// If the snapshot is ahead of the root, then we must have added
		// this node during the transaction.
		if cmp > 0 {
			rootIter.Next()
			continue
		}

		// If we have the same path, then we need to see if we mutated a
		// node and possibly the leaf.
		rootElem := rootIter.Front()
		if snapElem != rootElem {
			// Pointer inequality means the node was copied (mutated);
			// only close the leaf channel if the leaf itself changed.
			close(snapElem.mutateCh)
			if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
				close(snapElem.leaf.mutateCh)
			}
		}
		snapIter.Next()
		rootIter.Next()
	}
}

// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() { if !t.trackMutate { return } // If we've overflowed the tracking state we can't use it in any way and // need to do a full tree compare. if t.trackOverflow { t.slowNotify() } else { for ch := range t.trackChannels { close(ch) } } // Clean up the tracking state so that a re-notify is safe (will trigger // the else clause above which will be a no-op). t.trackChannels = nil t.trackOverflow = false } // Insert is used to add or update a given key. The return provides // the new tree, previous value and a bool indicating if any was set. func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { txn := t.Txn() old, ok := txn.Insert(k, v) return txn.Commit(), old, ok } // Delete is used to delete a given key. Returns the new tree, // old value if any, and a bool indicating if the key was set. func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { txn := t.Txn() old, ok := txn.Delete(k) return txn.Commit(), old, ok } // DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, // and a bool indicating if the prefix matched any nodes func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { txn := t.Txn() ok := txn.DeletePrefix(k) return txn.Commit(), ok } // Root returns the root node of the tree which can be used for richer // query operations. 
func (t *Tree) Root() *Node { return t.root } // Get is used to lookup a specific key, returning // the value and if it was found func (t *Tree) Get(k []byte) (interface{}, bool) { return t.root.Get(k) } // longestPrefix finds the length of the shared prefix // of two strings func longestPrefix(k1, k2 []byte) int { max := len(k1) if l := len(k2); l < max { max = l } var i int for i = 0; i < max; i++ { if k1[i] != k2[i] { break } } return i } // concat two byte slices, returning a third new copy func concat(a, b []byte) []byte { c := make([]byte, len(a)+len(b)) copy(c, a) copy(c[len(a):], b) return c } go-immutable-radix-1.3.1/iradix_test.go000066400000000000000000001107461406643154000200650ustar00rootroot00000000000000package iradix import ( "fmt" "math/rand" "reflect" "sort" "testing" "testing/quick" "github.com/hashicorp/go-uuid" ) func CopyTree(t *Tree) *Tree { nt := &Tree{ root: CopyNode(t.root), size: t.size, } return nt } func CopyNode(n *Node) *Node { nn := &Node{} if n.mutateCh != nil { nn.mutateCh = n.mutateCh } if n.prefix != nil { nn.prefix = make([]byte, len(n.prefix)) copy(nn.prefix, n.prefix) } if n.leaf != nil { nn.leaf = CopyLeaf(n.leaf) } if len(n.edges) != 0 { nn.edges = make([]edge, len(n.edges)) for idx, edge := range n.edges { nn.edges[idx].label = edge.label nn.edges[idx].node = CopyNode(edge.node) } } return nn } func CopyLeaf(l *leafNode) *leafNode { ll := &leafNode{ mutateCh: l.mutateCh, key: l.key, val: l.val, } return ll } func TestRadix_HugeTxn(t *testing.T) { r := New() // Insert way more nodes than the cache can fit txn1 := r.Txn() var expect []string for i := 0; i < defaultModifiedCache*100; i++ { gen, err := uuid.GenerateUUID() if err != nil { t.Fatalf("err: %v", err) } txn1.Insert([]byte(gen), i) expect = append(expect, gen) } r = txn1.Commit() sort.Strings(expect) // Collect the output, should be sorted var out []string fn := func(k []byte, v interface{}) bool { out = append(out, string(k)) return false } r.Root().Walk(fn) // 
Verify the match if len(out) != len(expect) { t.Fatalf("length mis-match: %d vs %d", len(out), len(expect)) } for i := 0; i < len(out); i++ { if out[i] != expect[i] { t.Fatalf("mis-match: %v %v", out[i], expect[i]) } } } func TestRadix(t *testing.T) { var min, max string inp := make(map[string]interface{}) for i := 0; i < 1000; i++ { gen, err := uuid.GenerateUUID() if err != nil { t.Fatalf("err: %v", err) } inp[gen] = i if gen < min || i == 0 { min = gen } if gen > max || i == 0 { max = gen } } r := New() rCopy := CopyTree(r) for k, v := range inp { newR, _, _ := r.Insert([]byte(k), v) if !reflect.DeepEqual(r, rCopy) { t.Errorf("r: %#v rc: %#v", r, rCopy) t.Errorf("r: %#v rc: %#v", r.root, rCopy.root) t.Fatalf("structure modified %d", newR.Len()) } r = newR rCopy = CopyTree(r) } if r.Len() != len(inp) { t.Fatalf("bad length: %v %v", r.Len(), len(inp)) } for k, v := range inp { out, ok := r.Get([]byte(k)) if !ok { t.Fatalf("missing key: %v", k) } if out != v { t.Fatalf("value mis-match: %v %v", out, v) } } // Check min and max outMin, _, _ := r.Root().Minimum() if string(outMin) != min { t.Fatalf("bad minimum: %v %v", outMin, min) } outMax, _, _ := r.Root().Maximum() if string(outMax) != max { t.Fatalf("bad maximum: %v %v", outMax, max) } // Copy the full tree before delete orig := r origCopy := CopyTree(r) for k, v := range inp { tree, out, ok := r.Delete([]byte(k)) r = tree if !ok { t.Fatalf("missing key: %v", k) } if out != v { t.Fatalf("value mis-match: %v %v", out, v) } } if r.Len() != 0 { t.Fatalf("bad length: %v", r.Len()) } if !reflect.DeepEqual(orig, origCopy) { t.Fatalf("structure modified") } } func TestRoot(t *testing.T) { r := New() r, _, ok := r.Delete(nil) if ok { t.Fatalf("bad") } r, _, ok = r.Insert(nil, true) if ok { t.Fatalf("bad") } val, ok := r.Get(nil) if !ok || val != true { t.Fatalf("bad: %#v", val) } r, val, ok = r.Delete(nil) if !ok || val != true { t.Fatalf("bad: %v", val) } } func TestInsert_UpdateFeedback(t *testing.T) { r := New() txn1 
:= r.Txn() for i := 0; i < 10; i++ { var old interface{} var didUpdate bool old, didUpdate = txn1.Insert([]byte("helloworld"), i) if i == 0 { if old != nil || didUpdate { t.Fatalf("bad: %d %v %v", i, old, didUpdate) } } else { if old == nil || old.(int) != i-1 || !didUpdate { t.Fatalf("bad: %d %v %v", i, old, didUpdate) } } } } func TestDelete(t *testing.T) { r := New() s := []string{"", "A", "AB"} for _, ss := range s { r, _, _ = r.Insert([]byte(ss), true) } var ok bool for _, ss := range s { r, _, ok = r.Delete([]byte(ss)) if !ok { t.Fatalf("bad %q", ss) } } } func TestDeletePrefix(t *testing.T) { type exp struct { desc string treeNodes []string prefix string expectedOut []string } //various test cases where DeletePrefix should succeed cases := []exp{ { "prefix not a node in tree", []string{ "", "test/test1", "test/test2", "test/test3", "R", "RA"}, "test", []string{ "", "R", "RA", }, }, { "prefix matches a node in tree", []string{ "", "test", "test/test1", "test/test2", "test/test3", "test/testAAA", "R", "RA", }, "test", []string{ "", "R", "RA", }, }, { "longer prefix, but prefix is not a node in tree", []string{ "", "test/test1", "test/test2", "test/test3", "test/testAAA", "R", "RA", }, "test/test", []string{ "", "R", "RA", }, }, { "prefix only matches one node", []string{ "", "AB", "ABC", "AR", "R", "RA", }, "AR", []string{ "", "AB", "ABC", "R", "RA", }, }, } for _, testCase := range cases { t.Run(testCase.desc, func(t *testing.T) { r := New() for _, ss := range testCase.treeNodes { r, _, _ = r.Insert([]byte(ss), true) } if got, want := r.Len(), len(testCase.treeNodes); got != want { t.Fatalf("Unexpected tree length after insert, got %d want %d ", got, want) } r, ok := r.DeletePrefix([]byte(testCase.prefix)) if !ok { t.Fatalf("DeletePrefix should have returned true for tree %v, deleting prefix %v", testCase.treeNodes, testCase.prefix) } if got, want := r.Len(), len(testCase.expectedOut); got != want { t.Fatalf("Bad tree length, got %d want %d tree %v, deleting 
prefix %v ", got, want, testCase.treeNodes, testCase.prefix) } verifyTree(t, testCase.expectedOut, r) //Delete a non-existant node r, ok = r.DeletePrefix([]byte("CCCCC")) if ok { t.Fatalf("Expected DeletePrefix to return false ") } verifyTree(t, testCase.expectedOut, r) }) } } func TestTrackMutate_DeletePrefix(t *testing.T) { r := New() keys := []string{ "foo", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "bazbaz", "zipzap", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } rootWatch, _, _ := r.Root().GetWatch(nil) if rootWatch == nil { t.Fatalf("Should have returned a watch") } nodeWatch1, _, _ := r.Root().GetWatch([]byte("foo/bar/baz")) if nodeWatch1 == nil { t.Fatalf("Should have returned a watch") } nodeWatch2, _, _ := r.Root().GetWatch([]byte("foo/baz/bar")) if nodeWatch2 == nil { t.Fatalf("Should have returned a watch") } nodeWatch3, _, _ := r.Root().GetWatch([]byte("foo/zip/zap")) if nodeWatch3 == nil { t.Fatalf("Should have returned a watch") } unknownNodeWatch, _, _ := r.Root().GetWatch([]byte("bazbaz")) if unknownNodeWatch == nil { t.Fatalf("Should have returned a watch") } // Verify that deleting prefixes triggers the right set of watches txn := r.Txn() txn.TrackMutate(true) ok := txn.DeletePrefix([]byte("foo")) if !ok { t.Fatalf("Expected delete prefix to return true") } if hasAnyClosedMutateCh(r) { t.Fatalf("Transaction was not committed, no channel should have been closed") } txn.Commit() // Verify that all the leaf nodes we set up watches for above get triggered from the delete prefix call select { case <-rootWatch: default: t.Fatalf("root watch was not triggered") } select { case <-nodeWatch1: default: t.Fatalf("node watch was not triggered") } select { case <-nodeWatch2: default: t.Fatalf("node watch was not triggered") } select { case <-nodeWatch3: default: t.Fatalf("node watch was not triggered") } select { case <-unknownNodeWatch: t.Fatalf("Unrelated node 
watch was triggered during a prefix delete") default: } } func verifyTree(t *testing.T, expected []string, r *Tree) { root := r.Root() out := []string{} fn := func(k []byte, v interface{}) bool { out = append(out, string(k)) return false } root.Walk(fn) if !reflect.DeepEqual(expected, out) { t.Fatalf("Unexpected contents of tree after delete prefix: expected %v, but got %v", expected, out) } } func TestLongestPrefix(t *testing.T) { r := New() keys := []string{ "", "foo", "foobar", "foobarbaz", "foobarbazzip", "foozip", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } type exp struct { inp string out string } cases := []exp{ {"a", ""}, {"abc", ""}, {"fo", ""}, {"foo", "foo"}, {"foob", "foo"}, {"foobar", "foobar"}, {"foobarba", "foobar"}, {"foobarbaz", "foobarbaz"}, {"foobarbazzi", "foobarbaz"}, {"foobarbazzip", "foobarbazzip"}, {"foozi", "foo"}, {"foozip", "foozip"}, {"foozipzap", "foozip"}, } root := r.Root() for _, test := range cases { m, _, ok := root.LongestPrefix([]byte(test.inp)) if !ok { t.Fatalf("no match: %v", test) } if string(m) != test.out { t.Fatalf("mis-match: %v %v", m, test) } } } func TestWalkPrefix(t *testing.T) { r := New() keys := []string{ "foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "zipzap", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } type exp struct { inp string out []string } cases := []exp{ { "f", []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, }, { "foo", []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, }, { "foob", []string{"foobar"}, }, { "foo/", []string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, }, { "foo/b", []string{"foo/bar/baz", "foo/baz/bar"}, }, { "foo/ba", []string{"foo/bar/baz", "foo/baz/bar"}, }, { "foo/bar", []string{"foo/bar/baz"}, }, { "foo/bar/baz", []string{"foo/bar/baz"}, }, { "foo/bar/bazoo", 
[]string{}, }, { "z", []string{"zipzap"}, }, } root := r.Root() for _, test := range cases { out := []string{} fn := func(k []byte, v interface{}) bool { out = append(out, string(k)) return false } root.WalkPrefix([]byte(test.inp), fn) sort.Strings(out) sort.Strings(test.out) if !reflect.DeepEqual(out, test.out) { t.Fatalf("mis-match: %v %v", out, test.out) } } } func TestWalkPath(t *testing.T) { r := New() keys := []string{ "foo", "foo/bar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "zipzap", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } type exp struct { inp string out []string } cases := []exp{ { "f", []string{}, }, { "foo", []string{"foo"}, }, { "foo/", []string{"foo"}, }, { "foo/ba", []string{"foo"}, }, { "foo/bar", []string{"foo", "foo/bar"}, }, { "foo/bar/baz", []string{"foo", "foo/bar", "foo/bar/baz"}, }, { "foo/bar/bazoo", []string{"foo", "foo/bar", "foo/bar/baz"}, }, { "z", []string{}, }, } root := r.Root() for _, test := range cases { out := []string{} fn := func(k []byte, v interface{}) bool { out = append(out, string(k)) return false } root.WalkPath([]byte(test.inp), fn) sort.Strings(out) sort.Strings(test.out) if !reflect.DeepEqual(out, test.out) { t.Fatalf("mis-match: %v %v", out, test.out) } } } func TestIteratePrefix(t *testing.T) { r := New() keys := []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", "zipzap", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } type exp struct { inp string out []string } cases := []exp{ { "", keys, }, { "f", []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", }, }, { "foo", []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", }, }, { "foob", []string{"foobar"}, }, { "foo/", []string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, }, { "foo/b", []string{"foo/bar/baz", "foo/baz/bar"}, }, { "foo/ba", 
[]string{"foo/bar/baz", "foo/baz/bar"}, }, { "foo/bar", []string{"foo/bar/baz"}, }, { "foo/bar/baz", []string{"foo/bar/baz"}, }, { "foo/bar/bazoo", []string{}, }, { "z", []string{"zipzap"}, }, } root := r.Root() for idx, test := range cases { iter := root.Iterator() if test.inp != "" { iter.SeekPrefix([]byte(test.inp)) } // Consume all the keys out := []string{} for { key, _, ok := iter.Next() if !ok { break } out = append(out, string(key)) } if !reflect.DeepEqual(out, test.out) { t.Fatalf("mis-match: %d %v %v", idx, out, test.out) } } } func TestMergeChildNilEdges(t *testing.T) { r := New() r, _, _ = r.Insert([]byte("foobar"), 42) r, _, _ = r.Insert([]byte("foozip"), 43) r, _, _ = r.Delete([]byte("foobar")) root := r.Root() out := []string{} fn := func(k []byte, v interface{}) bool { out = append(out, string(k)) return false } root.Walk(fn) expect := []string{"foozip"} sort.Strings(out) sort.Strings(expect) if !reflect.DeepEqual(out, expect) { t.Fatalf("mis-match: %v %v", out, expect) } } func TestMergeChildVisibility(t *testing.T) { r := New() r, _, _ = r.Insert([]byte("foobar"), 42) r, _, _ = r.Insert([]byte("foobaz"), 43) r, _, _ = r.Insert([]byte("foozip"), 10) txn1 := r.Txn() txn2 := r.Txn() // Ensure we get the expected value foobar and foobaz if val, ok := txn1.Get([]byte("foobar")); !ok || val != 42 { t.Fatalf("bad: %v", val) } if val, ok := txn1.Get([]byte("foobaz")); !ok || val != 43 { t.Fatalf("bad: %v", val) } if val, ok := txn2.Get([]byte("foobar")); !ok || val != 42 { t.Fatalf("bad: %v", val) } if val, ok := txn2.Get([]byte("foobaz")); !ok || val != 43 { t.Fatalf("bad: %v", val) } // Delete of foozip will cause a merge child between the // "foo" and "ba" nodes. if val, ok := txn2.Delete([]byte("foozip")); !ok || val != 10 { t.Fatalf("bad: %v", val) } // Insert of "foobaz" will update the slice of the "fooba" node // in-place to point to the new "foobaz" node. 
This in-place update // will cause the visibility of the update to leak into txn1 (prior // to the fix). if val, ok := txn2.Insert([]byte("foobaz"), 44); !ok || val != 43 { t.Fatalf("bad: %v", val) } // Ensure we get the expected value foobar and foobaz if val, ok := txn1.Get([]byte("foobar")); !ok || val != 42 { t.Fatalf("bad: %v", val) } if val, ok := txn1.Get([]byte("foobaz")); !ok || val != 43 { t.Fatalf("bad: %v", val) } if val, ok := txn2.Get([]byte("foobar")); !ok || val != 42 { t.Fatalf("bad: %v", val) } if val, ok := txn2.Get([]byte("foobaz")); !ok || val != 44 { t.Fatalf("bad: %v", val) } // Commit txn2 r = txn2.Commit() // Ensure we get the expected value foobar and foobaz if val, ok := txn1.Get([]byte("foobar")); !ok || val != 42 { t.Fatalf("bad: %v", val) } if val, ok := txn1.Get([]byte("foobaz")); !ok || val != 43 { t.Fatalf("bad: %v", val) } if val, ok := r.Get([]byte("foobar")); !ok || val != 42 { t.Fatalf("bad: %v", val) } if val, ok := r.Get([]byte("foobaz")); !ok || val != 44 { t.Fatalf("bad: %v", val) } } // isClosed returns true if the given channel is closed. func isClosed(ch chan struct{}) bool { select { case <-ch: return true default: return false } } // hasAnyClosedMutateCh scans the given tree and returns true if there are any // closed mutate channels on any nodes or leaves. 
func hasAnyClosedMutateCh(r *Tree) bool { for iter := r.root.rawIterator(); iter.Front() != nil; iter.Next() { n := iter.Front() if isClosed(n.mutateCh) { return true } if n.isLeaf() && isClosed(n.leaf.mutateCh) { return true } } return false } func TestTrackMutate_SeekPrefixWatch(t *testing.T) { for i := 0; i < 3; i++ { r := New() keys := []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", "zipzap", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } iter := r.Root().Iterator() rootWatch := iter.SeekPrefixWatch([]byte("nope")) iter = r.Root().Iterator() parentWatch := iter.SeekPrefixWatch([]byte("foo")) iter = r.Root().Iterator() leafWatch := iter.SeekPrefixWatch([]byte("foobar")) iter = r.Root().Iterator() missingWatch := iter.SeekPrefixWatch([]byte("foobarbaz")) iter = r.Root().Iterator() otherWatch := iter.SeekPrefixWatch([]byte("foo/b")) // Write to a sub-child should trigger the leaf! txn := r.Txn() txn.TrackMutate(true) txn.Insert([]byte("foobarbaz"), nil) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Verify root and parent triggered, and leaf affected select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: default: t.Fatalf("bad") } select { case <-missingWatch: default: t.Fatalf("bad") } select { case <-otherWatch: t.Fatalf("bad") default: } iter = r.Root().Iterator() rootWatch = iter.SeekPrefixWatch([]byte("nope")) iter = r.Root().Iterator() parentWatch = iter.SeekPrefixWatch([]byte("foo")) iter = r.Root().Iterator() leafWatch = iter.SeekPrefixWatch([]byte("foobar")) iter = r.Root().Iterator() missingWatch = iter.SeekPrefixWatch([]byte("foobarbaz")) // Delete to a sub-child should trigger the leaf! 
txn = r.Txn() txn.TrackMutate(true) txn.Delete([]byte("foobarbaz")) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Verify root and parent triggered, and leaf affected select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: default: t.Fatalf("bad") } select { case <-missingWatch: default: t.Fatalf("bad") } select { case <-otherWatch: t.Fatalf("bad") default: } } } func TestTrackMutate_GetWatch(t *testing.T) { for i := 0; i < 3; i++ { r := New() keys := []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", "zipzap", } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } if r.Len() != len(keys) { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } rootWatch, _, ok := r.Root().GetWatch(nil) if rootWatch == nil { t.Fatalf("bad") } parentWatch, _, ok := r.Root().GetWatch([]byte("foo")) if parentWatch == nil { t.Fatalf("bad") } leafWatch, _, ok := r.Root().GetWatch([]byte("foobar")) if !ok { t.Fatalf("should be found") } if leafWatch == nil { t.Fatalf("bad") } otherWatch, _, ok := r.Root().GetWatch([]byte("foo/b")) if otherWatch == nil { t.Fatalf("bad") } // Write to a sub-child should not trigger the leaf! 
txn := r.Txn() txn.TrackMutate(true) txn.Insert([]byte("foobarbaz"), nil) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Verify root and parent triggered, not leaf affected select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: t.Fatalf("bad") default: } select { case <-otherWatch: t.Fatalf("bad") default: } // Setup new watchers rootWatch, _, ok = r.Root().GetWatch(nil) if rootWatch == nil { t.Fatalf("bad") } parentWatch, _, ok = r.Root().GetWatch([]byte("foo")) if parentWatch == nil { t.Fatalf("bad") } // Write to a exactly leaf should trigger the leaf! txn = r.Txn() txn.TrackMutate(true) txn.Insert([]byte("foobar"), nil) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: default: t.Fatalf("bad") } select { case <-otherWatch: t.Fatalf("bad") default: } // Setup all the watchers again rootWatch, _, ok = r.Root().GetWatch(nil) if rootWatch == nil { t.Fatalf("bad") } parentWatch, _, ok = r.Root().GetWatch([]byte("foo")) if parentWatch == nil { t.Fatalf("bad") } leafWatch, _, ok = r.Root().GetWatch([]byte("foobar")) if !ok { t.Fatalf("should be found") } if leafWatch == nil { t.Fatalf("bad") } // Delete to a sub-child should not trigger the leaf! 
txn = r.Txn() txn.TrackMutate(true) txn.Delete([]byte("foobarbaz")) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Verify root and parent triggered, not leaf affected select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: t.Fatalf("bad") default: } select { case <-otherWatch: t.Fatalf("bad") default: } // Setup new watchers rootWatch, _, ok = r.Root().GetWatch(nil) if rootWatch == nil { t.Fatalf("bad") } parentWatch, _, ok = r.Root().GetWatch([]byte("foo")) if parentWatch == nil { t.Fatalf("bad") } // Write to a exactly leaf should trigger the leaf! txn = r.Txn() txn.TrackMutate(true) txn.Delete([]byte("foobar")) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: default: t.Fatalf("bad") } select { case <-otherWatch: t.Fatalf("bad") default: } } } func TestTrackMutate_HugeTxn(t *testing.T) { r := New() keys := []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", "nochange", } for i := 0; i < defaultModifiedCache; i++ { key := fmt.Sprintf("aaa%d", i) r, _, _ = r.Insert([]byte(key), nil) } for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } for i := 0; i < defaultModifiedCache; i++ { key := fmt.Sprintf("zzz%d", i) r, _, _ = r.Insert([]byte(key), nil) } if r.Len() != len(keys)+2*defaultModifiedCache { t.Fatalf("bad len: %v %v", r.Len(), len(keys)) } rootWatch, _, ok := r.Root().GetWatch(nil) if rootWatch == nil { t.Fatalf("bad") } parentWatch, _, ok := r.Root().GetWatch([]byte("foo")) if parentWatch == nil { t.Fatalf("bad") } leafWatch, _, ok := r.Root().GetWatch([]byte("foobar")) if 
!ok { t.Fatalf("should be found") } if leafWatch == nil { t.Fatalf("bad") } nopeWatch, _, ok := r.Root().GetWatch([]byte("nochange")) if !ok { t.Fatalf("should be found") } if nopeWatch == nil { t.Fatalf("bad") } beforeWatch, _, ok := r.Root().GetWatch([]byte("aaa123")) if beforeWatch == nil { t.Fatalf("bad") } afterWatch, _, ok := r.Root().GetWatch([]byte("zzz123")) if afterWatch == nil { t.Fatalf("bad") } // Start the transaction. txn := r.Txn() txn.TrackMutate(true) // Add new nodes on both sides of the tree and delete enough nodes to // overflow the tracking. txn.Insert([]byte("aaa"), nil) for i := 0; i < defaultModifiedCache; i++ { key := fmt.Sprintf("aaa%d", i) txn.Delete([]byte(key)) } for i := 0; i < defaultModifiedCache; i++ { key := fmt.Sprintf("zzz%d", i) txn.Delete([]byte(key)) } txn.Insert([]byte("zzz"), nil) // Hit the leaf, and add a child so we make multiple mutations to the // same node. txn.Insert([]byte("foobar"), nil) txn.Insert([]byte("foobarbaz"), nil) // Commit and make sure we overflowed but didn't take on extra stuff. r = txn.CommitOnly() if !txn.trackOverflow || txn.trackChannels != nil { t.Fatalf("bad") } // Now do the trigger. txn.Notify() // Make sure no closed channels escaped the transaction. if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Verify the watches fired as expected. 
select { case <-rootWatch: default: t.Fatalf("bad") } select { case <-parentWatch: default: t.Fatalf("bad") } select { case <-leafWatch: default: t.Fatalf("bad") } select { case <-nopeWatch: t.Fatalf("bad") default: } select { case <-beforeWatch: default: t.Fatalf("bad") } select { case <-afterWatch: default: t.Fatalf("bad") } } func TestTrackMutate_mergeChild(t *testing.T) { // This case does a delete of the "acb" leaf, which causes the "aca" // leaf to get merged with the old "ac" node: // // [root] [root] // |a |a // [node] [node] // b/ \c b/ \c // (ab) [node] (ab) (aca) // a/ \b // (aca) (acb) // for i := 0; i < 3; i++ { r := New() r, _, _ = r.Insert([]byte("ab"), nil) r, _, _ = r.Insert([]byte("aca"), nil) r, _, _ = r.Insert([]byte("acb"), nil) snapIter := r.root.rawIterator() // Run through all notification methods as there were bugs in // both that affected these operations. The slowNotify path // would detect copied but otherwise identical leaves as changed // and wrongly close channels. The normal path would fail to // notify on a child node that had been merged. txn := r.Txn() txn.TrackMutate(true) txn.Delete([]byte("acb")) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Run through the old tree and make sure the exact channels we // expected were closed. 
for ; snapIter.Front() != nil; snapIter.Next() { n := snapIter.Front() path := snapIter.Path() switch path { case "", "a", "ac": // parent nodes all change if !isClosed(n.mutateCh) || n.leaf != nil { t.Fatalf("bad") } case "ab": // unrelated node / leaf sees no change if isClosed(n.mutateCh) || isClosed(n.leaf.mutateCh) { t.Fatalf("bad") } case "aca": // this node gets merged, but the leaf doesn't change if !isClosed(n.mutateCh) || isClosed(n.leaf.mutateCh) { t.Fatalf("bad") } case "acb": // this node / leaf gets deleted if !isClosed(n.mutateCh) || !isClosed(n.leaf.mutateCh) { t.Fatalf("bad") } default: t.Fatalf("bad: %s", path) } } } } func TestTrackMutate_cachedNodeChange(t *testing.T) { // This case does a delete of the "acb" leaf, which causes the "aca" // leaf to get merged with the old "ac" node: // // [root] [root] // |a |a // [node] [node] // b/ \c b/ \c // (ab) [node] (ab) (aca*) <- this leaf gets modified // a/ \b post-merge // (aca) (acb) // // Then it makes a modification to the "aca" leaf on a node that will // be in the cache, so this makes sure that the leaf watch fires. for i := 0; i < 3; i++ { r := New() r, _, _ = r.Insert([]byte("ab"), nil) r, _, _ = r.Insert([]byte("aca"), nil) r, _, _ = r.Insert([]byte("acb"), nil) snapIter := r.root.rawIterator() txn := r.Txn() txn.TrackMutate(true) txn.Delete([]byte("acb")) txn.Insert([]byte("aca"), nil) switch i { case 0: r = txn.Commit() case 1: r = txn.CommitOnly() txn.Notify() default: r = txn.CommitOnly() txn.slowNotify() } if hasAnyClosedMutateCh(r) { t.Fatalf("bad") } // Run through the old tree and make sure the exact channels we // expected were closed. 
for ; snapIter.Front() != nil; snapIter.Next() { n := snapIter.Front() path := snapIter.Path() switch path { case "", "a", "ac": // parent nodes all change if !isClosed(n.mutateCh) || n.leaf != nil { t.Fatalf("bad") } case "ab": // unrelated node / leaf sees no change if isClosed(n.mutateCh) || isClosed(n.leaf.mutateCh) { t.Fatalf("bad") } case "aca": // merge changes the node, then we update the leaf if !isClosed(n.mutateCh) || !isClosed(n.leaf.mutateCh) { t.Fatalf("bad") } case "acb": // this node / leaf gets deleted if !isClosed(n.mutateCh) || !isClosed(n.leaf.mutateCh) { t.Fatalf("bad") } default: t.Fatalf("bad: %s", path) } } } } func TestLenTxn(t *testing.T) { r := New() if r.Len() != 0 { t.Fatalf("not starting with empty tree") } txn := r.Txn() keys := []string{ "foo/bar/baz", "foo/baz/bar", "foo/zip/zap", "foobar", "nochange", } for _, k := range keys { txn.Insert([]byte(k), nil) } r = txn.Commit() if r.Len() != len(keys) { t.Fatalf("bad: expected %d, got %d", len(keys), r.Len()) } txn = r.Txn() for _, k := range keys { txn.Delete([]byte(k)) } r = txn.Commit() if r.Len() != 0 { t.Fatalf("tree len should be zero, got %d", r.Len()) } } func TestIterateLowerBound(t *testing.T) { // these should be defined in order var fixedLenKeys = []string{ "00000", "00001", "00004", "00010", "00020", "20020", } // these should be defined in order var mixedLenKeys = []string{ "a1", "abc", "barbazboo", "f", "foo", "found", "zap", "zip", } type exp struct { keys []string search string want []string } cases := []exp{ { fixedLenKeys, "00000", fixedLenKeys, }, { fixedLenKeys, "00003", []string{ "00004", "00010", "00020", "20020", }, }, { fixedLenKeys, "00010", []string{ "00010", "00020", "20020", }, }, { fixedLenKeys, "20000", []string{ "20020", }, }, { fixedLenKeys, "20020", []string{ "20020", }, }, { fixedLenKeys, "20022", []string{}, }, { mixedLenKeys, "A", // before all lower case letters mixedLenKeys, }, { mixedLenKeys, "a1", mixedLenKeys, }, { mixedLenKeys, "b", []string{ 
"barbazboo", "f", "foo", "found", "zap", "zip", }, }, { mixedLenKeys, "bar", []string{ "barbazboo", "f", "foo", "found", "zap", "zip", }, }, { mixedLenKeys, "barbazboo0", []string{ "f", "foo", "found", "zap", "zip", }, }, { mixedLenKeys, "zippy", []string{}, }, { mixedLenKeys, "zi", []string{ "zip", }, }, // This is a case found by TestIterateLowerBoundFuzz simplified by hand. The // lowest node should be the first, but it is split on the same char as the // second char in the search string. My initial implementation didn't take // that into account (i.e. propagate the fact that we already know we are // greater than the input key into the recursion). This would skip the first // result. { []string{ "bb", "bc", }, "ac", []string{"bb", "bc"}, }, // This is a case found by TestIterateLowerBoundFuzz. { []string{"aaaba", "aabaa", "aabab", "aabcb", "aacca", "abaaa", "abacb", "abbcb", "abcaa", "abcba", "abcbb", "acaaa", "acaab", "acaac", "acaca", "acacb", "acbaa", "acbbb", "acbcc", "accca", "babaa", "babcc", "bbaaa", "bbacc", "bbbab", "bbbac", "bbbcc", "bbcab", "bbcca", "bbccc", "bcaac", "bcbca", "bcbcc", "bccac", "bccbc", "bccca", "caaab", "caacc", "cabac", "cabbb", "cabbc", "cabcb", "cacac", "cacbc", "cacca", "cbaba", "cbabb", "cbabc", "cbbaa", "cbbab", "cbbbc", "cbcbb", "cbcbc", "cbcca", "ccaaa", "ccabc", "ccaca", "ccacc", "ccbac", "cccaa", "cccac", "cccca"}, "cbacb", []string{"cbbaa", "cbbab", "cbbbc", "cbcbb", "cbcbc", "cbcca", "ccaaa", "ccabc", "ccaca", "ccacc", "ccbac", "cccaa", "cccac", "cccca"}, }, // Panic case found be TestIterateLowerBoundFuzz. { []string{"gcgc"}, "", []string{"gcgc"}, }, // We SHOULD support keys that are prefixes of each other despite some // confusion in the original implementation. { []string{"f", "fo", "foo", "food", "bug"}, "foo", []string{"foo", "food"}, }, // We also support the empty key (which is a prefix of every other key) as a // valid key to insert and search for. 
{ []string{"f", "fo", "foo", "food", "bug", ""}, "foo", []string{"foo", "food"}, }, { []string{"f", "bug", ""}, "", []string{"", "bug", "f"}, }, { []string{"f", "bug", "xylophone"}, "", []string{"bug", "f", "xylophone"}, }, // This is a case we realized we were not covering while fixing // SeekReverseLowerBound and could panic before. { []string{"bar", "foo00", "foo11"}, "foo", []string{"foo00", "foo11"}, }, } for idx, test := range cases { t.Run(fmt.Sprintf("case%03d", idx), func(t *testing.T) { r := New() // Insert keys for _, k := range test.keys { var ok bool r, _, ok = r.Insert([]byte(k), nil) if ok { t.Fatalf("duplicate key %s in keys", k) } } if r.Len() != len(test.keys) { t.Fatal("failed adding keys") } // Get and seek iterator root := r.Root() iter := root.Iterator() iter.SeekLowerBound([]byte(test.search)) // Consume all the keys out := []string{} for { key, _, ok := iter.Next() if !ok { break } out = append(out, string(key)) } if !reflect.DeepEqual(out, test.want) { t.Fatalf("mis-match: key=%s\n got=%v\n want=%v", test.search, out, test.want) } }) } } type readableString string func (s readableString) Generate(rand *rand.Rand, size int) reflect.Value { // Pick a random string from a limited alphabet that makes it easy to read the // failure cases. const letters = "abcdefg" // Ignore size and make them all shortish to provoke bigger chance of hitting // prefixes and more intersting tree shapes. size = rand.Intn(8) b := make([]byte, size) for i := range b { b[i] = letters[rand.Intn(len(letters))] } return reflect.ValueOf(readableString(b)) } func TestIterateLowerBoundFuzz(t *testing.T) { r := New() set := []string{} // This specifies a property where each call adds a new random key to the radix // tree. // // It also maintains a plain sorted list of the same set of keys and asserts // that iterating from some random key to the end using LowerBound produces // the same list as filtering all sorted keys that are lower. 
radixAddAndScan := func(newKey, searchKey readableString) []string { r, _, _ = r.Insert([]byte(newKey), nil) t.Logf("NewKey: %q, SearchKey: %q", newKey, searchKey) // Now iterate the tree from searchKey to the end it := r.Root().Iterator() result := []string{} it.SeekLowerBound([]byte(searchKey)) for { key, _, ok := it.Next() if !ok { break } result = append(result, string(key)) } return result } sliceAddSortAndFilter := func(newKey, searchKey readableString) []string { // Append the key to the set and re-sort set = append(set, string(newKey)) sort.Strings(set) t.Logf("Current Set: %#v", set) t.Logf("Search Key: %#v %v", searchKey, "" >= string(searchKey)) result := []string{} for i, k := range set { // Check this is not a duplicate of the previous value. Note we don't just // store the last string to compare because empty string is a valid value // in the set and makes comparing on the first iteration awkward. if i > 0 && set[i-1] == k { continue } if k >= string(searchKey) { result = append(result, k) } } return result } if err := quick.CheckEqual(radixAddAndScan, sliceAddSortAndFilter, nil); err != nil { t.Error(err) } } func TestClone(t *testing.T) { r := New() t1 := r.Txn() t1.Insert([]byte("foo"), 7) t2 := t1.Clone() t1.Insert([]byte("bar"), 42) t2.Insert([]byte("baz"), 43) if val, ok := t1.Get([]byte("foo")); !ok || val != 7 { t.Fatalf("bad foo in t1") } if val, ok := t2.Get([]byte("foo")); !ok || val != 7 { t.Fatalf("bad foo in t2") } if val, ok := t1.Get([]byte("bar")); !ok || val != 42 { t.Fatalf("bad bar in t1") } if _, ok := t2.Get([]byte("bar")); ok { t.Fatalf("bar found in t2") } if _, ok := t1.Get([]byte("baz")); ok { t.Fatalf("baz found in t1") } if val, ok := t2.Get([]byte("baz")); !ok || val != 43 { t.Fatalf("bad baz in t2") } } go-immutable-radix-1.3.1/iter.go000066400000000000000000000121311406643154000164760ustar00rootroot00000000000000package iradix import ( "bytes" ) // Iterator is used to iterate over a set of nodes // in pre-order type 
Iterator struct { node *Node stack []edges } // SeekPrefixWatch is used to seek the iterator to a given prefix // and returns the watch channel of the finest granularity func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { // Wipe the stack i.stack = nil n := i.node watch = n.mutateCh search := prefix for { // Check for key exhaustion if len(search) == 0 { i.node = n return } // Look for an edge _, n = n.getEdge(search[0]) if n == nil { i.node = nil return } // Update to the finest granularity as the search makes progress watch = n.mutateCh // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] } else if bytes.HasPrefix(n.prefix, search) { i.node = n return } else { i.node = nil return } } } // SeekPrefix is used to seek the iterator to a given prefix func (i *Iterator) SeekPrefix(prefix []byte) { i.SeekPrefixWatch(prefix) } func (i *Iterator) recurseMin(n *Node) *Node { // Traverse to the minimum child if n.leaf != nil { return n } nEdges := len(n.edges) if nEdges > 1 { // Add all the other edges to the stack (the min node will be added as // we recurse) i.stack = append(i.stack, n.edges[1:]) } if nEdges > 0 { return i.recurseMin(n.edges[0].node) } // Shouldn't be possible return nil } // SeekLowerBound is used to seek the iterator to the smallest key that is // greater or equal to the given key. There is no watch variant as it's hard to // predict based on the radix structure which node(s) changes might affect the // result. func (i *Iterator) SeekLowerBound(key []byte) { // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we // go because we need only a subset of edges of many nodes in the path to the // leaf with the lower bound. Note that the iterator will still recurse into // children that we don't traverse on the way to the reverse lower bound as it // walks the stack. 
i.stack = []edges{} // i.node starts off in the common case as pointing to the root node of the // tree. By the time we return we have either found a lower bound and setup // the stack to traverse all larger keys, or we have not and the stack and // node should both be nil to prevent the iterator from assuming it is just // iterating the whole tree from the root node. Either way this needs to end // up as nil so just set it here. n := i.node i.node = nil search := key found := func(n *Node) { i.stack = append(i.stack, edges{edge{node: n}}) } findMin := func(n *Node) { n = i.recurseMin(n) if n != nil { found(n) return } } for { // Compare current prefix with the search key's same-length prefix. var prefixCmp int if len(n.prefix) < len(search) { prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) } else { prefixCmp = bytes.Compare(n.prefix, search) } if prefixCmp > 0 { // Prefix is larger, that means the lower bound is greater than the search // and from now on we need to follow the minimum path to the smallest // leaf under this subtree. findMin(n) return } if prefixCmp < 0 { // Prefix is smaller than search prefix, that means there is no lower // bound i.node = nil return } // Prefix is equal, we are still heading for an exact match. If this is a // leaf and an exact match we're done. if n.leaf != nil && bytes.Equal(n.leaf.key, key) { found(n) return } // Consume the search prefix if the current node has one. Note that this is // safe because if n.prefix is longer than the search slice prefixCmp would // have been > 0 above and the method would have already returned. search = search[len(n.prefix):] if len(search) == 0 { // We've exhausted the search key, but the current node is not an exact // match or not a leaf. That means that the leaf value if it exists, and // all child nodes must be strictly greater, the smallest key in this // subtree must be the lower bound. findMin(n) return } // Otherwise, take the lower bound next edge. 
idx, lbNode := n.getLowerBoundEdge(search[0]) if lbNode == nil { return } // Create stack edges for the all strictly higher edges in this node. if idx+1 < len(n.edges) { i.stack = append(i.stack, n.edges[idx+1:]) } // Recurse n = lbNode } } // Next returns the next node in order func (i *Iterator) Next() ([]byte, interface{}, bool) { // Initialize our stack if needed if i.stack == nil && i.node != nil { i.stack = []edges{ { edge{node: i.node}, }, } } for len(i.stack) > 0 { // Inspect the last element of the stack n := len(i.stack) last := i.stack[n-1] elem := last[0].node // Update the stack if len(last) > 1 { i.stack[n-1] = last[1:] } else { i.stack = i.stack[:n-1] } // Push the edges onto the frontier if len(elem.edges) > 0 { i.stack = append(i.stack, elem.edges) } // Return the leaf values if any if elem.leaf != nil { return elem.leaf.key, elem.leaf.val, true } } return nil, nil, false } go-immutable-radix-1.3.1/node.go000066400000000000000000000156531406643154000164740ustar00rootroot00000000000000package iradix import ( "bytes" "sort" ) // WalkFn is used when walking the tree. Takes a // key and value, returning if iteration should // be terminated. type WalkFn func(k []byte, v interface{}) bool // leafNode is used to represent a value type leafNode struct { mutateCh chan struct{} key []byte val interface{} } // edge is used to represent an edge node type edge struct { label byte node *Node } // Node is an immutable node in the radix tree type Node struct { // mutateCh is closed if this node is modified mutateCh chan struct{} // leaf is used to store possible leaf leaf *leafNode // prefix is the common prefix we ignore prefix []byte // Edges should be stored in-order for iteration. 
// We avoid a fully materialized slice to save memory, // since in most cases we expect to be sparse edges edges } func (n *Node) isLeaf() bool { return n.leaf != nil } func (n *Node) addEdge(e edge) { num := len(n.edges) idx := sort.Search(num, func(i int) bool { return n.edges[i].label >= e.label }) n.edges = append(n.edges, e) if idx != num { copy(n.edges[idx+1:], n.edges[idx:num]) n.edges[idx] = e } } func (n *Node) replaceEdge(e edge) { num := len(n.edges) idx := sort.Search(num, func(i int) bool { return n.edges[i].label >= e.label }) if idx < num && n.edges[idx].label == e.label { n.edges[idx].node = e.node return } panic("replacing missing edge") } func (n *Node) getEdge(label byte) (int, *Node) { num := len(n.edges) idx := sort.Search(num, func(i int) bool { return n.edges[i].label >= label }) if idx < num && n.edges[idx].label == label { return idx, n.edges[idx].node } return -1, nil } func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { num := len(n.edges) idx := sort.Search(num, func(i int) bool { return n.edges[i].label >= label }) // we want lower bound behavior so return even if it's not an exact match if idx < num { return idx, n.edges[idx].node } return -1, nil } func (n *Node) delEdge(label byte) { num := len(n.edges) idx := sort.Search(num, func(i int) bool { return n.edges[i].label >= label }) if idx < num && n.edges[idx].label == label { copy(n.edges[idx:], n.edges[idx+1:]) n.edges[len(n.edges)-1] = edge{} n.edges = n.edges[:len(n.edges)-1] } } func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { search := k watch := n.mutateCh for { // Check for key exhaustion if len(search) == 0 { if n.isLeaf() { return n.leaf.mutateCh, n.leaf.val, true } break } // Look for an edge _, n = n.getEdge(search[0]) if n == nil { break } // Update to the finest granularity as the search makes progress watch = n.mutateCh // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] } else { break } } 
return watch, nil, false } func (n *Node) Get(k []byte) (interface{}, bool) { _, val, ok := n.GetWatch(k) return val, ok } // LongestPrefix is like Get, but instead of an // exact match, it will return the longest prefix match. func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) { var last *leafNode search := k for { // Look for a leaf node if n.isLeaf() { last = n.leaf } // Check for key exhaution if len(search) == 0 { break } // Look for an edge _, n = n.getEdge(search[0]) if n == nil { break } // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] } else { break } } if last != nil { return last.key, last.val, true } return nil, nil, false } // Minimum is used to return the minimum value in the tree func (n *Node) Minimum() ([]byte, interface{}, bool) { for { if n.isLeaf() { return n.leaf.key, n.leaf.val, true } if len(n.edges) > 0 { n = n.edges[0].node } else { break } } return nil, nil, false } // Maximum is used to return the maximum value in the tree func (n *Node) Maximum() ([]byte, interface{}, bool) { for { if num := len(n.edges); num > 0 { n = n.edges[num-1].node continue } if n.isLeaf() { return n.leaf.key, n.leaf.val, true } else { break } } return nil, nil, false } // Iterator is used to return an iterator at // the given node to walk the tree func (n *Node) Iterator() *Iterator { return &Iterator{node: n} } // ReverseIterator is used to return an iterator at // the given node to walk the tree backwards func (n *Node) ReverseIterator() *ReverseIterator { return NewReverseIterator(n) } // rawIterator is used to return a raw iterator at the given node to walk the // tree. 
func (n *Node) rawIterator() *rawIterator { iter := &rawIterator{node: n} iter.Next() return iter } // Walk is used to walk the tree func (n *Node) Walk(fn WalkFn) { recursiveWalk(n, fn) } // WalkBackwards is used to walk the tree in reverse order func (n *Node) WalkBackwards(fn WalkFn) { reverseRecursiveWalk(n, fn) } // WalkPrefix is used to walk the tree under a prefix func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { search := prefix for { // Check for key exhaution if len(search) == 0 { recursiveWalk(n, fn) return } // Look for an edge _, n = n.getEdge(search[0]) if n == nil { break } // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] } else if bytes.HasPrefix(n.prefix, search) { // Child may be under our search prefix recursiveWalk(n, fn) return } else { break } } } // WalkPath is used to walk the tree, but only visiting nodes // from the root down to a given leaf. Where WalkPrefix walks // all the entries *under* the given prefix, this walks the // entries *above* the given prefix. func (n *Node) WalkPath(path []byte, fn WalkFn) { search := path for { // Visit the leaf values if any if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { return } // Check for key exhaution if len(search) == 0 { return } // Look for an edge _, n = n.getEdge(search[0]) if n == nil { return } // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] } else { break } } } // recursiveWalk is used to do a pre-order walk of a node // recursively. Returns true if the walk should be aborted func recursiveWalk(n *Node, fn WalkFn) bool { // Visit the leaf values if any if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { return true } // Recurse on the children for _, e := range n.edges { if recursiveWalk(e.node, fn) { return true } } return false } // reverseRecursiveWalk is used to do a reverse pre-order // walk of a node recursively. 
Returns true if the walk // should be aborted func reverseRecursiveWalk(n *Node, fn WalkFn) bool { // Visit the leaf values if any if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { return true } // Recurse on the children in reverse order for i := len(n.edges) - 1; i >= 0; i-- { e := n.edges[i] if reverseRecursiveWalk(e.node, fn) { return true } } return false } go-immutable-radix-1.3.1/node_test.go000066400000000000000000000015351406643154000175250ustar00rootroot00000000000000package iradix import ( "testing" ) func TestNodeWalk(t *testing.T) { r := New() keys := []string{"001", "002", "005", "010", "100"} for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } i := 0 r.Root().Walk(func(k []byte, _ interface{}) bool { got := string(k) want := keys[i] if got != want { t.Errorf("got %s, want: %s", got, want) } i++ if i >= len(keys) { return true } return false }) } func TestNodeWalkBackwards(t *testing.T) { r := New() keys := []string{"001", "002", "005", "010", "100"} for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } i := len(keys) - 1 r.Root().WalkBackwards(func(k []byte, _ interface{}) bool { got := string(k) want := keys[i] if got != want { t.Errorf("got %s, want: %s", got, want) } i-- if i < 0 { return true } return false }) } go-immutable-radix-1.3.1/raw_iter.go000066400000000000000000000034611406643154000173550ustar00rootroot00000000000000package iradix // rawIterator visits each of the nodes in the tree, even the ones that are not // leaves. It keeps track of the effective path (what a leaf at a given node // would be called), which is useful for comparing trees. type rawIterator struct { // node is the starting node in the tree for the iterator. node *Node // stack keeps track of edges in the frontier. stack []rawStackEntry // pos is the current position of the iterator. pos *Node // path is the effective path of the current iterator position, // regardless of whether the current node is a leaf. 
path string } // rawStackEntry is used to keep track of the cumulative common path as well as // its associated edges in the frontier. type rawStackEntry struct { path string edges edges } // Front returns the current node that has been iterated to. func (i *rawIterator) Front() *Node { return i.pos } // Path returns the effective path of the current node, even if it's not actually // a leaf. func (i *rawIterator) Path() string { return i.path } // Next advances the iterator to the next node. func (i *rawIterator) Next() { // Initialize our stack if needed. if i.stack == nil && i.node != nil { i.stack = []rawStackEntry{ { edges: edges{ edge{node: i.node}, }, }, } } for len(i.stack) > 0 { // Inspect the last element of the stack. n := len(i.stack) last := i.stack[n-1] elem := last.edges[0].node // Update the stack. if len(last.edges) > 1 { i.stack[n-1].edges = last.edges[1:] } else { i.stack = i.stack[:n-1] } // Push the edges onto the frontier. if len(elem.edges) > 0 { path := last.path + string(elem.prefix) i.stack = append(i.stack, rawStackEntry{path, elem.edges}) } i.pos = elem i.path = last.path + string(elem.prefix) return } i.pos = nil i.path = "" } go-immutable-radix-1.3.1/reverse_iter.go000066400000000000000000000204111406643154000202310ustar00rootroot00000000000000package iradix import ( "bytes" ) // ReverseIterator is used to iterate over a set of nodes // in reverse in-order type ReverseIterator struct { i *Iterator // expandedParents stores the set of parent nodes whose relevant children have // already been pushed into the stack. This can happen during seek or during // iteration. // // Unlike forward iteration we need to recurse into children before we can // output the value stored in an internal leaf since all children are greater. // We use this to track whether we have already ensured all the children are // in the stack. 
expandedParents map[*Node]struct{} } // NewReverseIterator returns a new ReverseIterator at a node func NewReverseIterator(n *Node) *ReverseIterator { return &ReverseIterator{ i: &Iterator{node: n}, } } // SeekPrefixWatch is used to seek the iterator to a given prefix // and returns the watch channel of the finest granularity func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { return ri.i.SeekPrefixWatch(prefix) } // SeekPrefix is used to seek the iterator to a given prefix func (ri *ReverseIterator) SeekPrefix(prefix []byte) { ri.i.SeekPrefixWatch(prefix) } // SeekReverseLowerBound is used to seek the iterator to the largest key that is // lower or equal to the given key. There is no watch variant as it's hard to // predict based on the radix structure which node(s) changes might affect the // result. func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we // go because we need only a subset of edges of many nodes in the path to the // leaf with the lower bound. Note that the iterator will still recurse into // children that we don't traverse on the way to the reverse lower bound as it // walks the stack. ri.i.stack = []edges{} // ri.i.node starts off in the common case as pointing to the root node of the // tree. By the time we return we have either found a lower bound and setup // the stack to traverse all larger keys, or we have not and the stack and // node should both be nil to prevent the iterator from assuming it is just // iterating the whole tree from the root node. Either way this needs to end // up as nil so just set it here. 
n := ri.i.node ri.i.node = nil search := key if ri.expandedParents == nil { ri.expandedParents = make(map[*Node]struct{}) } found := func(n *Node) { ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) // We need to mark this node as expanded in advance too otherwise the // iterator will attempt to walk all of its children even though they are // greater than the lower bound we have found. We've expanded it in the // sense that all of its children that we want to walk are already in the // stack (i.e. none of them). ri.expandedParents[n] = struct{}{} } for { // Compare current prefix with the search key's same-length prefix. var prefixCmp int if len(n.prefix) < len(search) { prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) } else { prefixCmp = bytes.Compare(n.prefix, search) } if prefixCmp < 0 { // Prefix is smaller than search prefix, that means there is no exact // match for the search key. But we are looking in reverse, so the reverse // lower bound will be the largest leaf under this subtree, since it is // the value that would come right before the current search key if it // were in the tree. So we need to follow the maximum path in this subtree // to find it. Note that this is exactly what the iterator will already do // if it finds a node in the stack that has _not_ been marked as expanded // so in this one case we don't call `found` and instead let the iterator // do the expansion and recursion through all the children. ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) return } if prefixCmp > 0 { // Prefix is larger than search prefix, or there is no prefix but we've // also exhausted the search key. Either way, that means there is no // reverse lower bound since nothing comes before our current search // prefix. return } // If this is a leaf, something needs to happen! Note that if it's a leaf // and prefixCmp was zero (which it must be to get here) then the leaf value // is either an exact match for the search, or it's lower. 
It can't be // greater. if n.isLeaf() { // Firstly, if it's an exact match, we're done! if bytes.Equal(n.leaf.key, key) { found(n) return } // It's not so this node's leaf value must be lower and could still be a // valid contender for reverse lower bound. // If it has no children then we are also done. if len(n.edges) == 0 { // This leaf is the lower bound. found(n) return } // Finally, this leaf is internal (has children) so we'll keep searching, // but we need to add it to the iterator's stack since it has a leaf value // that needs to be iterated over. It needs to be added to the stack // before its children below as it comes first. ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) // We also need to mark it as expanded since we'll be adding any of its // relevant children below and so don't want the iterator to re-add them // on its way back up the stack. ri.expandedParents[n] = struct{}{} } // Consume the search prefix. Note that this is safe because if n.prefix is // longer than the search slice prefixCmp would have been > 0 above and the // method would have already returned. search = search[len(n.prefix):] if len(search) == 0 { // We've exhausted the search key but we are not at a leaf. That means all // children are greater than the search key so a reverse lower bound // doesn't exist in this subtree. Note that there might still be one in // the whole radix tree by following a different path somewhere further // up. If that's the case then the iterator's stack will contain all the // smaller nodes already and Previous will walk through them correctly. return } // Otherwise, take the lower bound next edge. idx, lbNode := n.getLowerBoundEdge(search[0]) // From here, we need to update the stack with all values lower than // the lower bound edge. 
Since getLowerBoundEdge() returns -1 when the // search prefix is larger than all edges, we need to place idx at the // last edge index so they can all be place in the stack, since they // come before our search prefix. if idx == -1 { idx = len(n.edges) } // Create stack edges for the all strictly lower edges in this node. if len(n.edges[:idx]) > 0 { ri.i.stack = append(ri.i.stack, n.edges[:idx]) } // Exit if there's no lower bound edge. The stack will have the previous // nodes already. if lbNode == nil { return } // Recurse n = lbNode } } // Previous returns the previous node in reverse order func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { // Initialize our stack if needed if ri.i.stack == nil && ri.i.node != nil { ri.i.stack = []edges{ { edge{node: ri.i.node}, }, } } if ri.expandedParents == nil { ri.expandedParents = make(map[*Node]struct{}) } for len(ri.i.stack) > 0 { // Inspect the last element of the stack n := len(ri.i.stack) last := ri.i.stack[n-1] m := len(last) elem := last[m-1].node _, alreadyExpanded := ri.expandedParents[elem] // If this is an internal node and we've not seen it already, we need to // leave it in the stack so we can return its possible leaf value _after_ // we've recursed through all its children. if len(elem.edges) > 0 && !alreadyExpanded { // record that we've seen this node! ri.expandedParents[elem] = struct{}{} // push child edges onto stack and skip the rest of the loop to recurse // into the largest one. 
ri.i.stack = append(ri.i.stack, elem.edges) continue } // Remove the node from the stack if m > 1 { ri.i.stack[n-1] = last[:m-1] } else { ri.i.stack = ri.i.stack[:n-1] } // We don't need this state any more as it's no longer in the stack so we // won't visit it again if alreadyExpanded { delete(ri.expandedParents, elem) } // If this is a leaf, return it if elem.leaf != nil { return elem.leaf.key, elem.leaf.val, true } // it's not a leaf so keep walking the stack to find the previous leaf } return nil, nil, false } go-immutable-radix-1.3.1/reverse_iter_test.go000066400000000000000000000161051406643154000212750ustar00rootroot00000000000000package iradix import ( "fmt" "reflect" "sort" "testing" "testing/quick" ) func TestReverseIterator_SeekReverseLowerBoundFuzz(t *testing.T) { r := New() set := []string{} // This specifies a property where each call adds a new random key to the radix // tree. // // It also maintains a plain sorted list of the same set of keys and asserts // that iterating from some random key to the beginning using ReverseLowerBound // produces the same list as filtering all sorted keys that are bigger. radixAddAndScan := func(newKey, searchKey readableString) []string { r, _, _ = r.Insert([]byte(newKey), nil) // Now iterate the tree from searchKey to the beginning it := r.Root().ReverseIterator() result := []string{} it.SeekReverseLowerBound([]byte(searchKey)) for { key, _, ok := it.Previous() if !ok { break } result = append(result, string(key)) } return result } sliceAddSortAndFilter := func(newKey, searchKey readableString) []string { // Append the key to the set and re-sort set = append(set, string(newKey)) sort.Strings(set) t.Logf("Current Set: %#v", set) t.Logf("Search Key: %#v %v", searchKey, "" >= string(searchKey)) result := []string{} for i := len(set) - 1; i >= 0; i-- { k := set[i] // Check this is not a duplicate of the previous value we just included. 
// Note we don't just store the last string to compare because empty // string is a valid value in the set and makes comparing on the first // iteration awkward. if i < len(set)-1 && set[i+1] == k { continue } if k <= string(searchKey) { result = append(result, k) } } return result } if err := quick.CheckEqual(radixAddAndScan, sliceAddSortAndFilter, nil); err != nil { t.Error(err) } } func TestReverseIterator_SeekLowerBound(t *testing.T) { // these should be defined in order var fixedLenKeys = []string{ "20020", "00020", "00010", "00004", "00001", "00000", } // these should be defined in order var mixedLenKeys = []string{ "zip", "zap", "found", "foo", "f", "barbazboo", "abc", "a1", } type exp struct { keys []string search string want []string } cases := []exp{ { fixedLenKeys, "20020", fixedLenKeys, }, { fixedLenKeys, "20000", []string{ "00020", "00010", "00004", "00001", "00000", }, }, { fixedLenKeys, "00010", []string{ "00010", "00004", "00001", "00000", }, }, { fixedLenKeys, "00000", []string{ "00000", }, }, { fixedLenKeys, "0", []string{}, }, { mixedLenKeys, "{", // after all lower case letters mixedLenKeys, }, { mixedLenKeys, "zip", mixedLenKeys, }, { mixedLenKeys, "b", []string{ "abc", "a1", }, }, { mixedLenKeys, "barbazboo0", []string{ "barbazboo", "abc", "a1", }, }, { mixedLenKeys, "a", []string{}, }, { mixedLenKeys, "a1", []string{ "a1", }, }, // We SHOULD support keys that are prefixes of each other despite some // confusion in the original implementation. { []string{"f", "fo", "foo", "food", "bug"}, "foo", []string{"foo", "fo", "f", "bug"}, }, { []string{"f", "fo", "foo", "food", "bug"}, "foozzzzzzzzzz", // larger than any key but with shared prefix []string{"food", "foo", "fo", "f", "bug"}, }, // We also support the empty key (which is a prefix of every other key) as a // valid key to insert and search for. 
{ []string{"f", "fo", "foo", "food", "bug", ""}, "foo", []string{"foo", "fo", "f", "bug", ""}, }, { []string{"f", "bug", ""}, "", []string{""}, }, { []string{"f", "bug", "xylophone"}, "", []string{}, }, // This case could panic before. it involves a node with a shared prefix and // children where the reverse lower bound is greater than all the children { []string{"foo00", "foo11"}, "foo", []string{}, }, // When fixing the panic above the above test could pass but we need to // verify the logic is still correct in the case there was a lower bound in // another node. { []string{"bar", "foo00", "foo11"}, "foo", []string{"bar"}, }, // Found by fuzz test that hit code that wasn't covered by any other example // here. { []string{"bdgedcdc", "agcbcaba"}, "beefdafg", []string{"bdgedcdc", "agcbcaba"}, }, { []string{"", "acc", "accea", "accgbbb", "b", "bdebfc", "bdfdcbb", "becccc", "bgefcfc", "c", "cab", "cbd", "cgeaff", "cggfbcb", "cggge", "dcgbd", "ddd", "decfd", "dgb", "e", "edaffec", "ee", "eedc", "efafdbd", "eg", "egf", "egfcd", "f", "fggfdad", "g", "gageecc", "ggd"}, "adgba", []string{"accgbbb", "accea", "acc", ""}, }, } for idx, test := range cases { t.Run(fmt.Sprintf("case%03d", idx), func(t *testing.T) { r := New() // Insert keys for _, k := range test.keys { var ok bool r, _, ok = r.Insert([]byte(k), nil) if ok { t.Fatalf("duplicate key %s in keys", k) } } if r.Len() != len(test.keys) { t.Fatal("failed adding keys") } // Get and seek iterator root := r.Root() iter := root.ReverseIterator() iter.SeekReverseLowerBound([]byte(test.search)) // Consume all the keys out := []string{} for { key, _, ok := iter.Previous() if !ok { break } out = append(out, string(key)) } if !reflect.DeepEqual(out, test.want) { t.Fatalf("mis-match: key=%s\n got=%v\n want=%v", test.search, out, test.want) } }) } } func TestReverseIterator_SeekPrefix(t *testing.T) { r := New() keys := []string{"001", "002", "005", "010", "100"} for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } 
cases := []struct { name string prefix string expectResult bool }{ { name: "existing prefix", prefix: "005", expectResult: true, }, { name: "non-existing prefix", prefix: "2", expectResult: false, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { it := r.Root().ReverseIterator() it.SeekPrefix([]byte(c.prefix)) if c.expectResult && it.i.node == nil { t.Errorf("expexted prefix %s to exist", c.prefix) return } if !c.expectResult && it.i.node != nil { t.Errorf("unexpected node for prefix '%s'", c.prefix) return } }) } } func TestReverseIterator_SeekPrefixWatch(t *testing.T) { key := []byte("key") // Create tree r := New() r, _, _ = r.Insert(key, nil) // Find mutate channel it := r.Root().ReverseIterator() ch := it.SeekPrefixWatch(key) // Change prefix tx := r.Txn() tx.TrackMutate(true) tx.Insert(key, "value") tx.Commit() // Check if channel closed select { case <-ch: default: t.Errorf("channel not closed") } } func TestReverseIterator_Previous(t *testing.T) { r := New() keys := []string{"001", "002", "005", "010", "100"} for _, k := range keys { r, _, _ = r.Insert([]byte(k), nil) } it := r.Root().ReverseIterator() for i := len(keys) - 1; i >= 0; i-- { got, _, _ := it.Previous() want := keys[i] if string(got) != want { t.Errorf("got: %v, want: %v", got, want) } } }