===== tensor-0.9.24/.gitgnore =====
*.vscode/*
.gitignore
*.dot

===== tensor-0.9.24/.github/FUNDING.yml =====
# These are supported funding model platforms

github: [chewxy, owulveryck, dcu] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

===== tensor-0.9.24/.github/workflows/.go.yml =====
on:
  push:
    branches: [ master ]
  pull_request:

name: test and build
env:
  GOPROXY: "https://proxy.golang.org"
  CI_NO_PYTHON: "true"

jobs:
  test:
    strategy:
      matrix:
        go: [1.18.x, 1.17.x, 1.16.x, 1.15.x]
        os: [ubuntu-latest, macos-latest, windows-latest]
        tags: [avx, sse]
        allowfail: [false]
        include:
          - go: tip
            os: ubuntu-latest
            allowfail: true
    runs-on: ${{ matrix.os }}
    continue-on-error: ${{ matrix.allowfail }}
    timeout-minutes: 5
    steps:
      - name: Install Go ${{ matrix.go }} on ${{ matrix.os }}
        if: matrix.go != 'tip'
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      # temporary hack:
      # https://github.com/actions/setup-go/issues/21#issuecomment-565704236
      - name: Install Go ${{ matrix.go }} on ${{ matrix.os }}
        if: matrix.go == 'tip'
        run: |
          git clone --depth=1 https://go.googlesource.com/go $HOME/gotip
          cd $HOME/gotip/src
          ./make.bash
          echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV
          echo "$HOME/gotip/bin" >> $GITHUB_PATH
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Run tests
        run: |
          go test ./... -v -race
          go test ./... -race -tags=${{ matrix.tags }}
  coverage:
    env:
      CI_NO_PYTHON: "false"
      PYTHON_COMMAND: python
    strategy:
      matrix:
        tags: [avx, sse]
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.14.x
      - name: Install Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'
          architecture: 'x64'
      - name: Install Pip
        uses: BSFishy/pip-action@v1
        with:
          packages: numpy
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Calc coverage
        run: |
          export PATH=$PATH:$(go env GOPATH)/bin
          go test ./... -v -covermode=atomic -coverprofile=coverage.out
      - name: Convert coverage to lcov
        uses: jandelgado/gcov2lcov-action@v1.0.0
        with:
          infile: coverage.out
          outfile: coverage.lcov
      - name: Coveralls
        uses: coverallsapp/github-action@v1.0.1
        with:
          github-token: ${{ secrets.github_token }}
          path-to-lcov: coverage.lcov
  build:
    strategy:
      matrix:
        go: [1.13, 1.14]
        goos: [linux, darwin]
        goarch: [amd64, arm]
        exclude:
          # windows/386 and darwin/386 seem useless
          - goarch: "arm"
            goos: darwin
    runs-on: ubuntu-latest
    needs: [test]
    steps:
      - name: Install Go ${{ matrix.go }}
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: build
        run: go build .
        env:
          GOOS: ${{ matrix.goos }}
          GOARCH: ${{ matrix.goarch }}

===== tensor-0.9.24/.gitignore =====
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# vendor
/vendor

===== tensor-0.9.24/ALTERNATIVEDESIGNS.md =====
# Alternative Designs #

This document holds the alternative designs for the various tensor data structures that were tried in the past, and why they didn't make it into the final design. That doesn't mean that the current design is the best; it just means that the authors may not have gone far enough with these other designs.

## Single interface, multiple packages ##

In this design, there is a single interface for dense tensors, which is rather similar to the one that is currently there right now:

```
type Tensor interface {
	Shape() Shape
	Strides() []int
	Dtype() Dtype
	Dims() int
	Size() int
	DataSize() int

	// Basic operations all tensors must support
	Slice(...Slice) (Tensor, error)
	At(...int) (interface{}, error)
	SetAt(v interface{}, coord ...int) error
	Reshape(...int) error
	T(axes ...int) error
	UT()
	Transpose() error // Transpose actually moves the data
	Apply(fn interface{}, opts ...FuncOpt) (Tensor, error)
}
```

The idea is then to have subpackages for each type that would implement the `Tensor` like such:

```
// in tensor/f32
type Tensor struct {
} // implements tensor.Tensor

// in tensor/f64
type Tensor struct {
} // implements tensor.Tensor
```

Additionally there are interfaces which defined operational types:

```
type Adder interface {
	Add(other Tensor) (Tensor, error)
}

type Number interface {
	Adder
	Suber
	Muler
	Diver
}

type Real interface {
	Number
	Tanher
	Exper
}

type Complex interface {
	Real
}
```

And there are functions which operated on the `Tensor`s:

```
func Add(a, b Tensor) (Tensor, error) {
	if adder, ok := a.(Adder); ok {
		return adder.Add(b)
	}
	return nil, errors.New("Cannot Add: Not an Adder")
}
```

### Pros ###

It is very idiomatic Go, and no reflection was used. It is an ideal model of an abstract data type.

### Cons ###

1. Having all packages import a common "tensor/types" (which holds the `*AP`, `Shape` and `Slice` definitions).
2. It'd be ideal to keep all the packages in sync in terms of the methods and functions that the subpackages export. In reality that turns out to be more difficult than expected.
3. Performance issues in hot loops: in a number of hot loops, calls to `runtime.assertI2I2` ended up taking up a large portion of the cycles.
4. Performance issues wrt allocation of objects.
   Instead of a single pool, every subpackage would have to implement its own object pool and manage it.
5. There was a central registry of `Dtype`s, and a variant of the SQL driver pattern was used (you had to `import _ "github.com/chewxy/gorgonia/tensor/f32"` to register the `Float32` Dtype). This is ugly.
6. Cross-package requirements: for `Argmax` and `Argmin` related functions, it'd be nice to be able to return a `Tensor` of `int`. That meant having `tensor/i` as a core dependency in the rest of the packages.

#### Workarounds ####

* `Slice` is an interface. All packages that implement `tensor.Tensor` *could* implement their own `Slice`. But that'd be a lot of repeated work.
* `AP` and `Shape` could be made interfaces, but for the latter it means dropping the ability to loop through the shape dimensions.
* Keeping the packages in sync could be solved with code generation programs, but if we were to do that, we might as well merge everything into one package.

### Notes for revisits ###

This idea is nice. I'd personally love to revisit it (and do from time to time). If we were to revisit this idea, there would have to be some changes, which I will suggest here:

1. Make `Transpose` and `T` functions that work on `Tensor` instead of making them `Tensor`-defining methods. This would be done the same way as `Stack` and `RollAxis` and `Concat`.
2. Perhaps re-weight the importance of having an in-place transpose. The in-place transpose was the result of dealing with a very large matrix when my machine didn't have enough memory. It's generally slower than reallocating a new backing array anyway.

# One struct, multiple backing interfaces #

In this design, we abstract away the backing array into an interface. So we'd have this:

```
type Tensor struct {
	*AP
	t    Dtype
	data Array
}

type Array interface {
	Len() int
	Cap() int
	Get(int) interface{}
	Set(int, interface{}) error
	Map(fn interface{}) error
}
```

And we'd have these types which implemented the `Array` interface:

```
type Ints []int
type F32s []float32
type F64s []float64
// and so on and so forth, and each would implement Array
```

### Pros ###

* Multiple subpackages only when necessary (external, "unhandled" dtypes)
* Shared definition of `*AP`, `Shape`, `Dtype` (no more use of a common package)
* Clean package structure - easier to generate code for

### Cons ###

* Difficult to implement other tensor types (sparse for example)
* VERY VERY slow

The slowness was caused by excessive calls to `runtime.convT2E` when using the `Get` and `Set` methods, which for primitive types cause plenty of allocations on the heap. It was unacceptably slow for any deep learning work.

#### Workarounds ####

Type switch on known data types, and use slower methods for out-of-bounds data types that do not have specializations. This led to ugly, unwieldy code, and also moved the pressure from `runtime.convT2E` to `runtime.assertI2I2`, which, while it performs better than having to allocate primitive values on the heap, still led to a lot of unnecessary cycles being spent on it.
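The boxing cost is easy to demonstrate in isolation. Below is a minimal, self-contained sketch (not from the original codebase) of why a `Get(int) interface{}` accessor is slow for primitive element types: every return boxes the `float64` into an interface value, which is exactly the `runtime.convT2E` path described above.

```
package main

import "fmt"

// F64s mirrors the Array-interface design described above:
// a []float64 accessed through a Get method that returns interface{}.
type F64s []float64

// Get boxes the element into an interface{} on every call.
// For primitive types this typically forces a heap allocation (runtime.convT2E).
func (a F64s) Get(i int) interface{} { return a[i] }

// sumBoxed sums through the interface, paying the boxing cost per element.
func sumBoxed(a F64s) (s float64) {
	for i := 0; i < len(a); i++ {
		s += a.Get(i).(float64) // box, then unbox
	}
	return s
}

// sumDirect is what a type-switch specialization buys back:
// direct indexing, no allocations in the loop.
func sumDirect(a F64s) (s float64) {
	for _, v := range a {
		s += v
	}
	return s
}

func main() {
	a := F64s{1, 2, 3, 4}
	fmt.Println(sumBoxed(a), sumDirect(a)) // 10 10
}
```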
# Reflection + Pointers + Interfaces #

This was the design that was reigning before the refactor at #127. The idea is to combine parts of the first attempt and second attempt, and fill up the remaining missing bits with the use of reflection.

===== tensor-0.9.24/CONTRIBUTING.md =====
# Contributing #

We want to make contributing as easy as possible. There are a number of [issues](https://github.com/chewxy/gorgonia/issues) that can be solved. Most of the issues are labelled.

## Labels ##

Here's the current explanation of the labels:

| Label | Expertise Required | Notes |
|-------|--------------------|-------|
| easy | Familiar with Go | Usually there would be a link to an existing implementation that is similar |
| requires Linear Algebra knowledge | Linear algebra knowledge required on top of being familiar with Go | Linear algebra knowledge will go a long way in helping identify subtler bugs |
| no label | Knowledge about Gorgonia | |
| help wanted | Various expertise | Typically it means the task requires specialist expertise that the author doesn't possess |
| complicated | - | The code change will affect, and involve, many files |
## Steps ##

1. Fork this project on Github
2. Clone it to your local drive
3. Check if there are any pending issues in the issues tracker
4. Pick an unassigned issue that you can accomplish. Comment on the issue to pick it up.
5. Work on it; using topic branches is highly recommended.

## Testing ##

Testing is important.

## How To Get Your Pull Request Accepted ##

1. Test, test, test. Make sure your new code doesn't break the existing tests.
2. If you add new code, you must add tests.
3. `gofmt` your code.
4. Atomic pull requests - solve one issue per pull request. Some issues may break down into atomic tasks. In those cases, it's OK to solve them partially.

## Git Workflow ##

The master branch is considered to be the "canonical" branch. There is no develop branch. The author prefers the use of topic branches. The workflow is best described by the [Github Flow](https://guides.github.com/introduction/flow/). Please try to keep to this flow.

# Development #

## How the `Tensor` Libs Are Developed ##

## Debugging ##

Whilst the author encourages the use of [Delve](https://github.com/derekparker/delve), it may often be easier to log the trace using the debug loggers. Gorgonia comes with a debug build tag precisely to help with that. To build debug builds, simply do this:

```go
go build -tags='debug' .
```

The debug tag enables various tracing options, available in `debug.go`. There are several debug constants that are used:

* `compileDev`
* `shapeInferenceDev`
* `typeSystemDev`
* `symdiffDev`
* `autodiffDev`
* `machineDev`
* `stabilizationDev`
* `solverDev`
* `cudaDev`

These are the bools that you need to set in order to get a trace. If, for example, you think there is something wrong with the type system, simply set `typeSystemDev` to `true` and then insert `typeSysLogf` into wherever you want to trace.

===== tensor-0.9.24/CONTRIBUTORS.md =====
# Significant Contributors #

* Xuanyi Chew (@chewxy) - initial package
* Naseer Dari (@ndari) - errors and error handling
* Joe Kabaka (@kabaka0) - masked array functionality
* Stuart Carnie (@stuartcarnie) - performance optimization for iterators
* Jorge Landivar (@docmerlin) - performance optimization for `*Dense`

# Contributors

* Andrew Murray | @radarhere
* Ankit Raj | @aj0t
* David Soller | @3ygun
* Davor Kapsa | @dvrkps
* James Michael DuPont | @h4ck3rm1k3
* Yuanlin Lian | @alienchow
* Andrew SnodGrass | @pointlander

For more contributors, check out the [github contributors page](https://github.com/gorgonia/tensor/graphs/contributors). A large number of contributions were made before the repository split off (the `tensor` repo was originally a subpackage of the `gorgonia` repository), so some bits may be missing.

===== tensor-0.9.24/LICENCE =====
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright 2019 Gorgonia Authors

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
===== tensor-0.9.24/README.md =====
# Package `tensor` [![GoDoc](https://godoc.org/gorgonia.org/tensor?status.svg)](https://godoc.org/gorgonia.org/tensor) [![GitHub version](https://badge.fury.io/gh/gorgonia%2Ftensor.svg)](https://badge.fury.io/gh/gorgonia%2Ftensor) [![Build Status](https://travis-ci.org/gorgonia/tensor.svg?branch=master)](https://travis-ci.org/gorgonia/tensor) [![Coverage Status](https://coveralls.io/repos/github/gorgonia/tensor/badge.svg?branch=master)](https://coveralls.io/github/gorgonia/tensor?branch=master) [![Go Report Card](https://goreportcard.com/badge/gorgonia.org/tensor)](https://goreportcard.com/report/gorgonia.org/tensor) [![unstable](http://badges.github.io/stability-badges/dist/unstable.svg)](http://github.com/badges/stability-badges)

Package `tensor` provides efficient, generic (by some definitions of generic) n-dimensional arrays in Go. Also in this package are functions and methods that are used commonly in arithmetic, comparison and linear algebra operations. The main purpose of this package is to support the operations required by [Gorgonia](https://gorgonia.org/gorgonia).

## Introduction ##

In the data analysis world, [Numpy](http://www.numpy.org/) and [Matlab](https://www.mathworks.com/products/matlab.html) currently reign supreme. Both tools rely heavily on having performant n-dimensional arrays, or tensors. **There is an obvious need for multidimensional arrays in Go.**

While slices are cool, a large majority of scientific and numeric computing work relies heavily on matrices (two-dimensional arrays), three-dimensional arrays and so on. In Go, the typical way of getting multidimensional arrays is to use something like `[][]T`. Applications that are more math-heavy may opt to use the very excellent Gonum [`matrix` package](https://github.com/gonum/matrix). What then if we want to go beyond having a `float64` matrix? What if we wanted a 3-dimensional `float32` array? It stands to reason, then, that there should be a data structure that handles these things. The `tensor` package fits in that niche.

### Basic Idea: Tensor ###

A tensor is a multidimensional array. It's like a slice, but works in multiple dimensions.

With slices, there are usage patterns that are repeated enough to warrant abstraction - `append`, `len`, `cap`, `range` are abstractions used to manipulate and query slices. Additionally, slicing operations (`a[:1]` for example) are also abstractions provided by the language. Andrew Gerrand wrote a very good write-up on [Go's slice usage and internals](https://blog.golang.org/go-slices-usage-and-internals).

Tensors come with their own set of usage patterns and abstractions. Most of these have analogues in slices, enumerated below (do note that certain slice operations will have more than one tensor analogue - this is due to the number of options available):

| Slice Operation | Tensor Operation |
|:---------------:|:----------------:|
| `len(a)` | `T.Shape()` |
| `cap(a)` | `T.DataSize()` |
| `a[:]` | `T.Slice(...)` |
| `a[0]` | `T.At(x,y)` |
| `append(a, ...)` | `T.Stack(...)`, `T.Concat(...)` |
| `copy(dest, src)` | `T.CopyTo(dest)`, `tensor.Copy(dest, src)` |
| `for _, v := range a` | `for i, err := iterator.Next(); err == nil; i, err = iterator.Next()` |
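The iterator pattern in the last row deserves a quick concrete illustration. The sketch below follows the same snippet conventions as the examples later in this document; it assumes the package's `IteratorFromDense` constructor and the `Start`/`Next` iterator methods - treat the exact names as illustrative and consult the [godoc](https://godoc.org/gorgonia.org/tensor) for the current API:

```go
b := New(WithBacking(Range(Float64, 0, 6)), WithShape(2, 3))
data := b.Data().([]float64)

// The iterator yields flat indices into the backing array, in index order -
// the tensor analogue of `range` over a slice.
it := IteratorFromDense(b)
for i, err := it.Start(); err == nil; i, err = it.Next() {
	fmt.Println(data[i])
}
```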
Some operations for a tensor do not have direct analogues in slice operations. However, they stem from the same idea, and can be considered a superset of all operations common to slices. They're enumerated below:

| Tensor Operation | Basic idea in slices |
|:----------------:|:--------------------:|
| `T.Strides()` | The stride of a slice will always be one element |
| `T.Dims()` | The dimensions of a slice will always be one |
| `T.Size()` | The size of a slice will always be its length |
| `T.Dtype()` | The type of a slice is always known at compile time |
| `T.Reshape()` | Given the shape of a slice is static, you can't really reshape a slice |
| `T.T(...)` / `T.Transpose()` / `T.UT()` | No equivalent with slices |

## The Types of Tensors ##

As of the current revision of this package, only dense tensors are supported. Support for sparse matrices (in the form of a sparse column matrix and a dictionary-of-keys matrix) will be coming shortly.

### Dense Tensors ###

The `*Dense` tensor is the primary tensor and is represented by a singular flat array, regardless of dimensions. See the [Design of `*Dense`](#design-of-dense) section for more information. It can hold any data type.

### Compressed Sparse Column Matrix ###

Documentation coming soon.

### Compressed Sparse Row Matrix ###

Documentation coming soon.

## Usage ##

To install: `go get -u "gorgonia.org/tensor"`

To create a matrix with package `tensor` is easy:

```go
// Creating a (2,2) matrix of int:
a := New(WithShape(2, 2), WithBacking([]int{1, 2, 3, 4}))
fmt.Printf("a:\n%v\n", a)

// Output:
// a:
// ⎡1 2⎤
// ⎣3 4⎦
//
```

To create a 3-Tensor is just as easy - just put in the correct shape and you're good to go:

```go
// Creating a (2,3,4) 3-Tensor of float32
b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4))
fmt.Printf("b:\n%1.1f\n", b)

// Output:
// b:
// ⎡ 0.0 1.0 2.0 3.0⎤
// ⎢ 4.0 5.0 6.0 7.0⎥
// ⎣ 8.0 9.0 10.0 11.0⎦
//
// ⎡12.0 13.0 14.0 15.0⎤
// ⎢16.0 17.0 18.0 19.0⎥
// ⎣20.0 21.0 22.0 23.0⎦
```

Accessing and setting data is fairly easy. Dimensions are 0-indexed, so if you come from an R background, suck it up like I did. Be warned, this is the inefficient way if you want to do batch access/setting:

```go
// Accessing data:
b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4))
x, _ := b.At(0, 1, 2)
fmt.Printf("x: %v\n", x)

// Setting data
b.SetAt(float32(1000), 0, 1, 2)
fmt.Printf("b:\n%v", b)

// Output:
// x: 6
// b:
// ⎡ 0 1 2 3⎤
// ⎢ 4 5 1000 7⎥
// ⎣ 8 9 10 11⎦
// ⎡ 12 13 14 15⎤
// ⎢ 16 17 18 19⎥
// ⎣ 20 21 22 23⎦
```

Bear in mind to pass in data of the correct type. This example will cause a panic:

```go
// Accessing data:
b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4))
x, _ := b.At(0, 1, 2)
fmt.Printf("x: %v\n", x)

// Setting data
b.SetAt(1000, 0, 1, 2)
fmt.Printf("b:\n%v", b)
```

There is a whole laundry list of methods and functions available at the [godoc](https://godoc.org/gorgonia.org/tensor) page.
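Slicing (the `T.Slice(...)` analogue of `a[:]` from the table above) isn't demonstrated above, so here is a small hedged sketch. It uses `S`, the slice-spec constructor that appears in this package's tests (`S(start)`, `S(start, end)`, `S(start, end, step)`); a `nil` slice spec means "take the whole dimension", and the result is a view that shares the original backing array:

```go
a := New(WithBacking(Range(Float64, 0, 20)), WithShape(4, 5))

// v is a view of rows 1 and 2, all columns - a[1:3, :] in Numpy-speak.
v, err := a.Slice(S(1, 3), nil)
if err != nil {
	// handle the error
}
fmt.Printf("v:\n%v\n", v) // a (2, 5) view sharing a's backing array
```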
## Design of `*Dense` ##

The design of the `*Dense` tensor is quite simple in concept. However, let's start with something more familiar. This is a visual representation of a slice in Go (taken from rsc's excellent blog post on [Go data structures](https://research.swtch.com/godata)):

![slice](https://github.com/gorgonia/tensor/blob/master/media/slice.png?raw=true)

The data structure for `*Dense` is similar, but a lot more complex. Much of the complexity comes from the need to do accounting work on the data structure as well as preserving references to memory locations. This is how the `*Dense` is defined:

```go
type Dense struct {
	*AP
	array
	e Engine

	// other fields elided for simplicity's sake
}
```

And here's a visual representation of the `*Dense`:

![dense](https://github.com/gorgonia/tensor/blob/master/media/dense.png?raw=true)

`*Dense` draws its inspiration from Go's slice. Underlying it all is a flat array, and access to elements is controlled by `*AP`. Where a Go slice is able to store its metadata in a 3-word structure (obviating the need to allocate memory), a `*Dense` unfortunately needs to allocate some memory. The majority of the data is stored in the `*AP` structure, which contains metadata such as shape, stride, and methods for accessing the array.
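A quick way to see the `*AP` at work is to inspect a tensor's shape and strides before and after a transpose - a small sketch using the public API (the resulting strides mirror the expectations in `ap_test.go`):

```go
a := New(Of(Float64), WithShape(2, 3))
fmt.Println(a.Shape(), a.Strides()) // (2, 3) [3 1]

// T does not move any data; it only permutes the access pattern.
if err := a.T(); err != nil {
	// handle the error
}
fmt.Println(a.Shape(), a.Strides()) // (3, 2) [1 3]
```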
`*Dense` embeds an `array` (not to be confused with Go's array), which is an abstracted data structure that looks like this:

```
type array struct {
	storage.Header
	t Dtype
	v interface{}
}
```

`storage.Header` is the same structure as `reflect.SliceHeader`, except it stores an `unsafe.Pointer` instead of a `uintptr`. This is done so that eventually, when more tests are done to determine how the garbage collector marks data, the `v` field may be removed.

The `storage.Header` field of the `array` (and hence `*Dense`) is there to provide a quick and easy way to translate back into a slice for operations that use familiar slice semantics, upon which many of the operations depend.

By default, `*Dense` operations try to use the language's builtin slice operations by casting the `storage.Header` field into a slice. However, to accommodate a larger subset of types, the `*Dense` operations have a fallback to using pointer arithmetic to iterate through the slices for other types with non-primitive kinds (yes, you CAN do pointer arithmetic in Go. It's slow and unsafe). The result is slower operations for types with non-primitive kinds.

### Memory Allocation ###

`New()` functions as expected - it returns a pointer of `*Dense` to an array of zeroed memory. The underlying array is allocated depending on what `ConsOpt` is passed in. With `New()`, `ConsOpt`s are used to determine the exact nature of the `*Dense`. It's a bit icky (I'd have preferred everything to have been known statically at compile time), but it works. Let's look at some examples:

```go
x := New(Of(Float64), WithShape(2,2)) // works
y := New(WithShape(2,2)) // panics
z := New(WithBacking([]int{1,2,3,4})) // works
```

The following will happen:

* Line 1 works: this will allocate a `float64` array of size 4.
* Line 2 will cause a panic. This is because the function doesn't know what to allocate - it only knows to allocate an array of *something* for the size of 4.
* Line 3 will NOT fail, because the array has already been allocated (the `*Dense` reuses the same backing array as the slice passed in). Its shape will be set to `(4)`.

Alternatively, you may also pass in an `Engine`. If that's the case then the allocation will use the `Alloc` method of the `Engine` instead:

```go
x := New(Of(Float64), WithEngine(myEngine), WithShape(2,2))
```

The above call will use `myEngine` to allocate memory instead. This is useful in cases where you may want to manually manage your memory.

### Other failed designs ###

The alternative designs can be seen in the [ALTERNATIVE DESIGNS document](https://github.com/gorgonia/tensor/blob/master/ALTERNATIVEDESIGNS.md).

## Generic Features ##

Example:

```go
x := New(WithBacking([]string{"hello", "world", "hello", "world"}), WithShape(2,2))
x = New(WithBacking([]int{1,2,3,4}), WithShape(2,2))
```

The above code will not cause a compile error, because the structure holding the underlying array (of `string`s and then of `int`s) is a `*Dense`. One could argue that this sidesteps the compiler's type checking system, deferring it to runtime (which a number of people consider dangerous). However, tools are being developed to type check these things, and until Go does support type-checked generics, unfortunately this will be the way it has to be.

Currently, the tensor package supports a limited form of genericity - limited to a tensor of any primitive type.

# How This Package is Developed #

Much of the code in this package is generated. The code to generate it is in the directory `genlib2`. `genlib2` requires the [`goimports`](https://godoc.org/golang.org/x/tools/cmd/goimports) binary to be available in the $PATH.

## Tests ##

Tests require Python with numpy installed. You can select which Python interpreter is being used by setting the environment variable `PYTHON_COMMAND` accordingly. The default value is `python`.

## Things Knowingly Untested For ##

- `complex64` and `complex128` are excluded from the quick check generation process [Issue #11](https://github.com/gorgonia/tensor/issues/11)

### TODO ###

* [ ] Identity optimizations for op
* [ ] Zero value optimizations
* [ ] fix Random() - super dodgy

# How To Get Support #

The best way to get support right now is to open a ticket on Github.

# Contributing #

Obviously since you are most probably reading this on Github, Github will form the major part of the workflow for contributing to this package.

See also: CONTRIBUTING.md

## Contributors and Significant Contributors ##

All contributions are welcome. However, there is a new class of contributor, called Significant Contributors.

A Significant Contributor is one who has shown *deep understanding* of how the library works and/or its environs. Here are examples of what constitutes a Significant Contribution:

* Wrote significant amounts of documentation pertaining to **why**/the mechanics of particular functions/methods and how the different parts affect one another
* Wrote code and tests around the more intricately connected parts of Gorgonia
* Wrote code and tests, and have at least 5 pull requests accepted
* Provided expert analysis on parts of the package (for example, you may be a floating point operations expert who optimized one function)
* Answered at least 10 support questions

The Significant Contributors list will be updated once a month (if anyone even uses Gorgonia, that is).

# Licence #

Gorgonia and the `tensor` package are licenced under a variant of Apache 2.0. It's for all intents and purposes the same as the Apache 2.0 Licence, with the exception of not being able to commercially profit directly from the package unless you're a Significant Contributor (for example, providing commercial support for the package). It's perfectly fine to profit directly from a derivative of Gorgonia (for example, if you use Gorgonia as a library in your product).

Everyone is still allowed to use Gorgonia for commercial purposes (example: using it in software for your business).

## Various Other Copyright Notices ##

These are the packages and libraries which inspired, and were adapted from, in the process of writing Gorgonia (the Go packages that were used were already declared above):

| Source | How it's Used | Licence |
|--------|---------------|---------|
| Numpy | Inspired large portions. Directly adapted algorithms for a few methods (explicitly labelled in the docs) | MIT/BSD-like. [Numpy Licence](https://github.com/numpy/numpy/blob/master/LICENSE.txt) |

===== tensor-0.9.24/ap.go =====
package tensor

import (
	"fmt"

	"github.com/pkg/errors"
)

// An AP is an access pattern. It tells the various ndarrays how to access their data through the use of strides.
// Through the AP, there are several definitions of things, most notably there are two very specific "special cases":
//	Scalar has Dims() of 0.
//		- (1)
//	Scalarlikes are higher order tensors, but each with a size of 1. The Dims() are not 0.
//		- (1, 1)
//		- (1, 1, 1)
//		- (1, 1, 1, 1), etc
//	Vector has Dims() of 1, but its shape can take several forms:
//		- (x, 1)
//		- (1, x)
//		- (x)
//	Matrix has Dims() of 2. This is the most basic form. The len(shape) has to be equal to 2 as well.
//	ndarray has Dims() of n.
type AP struct {
	shape   Shape // len(shape) is the operational definition of the dimensions
	strides []int // strides is usually calculated from shape
	fin     bool  // is this struct change-proof?

	o DataOrder
	Δ Triangle
}

func makeAP(size int) AP {
	return AP{
		shape:   Shape(BorrowInts(size)),
		strides: BorrowInts(size),
	}
}

// MakeAP creates an AP, given the shape and strides.
func MakeAP(shape Shape, strides []int, o DataOrder, Δ Triangle) AP {
	return AP{
		shape:   shape,
		strides: strides,
		o:       o,
		Δ:       Δ,
		fin:     true,
	}
}

// Init initializes an already created AP with a shape and strides.
// It will panic if AP is nil.
func (ap *AP) Init(shape Shape, strides []int) {
	ap.shape = shape
	ap.strides = strides
	ap.fin = true
}

// SetShape is for very specific times when modifying the AP is necessary, such as reshaping and doing I/O related stuff.
//
// Caveats:
//
// - SetShape will recalculate the strides.
//
// - If the AP is locked, nothing will happen.
func (ap *AP) SetShape(s ...int) {
	if !ap.fin {
		// scalars are a special case, we don't want to remove it completely
		if len(s) == 0 {
			if ap.shape == nil || ap.strides == nil {
				ap.shape = Shape{}
			}
			ap.shape = ap.shape[:0]
			ap.strides = ap.strides[:0]
			return
		}

		if ap.shape != nil {
			ReturnInts(ap.shape)
			ap.shape = nil
		}
		if ap.strides != nil {
			ReturnInts(ap.strides)
			ap.strides = nil
		}
		ap.shape = Shape(s).Clone()
		ap.strides = ap.calcStrides()
	}
}

// Shape returns the shape of the AP
func (ap *AP) Shape() Shape { return ap.shape }

// Strides returns the strides of the AP
func (ap *AP) Strides() []int { return ap.strides }

// Dims returns the dimensions of the shape in the AP
func (ap *AP) Dims() int { return ap.shape.Dims() }

// Size returns the expected array size of the shape
func (ap *AP) Size() int { return ap.shape.TotalSize() }

// String implements fmt.Stringer and runtime.Stringer
func (ap *AP) String() string { return fmt.Sprintf("%v", ap) }

// Format implements fmt.Formatter
func (ap *AP) Format(state fmt.State, c rune) {
	fmt.Fprintf(state, "Shape: %v, Stride: %v, Lock: %t", ap.shape, ap.strides, ap.fin)
}

// IsVector returns whether the access pattern falls into one of three possible definitions of vectors:
//	vanilla vector (not a row or a col)
//	column vector
//	row vector
func (ap *AP) IsVector() bool { return ap.shape.IsVector() }

// IsVectorLike returns true if the shape is vector-like (i.e. the shape only has one dim that is a non-1).
func (ap *AP) IsVectorLike() bool {
	return ap.shape.IsVectorLike() && allones(ap.strides)
}

// IsColVec returns true when the access pattern has the shape (x, 1)
func (ap *AP) IsColVec() bool { return ap.shape.IsColVec() }

// IsRowVec returns true when the access pattern has the shape (1, x)
func (ap *AP) IsRowVec() bool { return ap.shape.IsRowVec() }

// IsScalar returns true if the access pattern indicates it's a scalar value.
func (ap *AP) IsScalar() bool { return ap.shape.IsScalar() }

// IsScalarEquiv returns true if the access pattern is equivalent to a scalar shape.
func (ap *AP) IsScalarEquiv() bool { return ap.shape.IsScalarEquiv() }

// IsMatrix returns true if it's a matrix. This is mostly a convenience method. RowVecs and ColVecs are also considered matrices.
func (ap *AP) IsMatrix() bool { return len(ap.shape) == 2 }

// IsZero tells us if the ap has zero size
func (ap *AP) IsZero() bool {
	return len(ap.shape) == 0 && len(ap.strides) == 0 && !ap.fin && ap.o == 0 && ap.Δ == 0
}

// zero zeros out an AP.
func (ap *AP) zero() {
	// log.Printf("ZEROING. Called by %v", string(debug.Stack()))

	// Jorge's original implementation for zeroing a AP is as below
	// but to cater for the (*Dense).fix() method of the *Dense
	// a nil shape is used to signal unsetness
	// so we cannot just truncate the shape even though it would be a lot more efficient

	// ap.shape = ap.shape[:0]
	// ap.strides = ap.strides[:0]

	ReturnInts([]int(ap.shape))
	ReturnInts(ap.strides)
	ap.zeroOnly()
}

// side effect free zeroing
func (ap *AP) zeroOnly() {
	ap.shape = nil
	ap.strides = nil

	ap.fin = false
	ap.o = 0
	ap.Δ = 0
}

func (ap *AP) zeroWithDims(dims int) {
	//ap.shape = BorrowInts(dims)
	//ap.strides = BorrowInts(dims)
	if cap(ap.shape) >= dims {
		ap.shape = ap.shape[:dims]
	}
	ap.shape = BorrowInts(dims)
	if cap(ap.strides) >= dims {
		ap.strides = ap.strides[:dims]
	}
	ap.strides = BorrowInts(dims)
}

// Clone clones the *AP. Clearly. It returns AP.
func (ap *AP) Clone() (retVal AP) {
	retVal = makeAP(cap(ap.shape))

	copy(retVal.shape, ap.shape)
	copy(retVal.strides, ap.strides)

	// handle vectors
	retVal.shape = retVal.shape[:len(ap.shape)]
	retVal.strides = retVal.strides[:len(ap.strides)]
	retVal.fin = ap.fin
	retVal.o = ap.o
	retVal.Δ = ap.Δ
	return
}

func (ap *AP) CloneTo(dest *AP) {
	dest.shape = append(dest.shape[:0], ap.shape...)
	dest.strides = append(dest.strides[:0], ap.strides...)
	dest.fin = ap.fin
	dest.o = ap.o
	dest.Δ = ap.Δ
}

// DataOrder returns the data order of the AP.
func (ap *AP) DataOrder() DataOrder { return ap.o }

// C returns true if the access pattern is a C-contiguous array
func (ap *AP) C() bool { return ap.o.IsRowMajor() && ap.o.IsContiguous() }

// F returns true if the access pattern is a Fortran-contiguous array
func (ap *AP) F() bool { return ap.o.IsColMajor() && ap.o.IsContiguous() }

// S returns the metadata of the sliced tensor.
func (ap *AP) S(size int, slices ...Slice) (newAP AP, ndStart, ndEnd int, err error) {
	if len(slices) > len(ap.shape) {
		// error
		err = errors.Errorf(dimMismatch, len(ap.shape), len(slices))
		return
	}

	ndEnd = size
	newShape := ap.shape.Clone()   // the new shape
	dims := ap.Dims()              // reported dimensions
	newStrides := BorrowInts(dims) // the new strides

	var outerDim int
	order := ap.o
	if ap.o.IsRowMajor() || ap.IsVector() {
		outerDim = 0
	} else {
		outerDim = len(ap.shape) - 1
	}

	for i := 0; i < dims; i++ {
		var sl Slice
		if i <= len(slices)-1 {
			sl = slices[i]
		}

		size := ap.shape[i]

		var stride int
		stride = ap.strides[i]
		// if ap.IsVector() {
		// 	// handles non-vanilla vectors
		// 	stride = ap.strides[0]
		// } else {
		// 	stride = ap.strides[i]
		// }

		var start, end, step int
		if start, end, step, err = SliceDetails(sl, size); err != nil {
			err = errors.Wrapf(err, "Unable to get slice details on slice %d with size %d: %v", i, sl, size)
			return
		}

		// a slice where start == end is []
		ndStart = ndStart + start*stride
		ndEnd = ndEnd - (size-end)*stride

		if step > 0 {
			if newShape[i] = (end - start) / step; (end-start)%step > 0 && i > 0 {
				newShape[i]++
			}
			newStrides[i] = stride * step

			//fix
			if newShape[i] <= 0 {
				newShape[i] = 1
			}
		} else {
			newShape[i] = (end - start)
			newStrides[i] = stride
		}

		if (sl != nil && (!ap.IsVector() && i != outerDim)) || step > 1 {
			order = MakeDataOrder(order, NonContiguous)
		}
	}

	if ndEnd-ndStart == 1 {
		// scalars are a special case
		newAP = AP{}
		newAP.SetShape() // make it a Scalar
		newAP.lock()
	} else {
		// drop any dimension with size 1, except the last dimension
		offset := 0
		for d := 0; d < dims; d++ {
			if newShape[d] == 1 && offset+d <= len(slices)-1 && slices[offset+d] != nil /*&& d != t.dims-1 && dims > 2*/ {
				newShape = append(newShape[:d], newShape[d+1:]...)
				newStrides = append(newStrides[:d], newStrides[d+1:]...)
				d--
				dims--
				offset++
			}
		}
		newAP = MakeAP(newShape, newStrides, order, ap.Δ)
	}
	return
}

// T returns the transposed metadata based on the given input
func (ap *AP) T(axes ...int) (retVal AP, a []int, err error) {
	// prep axes
	if len(axes) > 0 && len(axes) != ap.Dims() {
		err = errors.Errorf(dimMismatch, ap.Dims(), len(axes))
		return
	}

	dims := len(ap.shape)
	if len(axes) == 0 || axes == nil {
		axes = make([]int, dims)
		for i := 0; i < dims; i++ {
			axes[i] = dims - 1 - i
		}
	}
	a = axes

	if ap.shape.IsScalarEquiv() {
		return ap.Clone(), a, noopError{}
	}

	// if axes is 0, 1, 2, 3... then no op
	if monotonic, incr1 := IsMonotonicInts(axes); monotonic && incr1 && axes[0] == 0 {
		return ap.Clone(), a, noopError{}
	}

	currentShape := ap.shape
	currentStride := ap.strides
	shape := make(Shape, len(currentShape))
	strides := make([]int, len(currentStride))

	switch {
	case ap.IsScalar():
		return
	case ap.IsVector():
		if axes[0] == 0 {
			return
		}
		strides[0], strides[1] = 1, 1
		shape[0], shape[1] = currentShape[1], currentShape[0]
	default:
		copy(shape, currentShape)
		copy(strides, currentStride)
		err = UnsafePermute(axes, shape, strides)
		if err != nil {
			err = handleNoOp(err)
		}
	}

	o := MakeDataOrder(ap.o, Transposed)
	retVal = MakeAP(shape, strides, o, ap.Δ)
	retVal.fin = true
	return
}

// locking and unlocking is used to ensure that the shape and stride doesn't change (it's not really safe though, as a direct mutation of the strides/shape would still mutate it, but at least the dimensions cannot change)
func (ap *AP) lock()   { ap.fin = true }
func (ap *AP) unlock() { ap.fin = false }

func (ap *AP) calcStrides() []int {
	switch {
	case ap.o.IsRowMajor():
		return ap.shape.CalcStrides()
	case ap.o.IsColMajor():
		return ap.shape.CalcStridesColMajor()
	}
	panic("unreachable")
}

// setDataOrder is a method such that any tensor that embeds *AP will have the same method
func (ap *AP) setDataOrder(o DataOrder) {
	if !o.HasSameOrder(ap.o) {
		ap.o = ap.o.toggleColMajor()
	}
}

// TransposeIndex returns the new index given the old index
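// For example (the values mirror the first table in ap_test.go): transposing
// a (2, 3) matrix with strides [3 1] into a (3, 2) matrix with strides [2 1]
// maps old index 1 to new index 2:
//	TransposeIndex(1, []int{2, 3}, []int{1, 0}, []int{3, 1}, []int{2, 1}) // == 2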
func TransposeIndex(i int, oldShape, pattern, oldStrides, newStrides []int) int {
	oldCoord, err := Itol(i, oldShape, oldStrides)
	if err != nil {
		panic(err) // or return error?
	}

	/*
		coordss, _ := Permute(pattern, oldCoord)
		coords := coordss[0]
		index, _ := Ltoi(newShape, strides, coords...)
	*/

	// The above is the "conceptual" algorithm.
	// Too many checks above slow things down, so the below is the "optimized" edition
	var index int
	for i, axis := range pattern {
		index += oldCoord[axis] * newStrides[i]
	}
	return index
}

// UntransposeIndex returns the old index given the new index
func UntransposeIndex(i int, oldShape, pattern, oldStrides, newStrides []int) int {
	newPattern := make([]int, len(pattern))
	for i, p := range pattern {
		newPattern[p] = i
	}
	return TransposeIndex(i, oldShape, newPattern, oldStrides, newStrides)
}

// BroadcastStrides handles broadcasting from different shapes.
//
// Deprecated: this function will be unexported
func BroadcastStrides(destShape, srcShape Shape, destStrides, srcStrides []int) (retVal []int, err error) {
	dims := len(destShape)
	start := dims - len(srcShape)

	if destShape.IsVector() && srcShape.IsVector() {
		return []int{srcStrides[0]}, nil
	}

	if start < 0 {
		//error
		err = errors.Errorf(dimMismatch, dims, len(srcShape))
		return
	}

	retVal = BorrowInts(len(destStrides))
	for i := dims - 1; i >= start; i-- {
		s := srcShape[i-start]
		switch {
		case s == 1:
			retVal[i] = 0
		case s != destShape[i]:
			// error
			err = errors.Errorf("Cannot broadcast from %v to %v", srcShape, destShape)
			return
		default:
			retVal[i] = srcStrides[i-start]
		}
	}
	for i := 0; i < start; i++ {
		retVal[i] = 0
	}
	return
}

===== tensor-0.9.24/ap_test.go =====
package tensor

import (
	//"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func dummyScalar1() AP { return AP{} }

func dummyScalar2() AP { return AP{shape: Shape{1}} }

func dummyColVec() AP {
	return AP{
		shape:   Shape{5, 1},
		strides: []int{1},
	}
}

func dummyRowVec() AP {
	return AP{
		shape:   Shape{1, 5},
		strides: []int{1},
	}
}

func dummyVec() AP {
	return AP{
		shape:   Shape{5},
		strides: []int{1},
	}
}

func twothree() AP {
	return AP{
		shape:   Shape{2, 3},
		strides: []int{3, 1},
	}
}

func twothreefour() AP {
	return AP{
		shape:   Shape{2, 3, 4},
		strides: []int{12, 4, 1},
	}
}

func TestAccessPatternBasics(t *testing.T) {
	assert := assert.New(t)
	ap := new(AP)

	ap.SetShape(1, 2)
	assert.Equal(Shape{1, 2}, ap.Shape())
	assert.Equal([]int{2, 1}, ap.Strides())
	assert.Equal(2, ap.Dims())
	assert.Equal(2, ap.Size())

	ap.SetShape(2, 3, 2)
	assert.Equal(Shape{2, 3, 2}, ap.Shape())
	assert.Equal([]int{6, 2, 1}, ap.Strides())
	assert.Equal(12, ap.Size())

	ap.lock()
	ap.SetShape(1, 2, 3)
	assert.Equal(Shape{2, 3, 2}, ap.shape)
	assert.Equal([]int{6, 2, 1}, ap.strides)

	ap.unlock()
	ap.SetShape(1, 2)
	assert.Equal(Shape{1, 2}, ap.Shape())
	assert.Equal([]int{2, 1}, ap.Strides())
	assert.Equal(2, ap.Dims())
	assert.Equal(2, ap.Size())

	if ap.String() != "Shape: (1, 2), Stride: [2 1], Lock: false" {
		t.Errorf("AP formatting error. Got %q", ap.String())
	}

	ap2 := ap.Clone()
	assert.Equal(*ap, ap2)
}

func TestAccessPatternIsX(t *testing.T) {
	assert := assert.New(t)
	var ap AP

	ap = dummyScalar1()
	assert.True(ap.IsScalar())
	assert.True(ap.IsScalarEquiv())
	assert.False(ap.IsVector())
	assert.False(ap.IsColVec())
	assert.False(ap.IsRowVec())

	ap = dummyScalar2()
	assert.False(ap.IsScalar())
	assert.True(ap.IsScalarEquiv())
	assert.True(ap.IsVectorLike())
	assert.True(ap.IsVector())
	assert.False(ap.IsColVec())
	assert.False(ap.IsRowVec())

	ap = dummyColVec()
	assert.True(ap.IsColVec())
	assert.True(ap.IsVector())
	assert.False(ap.IsRowVec())
	assert.False(ap.IsScalar())

	ap = dummyRowVec()
	assert.True(ap.IsRowVec())
	assert.True(ap.IsVector())
	assert.False(ap.IsColVec())
	assert.False(ap.IsScalar())

	ap = twothree()
	assert.True(ap.IsMatrix())
	assert.False(ap.IsScalar())
	assert.False(ap.IsVector())
	assert.False(ap.IsRowVec())
	assert.False(ap.IsColVec())
}

func TestAccessPatternT(t *testing.T) {
	assert := assert.New(t)
	var ap, apT AP
	var axes []int
	var err error

	ap = twothree()

	// test no axes
	apT, axes, err = ap.T()
	if err != nil {
		t.Error(err)
	}

	assert.Equal(Shape{3, 2}, apT.shape)
	assert.Equal([]int{1, 3}, apT.strides)
	assert.Equal([]int{1, 0}, axes)
	assert.Equal(2, apT.Dims())

	// test no op
	apT, _, err = ap.T(0, 1)
	if err != nil {
		if _, ok := err.(NoOpError); !ok {
			t.Error(err)
		}
	}

	// test 3D
	ap = twothreefour()
	apT, axes, err = ap.T(2, 0, 1)
	if err != nil {
		t.Error(err)
	}
	assert.Equal(Shape{4, 2, 3}, apT.shape)
	assert.Equal([]int{1, 12, 4}, apT.strides)
	assert.Equal([]int{2, 0, 1}, axes)
	assert.Equal(3, apT.Dims())

	// test stupid axes
	_, _, err = ap.T(1, 2, 3)
	if err == nil {
		t.Error("Expected an error")
	}
}

var sliceTests = []struct {
	name   string
	shape  Shape
	slices []Slice

	correctStart  int
	correctEnd    int
	correctShape  Shape
	correctStride []int
	contiguous    bool
}{
	// vectors
	{"a[0]", Shape{5}, []Slice{S(0)}, 0, 1, ScalarShape(), nil, true},
	{"a[0:2]", Shape{5}, []Slice{S(0, 2)}, 0, 2, Shape{2}, []int{1}, true},
	{"a[1:3]", Shape{5}, []Slice{S(1, 3)}, 1, 3, Shape{2}, []int{1}, true},
	{"a[1:5:2]", Shape{5}, []Slice{S(1, 5, 2)}, 1, 5, Shape{2}, []int{2}, false},

	// matrix
	{"A[0]", Shape{2, 3}, []Slice{S(0)}, 0, 3, Shape{1, 3}, []int{1}, true},
	{"A[1:3]", Shape{4, 5}, []Slice{S(1, 3)}, 5, 15, Shape{2, 5}, []int{5, 1}, true},
	{"A[0:10] (intentionally over)", Shape{4, 5}, []Slice{S(0, 10)}, 0, 20, Shape{4, 5}, []int{5, 1}, true}, // as if nothing happened
	{"A[:, 1:3]", Shape{4, 5}, []Slice{nil, S(1, 3)}, 1, 18, Shape{4, 2}, []int{5, 1}, false},

	// tensor
	{"tensor[0, :, :]", Shape{1, 2, 2}, []Slice{rs{0, 1, 1}, nil, nil}, 0, 4, Shape{2, 2}, []int{2, 1}, true},
	{"tensor[:, 0, :]", Shape{1, 2, 2}, []Slice{nil, rs{0, 1, 1}, nil}, 0, 2, Shape{1, 2}, []int{4, 1}, false},
	{"tensor[0, :, :, :]", Shape{1, 1, 2, 2}, []Slice{rs{0, 1, 1}, nil, nil, nil}, 0, 4, Shape{1, 2, 2}, []int{4, 2, 1}, true},
	{"tensor[0,]", Shape{1, 1, 2, 2}, []Slice{rs{0, 1, 1}}, 0, 4, Shape{1, 2, 2}, []int{4, 2, 1}, true},
}

func TestAccessPatternS(t *testing.T) {
	assert := assert.New(t)
	var ap, apS AP
	var ndStart, ndEnd int
	var err error

	for _, sts := range sliceTests {
		ap = MakeAP(sts.shape, sts.shape.CalcStrides(), 0, 0)
		if apS, ndStart, ndEnd, err = ap.S(sts.shape.TotalSize(), sts.slices...); err != nil {
			t.Errorf("%v errored: %v", sts.name, err)
			continue
		}
		assert.Equal(sts.correctStart, ndStart, "Wrong start: %v. Want %d Got %d", sts.name, sts.correctStart, ndStart)
Want %d Got %d", sts.name, sts.correctEnd, ndEnd) assert.True(sts.correctShape.Eq(apS.shape), "Wrong shape: %v. Want %v. Got %v", sts.name, sts.correctShape, apS.shape) assert.Equal(sts.correctStride, apS.strides, "Wrong strides: %v. Want %v. Got %v", sts.name, sts.correctStride, apS.strides) assert.Equal(sts.contiguous, apS.DataOrder().IsContiguous(), "Wrong contiguity for %v Want %t.", sts.name, sts.contiguous) } } func TestTransposeIndex(t *testing.T) { var newInd int var oldShape Shape var pattern, oldStrides, newStrides, corrects []int /* (2,3)->(3,2) 0, 1, 2 3, 4, 5 becomes 0, 3 1, 4 2, 5 1 -> 2 2 -> 4 3 -> 1 4 -> 3 0 and 5 stay the same */ oldShape = Shape{2, 3} pattern = []int{1, 0} oldStrides = []int{3, 1} newStrides = []int{2, 1} corrects = []int{0, 2, 4, 1, 3, 5} for i := 0; i < 6; i++ { newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides) if newInd != corrects[i] { t.Errorf("Want %d, got %d instead", corrects[i], newInd) } } /* (2,3,4) -(1,0,2)-> (3,2,4) 0, 1, 2, 3 4, 5, 6, 7 8, 9, 10, 11 12, 13, 14, 15 16, 17, 18, 19 20, 21, 22, 23 becomes 0, 1, 2, 3 12, 13, 14, 15, 4, 5, 6, 7 16, 17, 18, 19 8, 9, 10, 11 20, 21, 22, 23 */ oldShape = Shape{2, 3, 4} pattern = []int{1, 0, 2} oldStrides = []int{12, 4, 1} newStrides = []int{8, 4, 1} corrects = []int{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23} for i := 0; i < len(corrects); i++ { newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides) if newInd != corrects[i] { t.Errorf("Want %d, got %d instead", corrects[i], newInd) } } /* (2,3,4) -(2,0,1)-> (4,2,3) 0, 1, 2, 3 4, 5, 6, 7 8, 9, 10, 11 12, 13, 14, 15 16, 17, 18, 19 20, 21, 22, 23 becomes 0, 4, 8 12, 16, 20 1, 5, 9 13, 17, 21 2, 6, 10 14, 18, 22 3, 7, 11 15, 19, 23 */ oldShape = Shape{2, 3, 4} pattern = []int{2, 0, 1} oldStrides = []int{12, 4, 1} newStrides = []int{6, 3, 1} corrects = []int{0, 6, 12, 18, 1, 7, 13, 19, 2, 8, 14, 20, 3, 9, 15, 21, 4, 10, 16, 22, 5, 11, 17, 23} for i := 0; i < len(corrects); i++ { newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides) if newInd != corrects[i] { t.Errorf("Want %d, got %d instead", corrects[i], newInd) } } } func TestUntransposeIndex(t *testing.T) { var newInd int var oldShape Shape var pattern, oldStrides, newStrides, corrects []int // vice versa oldShape = Shape{3, 2} oldStrides = []int{2, 1} newStrides = []int{3, 1} corrects = []int{0, 3, 1, 4, 2, 5} pattern = []int{1, 0} for i := 0; i < 6; i++ { newInd = UntransposeIndex(i, oldShape, pattern, oldStrides, newStrides) if newInd != corrects[i] { t.Errorf("Want %d, got %d instead", corrects[i], newInd) } } oldShape = Shape{3, 2, 4} oldStrides = []int{8, 4, 1} newStrides = []int{12, 4, 1} pattern = []int{1, 0, 2} corrects = []int{0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23} for i := 0; i < len(corrects); i++ { newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides) if newInd != corrects[i] { t.Errorf("Want %d, got %d instead", corrects[i], newInd) } } oldShape = Shape{4, 2, 3} pattern = []int{2, 0, 1} newStrides = []int{12, 4, 1} oldStrides = []int{6, 3, 1} corrects = []int{0, 4, 8, 12, 16, 20} for i := 0; i < len(corrects); i++ { newInd = UntransposeIndex(i, oldShape, pattern, oldStrides, newStrides) if newInd != corrects[i] { t.Errorf("Want %d, got %d instead", corrects[i], newInd) } } } func TestBroadcastStrides(t *testing.T) { ds := Shape{4, 4} ss := Shape{4} dst := []int{4, 1} sst := []int{1} st, err := BroadcastStrides(ds, ss, dst, sst) if err 
!= nil { t.Error(err) } t.Log(st) } tensor-0.9.24/api_arith.go000066400000000000000000000463661426512615100154270ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) // exported API for arithmetics and the stupidly crazy amount of overloaded semantics // Add performs a pointwise a+b. a and b can either be a scalar value or a Tensor. // // If both operands are Tensor, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out. // // Add performs elementwise addition on the Tensor(s). These operations are supported: // Add(*Dense, scalar) // Add(scalar, *Dense) // Add(*Dense, *Dense) // If the Unsafe flag is passed in, the data of the first tensor will be overwritten func Add(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var adder Adder var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor addition if oe != nil { return oe.Add(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.Add(at, bt, opts...) } if adder, ok = at.Engine().(Adder); ok { return adder.Add(at, bt, opts...) } if adder, ok = bt.Engine().(Adder); ok { return adder.Add(at, bt, opts...) } return nil, errors.New("Neither engines of either operand support Add") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if oe != nil { return oe.AddScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.AddScalar(at, bt, leftTensor, opts...) } if adder, ok = at.Engine().(Adder); ok { return adder.AddScalar(at, bt, leftTensor, opts...) } if adder, ok = bt.Engine().(Adder); ok { return adder.AddScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support Add") } default: if oe != nil { return oe.AddScalar(at, bt, true, opts...) } if adder, ok = at.Engine().(Adder); ok { return adder.AddScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support Add") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.AddScalar(bt, at, false, opts...) } if adder, ok = bt.Engine().(Adder); ok { return adder.AddScalar(bt, at, false, opts...) } return nil, errors.New("Operand B's engine does not support Add") default: return nil, errors.Errorf("Cannot perform Add of %T and %T", a, b) } } panic("Unreachable") } // Sub performs elementwise subtraction on the Tensor(s). These operations are supported: // Sub(*Dense, scalar) // Sub(scalar, *Dense) // Sub(*Dense, *Dense) // If the Unsafe flag is passed in, the data of the first tensor will be overwritten func Sub(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var suber Suber var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor subtraction if oe != nil { return oe.Sub(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.Sub(at, bt, opts...) } if suber, ok = at.Engine().(Suber); ok { return suber.Sub(at, bt, opts...) } if suber, ok = bt.Engine().(Suber); ok { return suber.Sub(at, bt, opts...)
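// (Added note, not in the original source.) The fallback chain above is the same for every
// arithmetic entry point in this file: try a's standardEngine, then b's, then probe either
// operand's Engine for the op-specific interface (here, Suber) before giving up with an error.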
} return nil, errors.New("Neither engines of either operand support Sub") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if oe != nil { return oe.SubScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.SubScalar(at, bt, leftTensor, opts...) } if suber, ok = at.Engine().(Suber); ok { return suber.SubScalar(at, bt, leftTensor, opts...) } if suber, ok = bt.Engine().(Suber); ok { return suber.SubScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support Sub") } default: if oe != nil { return oe.SubScalar(at, bt, true, opts...) } if suber, ok = at.Engine().(Suber); ok { return suber.SubScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support Sub") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.SubScalar(bt, at, false, opts...) } if suber, ok = bt.Engine().(Suber); ok { return suber.SubScalar(bt, at, false, opts...) } return nil, errors.New("Operand B's engine does not support Sub") default: return nil, errors.Errorf("Cannot perform Sub of %T and %T", a, b) } } panic("Unreachable") } // Mul performs elementwise multiplication on the Tensor(s). These operations are supported: // Mul(*Dense, scalar) // Mul(scalar, *Dense) // Mul(*Dense, *Dense) // If the Unsafe flag is passed in, the data of the first tensor will be overwritten func Mul(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var muler Muler var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor multiplication if oe != nil { return oe.Mul(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.Mul(at, bt, opts...) } if muler, ok = at.Engine().(Muler); ok { return muler.Mul(at, bt, opts...) } if muler, ok = bt.Engine().(Muler); ok { return muler.Mul(at, bt, opts...) } return nil, errors.New("Neither engines of either operand support Mul") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if oe != nil { return oe.MulScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.MulScalar(at, bt, leftTensor, opts...) } if muler, ok = at.Engine().(Muler); ok { return muler.MulScalar(at, bt, leftTensor, opts...) } if muler, ok = bt.Engine().(Muler); ok { return muler.MulScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support Mul") } default: // a Tensor * b interface if oe != nil { return oe.MulScalar(at, bt, true, opts...) } if muler, ok = at.Engine().(Muler); ok { return muler.MulScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support Mul") } default: switch bt := b.(type) { case Tensor: // b Tensor * a interface if oe = bt.standardEngine(); oe != nil { return oe.MulScalar(bt, at, false, opts...) } if muler, ok = bt.Engine().(Muler); ok { return muler.MulScalar(bt, at, false, opts...) 
} return nil, errors.New("Operand B's engine does not support Mul") default: // b interface * a interface return nil, errors.Errorf("Cannot perform Mul of %T and %T", a, b) } } panic("Unreachable") } // Div performs elementwise division on the Tensor(s). These operations are supported: // Div(*Dense, scalar) // Div(scalar, *Dense) // Div(*Dense, *Dense) // If the Unsafe flag is passed in, the data of the first tensor will be overwritten func Div(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var diver Diver var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor division if oe != nil { return oe.Div(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.Div(at, bt, opts...) } if diver, ok = at.Engine().(Diver); ok { return diver.Div(at, bt, opts...) } if diver, ok = bt.Engine().(Diver); ok { return diver.Div(at, bt, opts...) } return nil, errors.New("Neither engines of either operand support Div") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if oe != nil { return oe.DivScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.DivScalar(at, bt, leftTensor, opts...) } if diver, ok = at.Engine().(Diver); ok { return diver.DivScalar(at, bt, leftTensor, opts...) } if diver, ok = bt.Engine().(Diver); ok { return diver.DivScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support Div") } default: if oe != nil { return oe.DivScalar(at, bt, true, opts...) } if diver, ok = at.Engine().(Diver); ok { return diver.DivScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support Div") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.DivScalar(bt, at, false, opts...) } if diver, ok = bt.Engine().(Diver); ok { return diver.DivScalar(bt, at, false, opts...) } return nil, errors.New("Operand B's engine does not support Div") default: return nil, errors.Errorf("Cannot perform Div of %T and %T", a, b) } } panic("Unreachable") } // Pow performs elementwise exponentiation on the Tensor(s). These operations are supported: // Pow(*Dense, scalar) // Pow(scalar, *Dense) // Pow(*Dense, *Dense) // If the Unsafe flag is passed in, the data of the first tensor will be overwritten func Pow(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var power Power var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor exponentiation if oe != nil { return oe.Pow(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.Pow(at, bt, opts...) } if power, ok = at.Engine().(Power); ok { return power.Pow(at, bt, opts...) } if power, ok = bt.Engine().(Power); ok { return power.Pow(at, bt, opts...) 
} return nil, errors.New("Neither engines of either operand support Pow") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if oe != nil { return oe.PowScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.PowScalar(at, bt, leftTensor, opts...) } if power, ok = at.Engine().(Power); ok { return power.PowScalar(at, bt, leftTensor, opts...) } if power, ok = bt.Engine().(Power); ok { return power.PowScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support Pow") } default: if oe != nil { return oe.PowScalar(at, bt, true, opts...) } if power, ok = at.Engine().(Power); ok { return power.PowScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support Pow") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.PowScalar(bt, at, false, opts...) } if power, ok = bt.Engine().(Power); ok { return power.PowScalar(bt, at, false, opts...) } return nil, errors.New("Operand B's engine does not support Pow") default: return nil, errors.Errorf("Cannot perform Pow of %T and %T", a, b) } } panic("Unreachable") } // Mod performs elementwise modulo on the Tensor(s). These operations are supported: // Mod(*Dense, scalar) // Mod(scalar, *Dense) // Mod(*Dense, *Dense) // If the Unsafe flag is passed in, the data of the first tensor will be overwritten func Mod(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var moder Moder var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor modulo if oe != nil { return oe.Mod(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.Mod(at, bt, opts...) } if moder, ok = at.Engine().(Moder); ok { return moder.Mod(at, bt, opts...) } if moder, ok = bt.Engine().(Moder); ok { return moder.Mod(at, bt, opts...) } return nil, errors.New("Neither engines of either operand support Mod") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if oe != nil { return oe.ModScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.ModScalar(at, bt, leftTensor, opts...) } if moder, ok = at.Engine().(Moder); ok { return moder.ModScalar(at, bt, leftTensor, opts...) } if moder, ok = bt.Engine().(Moder); ok { return moder.ModScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support Mod") } default: if oe != nil { return oe.ModScalar(at, bt, true, opts...) } if moder, ok = at.Engine().(Moder); ok { return moder.ModScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support Mod") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.ModScalar(bt, at, false, opts...) } if moder, ok = bt.Engine().(Moder); ok { return moder.ModScalar(bt, at, false, opts...) 
} return nil, errors.New("Operand B's engine does not support Mod") default: return nil, errors.Errorf("Cannot perform Mod of %T and %T", a, b) } } panic("Unreachable") } // Dot is a highly opinionated API for performing dot product operations on two *Denses, a and b. // This function is opinionated with regard to the vector operations because of how it treats operations with vectors. // Vectors in this package comes in two flavours - column or row vectors. Column vectors have shape (x, 1), while row vectors have shape (1, x). // // As such, it is easy to assume that performing a linalg operation on vectors would follow the same rules (i.e shapes have to be aligned for things to work). // For the most part in this package, this is true. This function is one of the few notable exceptions. // // Here I give three specific examples of how the expectations of vector operations will differ. // Given two vectors, a, b with shapes (4, 1) and (4, 1), Dot() will perform an inner product as if the shapes were (1, 4) and (4, 1). This will result in a scalar value // Given matrix A and vector b with shapes (2, 4) and (1, 4), Dot() will perform a matrix-vector multiplication as if the shapes were (2,4) and (4,1). This will result in a column vector with shape (2,1) // Given vector a and matrix B with shapes (3, 1) and (3, 2), Dot() will perform a matrix-vector multiplication as if it were Bᵀ * a // // The main reason why this opinionated route was taken was due to the author's familiarity with NumPy, and general laziness in translating existing machine learning algorithms // to fit the API of the package. func Dot(x, y Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if xdottir, ok := x.Engine().(Dotter); ok { return xdottir.Dot(x, y, opts...) } if ydottir, ok := y.Engine().(Dotter); ok { return ydottir.Dot(x, y, opts...) } return nil, errors.New("Neither x's nor y's engines support Dot") } // FMA performs Y = A * X + Y. func FMA(a Tensor, x interface{}, y Tensor) (retVal Tensor, err error) { if xTensor, ok := x.(Tensor); ok { if oe := a.standardEngine(); oe != nil { return oe.FMA(a, xTensor, y) } if oe := xTensor.standardEngine(); oe != nil { return oe.FMA(a, xTensor, y) } if oe := y.standardEngine(); oe != nil { return oe.FMA(a, xTensor, y) } if e, ok := a.Engine().(FMAer); ok { return e.FMA(a, xTensor, y) } if e, ok := xTensor.Engine().(FMAer); ok { return e.FMA(a, xTensor, y) } if e, ok := y.Engine().(FMAer); ok { return e.FMA(a, xTensor, y) } } else { if oe := a.standardEngine(); oe != nil { return oe.FMAScalar(a, x, y) } if oe := y.standardEngine(); oe != nil { return oe.FMAScalar(a, x, y) } if e, ok := a.Engine().(FMAer); ok { return e.FMAScalar(a, x, y) } if e, ok := y.Engine().(FMAer); ok { return e.FMAScalar(a, x, y) } } return Mul(a, x, WithIncr(y)) } // MatMul performs matrix-matrix multiplication between two Tensors func MatMul(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if a.Dtype() != b.Dtype() { err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype()) return } switch at := a.(type) { case *Dense: bt := b.(*Dense) return at.MatMul(bt, opts...) } panic("Unreachable") } // MatVecMul performs matrix-vector multiplication between two Tensors. `a` is expected to be a matrix, and `b` is expected to be a vector func MatVecMul(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if a.Dtype() != b.Dtype() { err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype()) return } switch at := a.(type) { case *Dense: bt := b.(*Dense) return at.MatVecMul(bt, opts...) 
} panic("Unreachable") } // Inner finds the inner products of two vector Tensors. Both arguments to the functions are eexpected to be vectors. func Inner(a, b Tensor) (retVal interface{}, err error) { if a.Dtype() != b.Dtype() { err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype()) return } switch at := a.(type) { case *Dense: bt := b.(*Dense) return at.Inner(bt) } panic("Unreachable") } // Outer performs the outer product of two vector Tensors. Both arguments to the functions are expected to be vectors. func Outer(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if a.Dtype() != b.Dtype() { err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype()) return } switch at := a.(type) { case *Dense: bt := b.(*Dense) return at.Outer(bt, opts...) } panic("Unreachable") } // Contract performs a contraction of given tensors along given axes func Contract(a, b Tensor, aAxes, bAxes []int) (retVal Tensor, err error) { if a.Dtype() != b.Dtype() { err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype()) return } switch at := a.(type) { case *Dense: bt := b.(*Dense) return at.TensorMul(bt, aAxes, bAxes) default: panic("Unreachable") } } tensor-0.9.24/api_arith_generated_test.go000066400000000000000000001300331426512615100204670ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "testing" "testing/quick" ) func TestAdd(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Adder) we = we || !ok ret, err := Add(a, b) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add failed: %v", err) } } func TestSub(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Suber) we = we || !ok ret, err := Sub(a, b) if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub failed: %v", err) } } func TestMul(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok ret, err := Mul(a, b) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul failed: %v", err) } } func TestDiv(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) correct := a.Clone().(*Dense) we, willFailEq := 
willerr(a, numberTypes, nil) _, ok := a.Engine().(Diver) we = we || !ok ret, err := Div(a, b) if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div failed: %v", err) } } func TestPow(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := a.Engine().(Power) we = we || !ok ret, err := Pow(a, b) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow failed: %v", err) } } func TestAdd_unsafe(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Adder) we = we || !ok ret, err := Add(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add failed: %v", err) } } func TestSub_unsafe(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Suber) we = we || !ok ret, err := Sub(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub failed: %v", err) } } func TestMul_unsafe(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok ret, err := Mul(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul failed: %v", err) } } func TestDiv_unsafe(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), 
WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Diver) we = we || !ok ret, err := Div(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div failed: %v", err) } } func TestPow_unsafe(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := a.Engine().(Power) we = we || !ok ret, err := Pow(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow failed: %v", err) } } func TestAdd_reuse(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Adder) we = we || !ok ret, err := Add(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add failed: %v", err) } } func TestSub_reuse(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Suber) we = we || !ok ret, err := Sub(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub failed: %v", err) } } func TestMul_reuse(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok ret, err := Mul(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil 
{ return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul failed: %v", err) } } func TestDiv_reuse(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Diver) we = we || !ok ret, err := Div(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div failed: %v", err) } } func TestPow_reuse(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := a.Engine().(Power) we = we || !ok ret, err := Pow(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow failed: %v", err) } } func TestAdd_incr(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Adder) we = we || !ok ret, err := Add(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add failed: %v", err) } } func TestSub_incr(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Suber) we = we || !ok ret, err := Sub(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv, 
&quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub failed: %v", err) } } func TestMul_incr(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok ret, err := Mul(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul failed: %v", err) } } func TestDiv_incr(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Diver) we = we || !ok ret, err := Div(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div failed: %v", err) } } func TestPow_incr(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := a.Engine().(Power) we = we || !ok ret, err := Pow(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow failed: %v", err) } } func TestAddScalar(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(a, b) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(b, a) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { 
return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Add(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Add (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Add(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Add (tensor as right, scalar as left) failed: %v", err) } } func TestSubScalar(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(a, b) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } inv2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(b, a) if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Sub(b, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Sub(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Sub (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Sub(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Sub (tensor as right, scalar as left) failed: %v", err) } } func TestMulScalar(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(a, b) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := 
func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(b, a) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Mul(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Mul (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Mul(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Mul (tensor as right, scalar as left) failed: %v", err) } } func TestDivScalar(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := Div(a, b) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Div(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Div (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Div(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Div (tensor as right, scalar as left) failed: %v", err) } } func TestPowScalar(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := Pow(a, b) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Pow(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Pow (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := 
Pow(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Pow (tensor as right, scalar as left) failed: %v", err) } } func TestAddScalar_unsafe(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(b, a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } } func TestSubScalar_unsafe(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } inv2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(b, a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Sub(b, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err) } } func TestMulScalar_unsafe(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, 
"Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(b, a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } } func TestDivScalar_unsafe(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := Div(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } } func TestPowScalar_unsafe(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := Pow(a, b, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } } func TestAddScalar_reuse(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := 
q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(b, a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } } func TestSubScalar_reuse(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } inv2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(b, a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Sub(b, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err) } } func TestMulScalar_reuse(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(b, a, WithReuse(reuse)) if err, retEarly := 
qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } } func TestDivScalar_reuse(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := Div(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } } func TestPowScalar_reuse(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := Pow(a, b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } } func TestAddScalar_incr(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := Add(b, a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, 
&quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } } func TestSubScalar_incr(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := Sub(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Add(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } } func TestMulScalar_incr(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := Mul(b, a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } } func TestDivScalar_incr(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := Div(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } } func TestPowScalar_incr(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct 
:= a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := Pow(a, b, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } } tensor-0.9.24/api_arith_test.go000066400000000000000000000274571426512615100164700ustar00rootroot00000000000000package tensor import ( "log" "math/rand" "testing" "testing/quick" "time" "github.com/stretchr/testify/assert" ) // This file contains the tests for API functions that aren't generated by genlib func TestMod(t *testing.T) { a := New(WithBacking([]float64{1, 2, 3, 4})) b := New(WithBacking([]float64{1, 1, 1, 1})) var correct interface{} = []float64{0, 0, 0, 0} // vec-vec res, err := Mod(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar if res, err = Mod(a, 1.0); err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestFMA(t *testing.T) { same := func(q *Dense) bool { a := q.Clone().(*Dense) x := q.Clone().(*Dense) y := New(Of(q.Dtype()), WithShape(q.Shape().Clone()...)) y.Memset(identityVal(100, q.Dtype())) WithEngine(q.Engine())(y) y2 := y.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok1 := q.Engine().(FMAer) _, ok2 := q.Engine().(Muler) _, ok3 := q.Engine().(Adder) we = we || (!ok1 && (!ok2 || !ok3)) f, err := FMA(a, x, y) if err, retEarly := qcErrCheck(t, "FMA#1", a, x, we, err); retEarly { if err != nil { log.Printf("q.Engine() %T", q.Engine()) return false } return true } we, _ = willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok wi, err := Mul(a, x, WithIncr(y2)) if err, retEarly := qcErrCheck(t, "FMA#2", a, x, we, err); retEarly { if err != nil { return false } return true } return qcEqCheck(t, q.Dtype(), willFailEq, wi, f) } r := rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(same, &quick.Config{Rand: r}); err != nil { t.Error(err) } // specific engines var eng Engine // FLOAT64 ENGINE // vec-vec eng = Float64Engine{} a := New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng)) x := New(WithBacking(Range(Float64, 1, 101)), WithEngine(eng)) y := New(Of(Float64), WithShape(100), WithEngine(eng)) f, err := FMA(a, x, y) if err != nil { t.Fatal(err) } a2 := New(WithBacking(Range(Float64, 0, 100))) x2 := New(WithBacking(Range(Float64, 1, 101))) y2 := New(Of(Float64), WithShape(100)) f2, err := Mul(a2, x2, WithIncr(y2)) if err != nil { t.Fatal(err) } assert.Equal(t, f.Data(), f2.Data()) // vec-scalar a = New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng)) y = New(Of(Float64), WithShape(100)) if f, err = FMA(a, 2.0, y); err != nil { t.Fatal(err) } a2 = New(WithBacking(Range(Float64, 0, 100))) y2 = New(Of(Float64), WithShape(100)) if f2, err = Mul(a2, 2.0, WithIncr(y2)); err != nil { t.Fatal(err) } assert.Equal(t, f.Data(), f2.Data()) // FLOAT32 engine eng = Float32Engine{} a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng)) x = New(WithBacking(Range(Float32, 1, 101)), WithEngine(eng)) y = New(Of(Float32), WithShape(100), WithEngine(eng)) f, err = FMA(a, x, y) if err != nil { t.Fatal(err) } a2 = 
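// FMA is a fused multiply-add: retVal = a * x + y, elementwise. The
// quickcheck property and the engine-specific checks in this test all
// assert that FMA(a, x, y) agrees with the unfused Mul(a, x, WithIncr(y)).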
New(WithBacking(Range(Float32, 0, 100))) x2 = New(WithBacking(Range(Float32, 1, 101))) y2 = New(Of(Float32), WithShape(100)) f2, err = Mul(a2, x2, WithIncr(y2)) if err != nil { t.Fatal(err) } assert.Equal(t, f.Data(), f2.Data()) // vec-scalar a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng)) y = New(Of(Float32), WithShape(100)) if f, err = FMA(a, float32(2), y); err != nil { t.Fatal(err) } a2 = New(WithBacking(Range(Float32, 0, 100))) y2 = New(Of(Float32), WithShape(100)) if f2, err = Mul(a2, float32(2), WithIncr(y2)); err != nil { t.Fatal(err) } assert.Equal(t, f.Data(), f2.Data()) } func TestMulScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{2})) b := New(WithBacking([]float64{3})) var correct interface{} = 6.0 res, err := Mul(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Mul(b, a) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{3, 2})) b = New(WithBacking([]float64{2})) correct = []float64{6, 4} res, err = Mul(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Mul(b, a) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{3, 5})) b = New(WithBacking([]float64{7, 2})) correct = []float64{21, 10} res, err = Mul(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Mul(b, a) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Interface - tensor ai := 2.0 b = NewDense(Float64, Shape{1, 1}, WithBacking([]float64{3})) correct = []float64{6.0} res, err = Mul(ai, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Commutativity res, err = Mul(b, ai) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestDivScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = 3.0 res, err := Div(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{6, 4})) b = New(WithBacking([]float64{2})) correct = []float64{3, 2} res, err = Div(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{6})) b = New(WithBacking([]float64{3, 2})) correct = []float64{2, 3} res, err = Div(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 10})) b = New(WithBacking([]float64{7, 2})) correct = []float64{3, 5} res, err = Div(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // interface-scalar ai := 6.0 b = New(WithBacking([]float64{2})) correct = 3.0 res, err = Div(ai, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-interface a = New(WithBacking([]float64{6})) bi := 2.0 correct = 3.0 res, err = Div(a, bi) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestAddScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{2})) b := New(WithBacking([]float64{3})) var correct interface{} = 5.0 res, err := Add(a, b) if err != nil { t.Fatalf("Error: %v", err) 
} assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Add(b, a) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{3, 2})) b = New(WithBacking([]float64{2})) correct = []float64{5, 4} res, err = Add(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Add(b, a) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{3, 5})) b = New(WithBacking([]float64{7, 2})) correct = []float64{10, 7} res, err = Add(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Add(b, a) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // interface-scalar ai := 2.0 b = New(WithBacking([]float64{3})) correct = 5.0 res, err = Add(ai, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // Test commutativity res, err = Add(b, ai) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestSubScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = 4.0 res, err := Sub(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{6, 4})) b = New(WithBacking([]float64{2})) correct = []float64{4, 2} res, err = Sub(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{6})) b = New(WithBacking([]float64{3, 2})) correct = []float64{3, 4} res, err = Sub(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 10})) b = New(WithBacking([]float64{7, 2})) correct = []float64{14, 8} res, err = Sub(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // interface-scalar ai := 6.0 b = New(WithBacking([]float64{2})) correct = 4.0 res, err = Sub(ai, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-interface a = New(WithBacking([]float64{6})) bi := 2.0 correct = 4.0 res, err = Sub(a, bi) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestModScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{5})) b := New(WithBacking([]float64{2})) var correct interface{} = 1.0 res, err := Mod(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{5, 4})) b = New(WithBacking([]float64{2})) correct = []float64{1, 0} res, err = Mod(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{5})) b = New(WithBacking([]float64{3, 2})) correct = []float64{2, 1} res, err = Mod(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{22, 10})) b = New(WithBacking([]float64{7, 2})) correct = []float64{1, 0} res, err = Mod(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // interface-scalar ai := 5.0 b = New(WithBacking([]float64{2})) correct = 1.0 res, err = Mod(ai, b) if err != nil { t.Fatalf("Error: %v", err) } 
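// As with the other *ScalarScalar tests in this file, a one-element tensor
// (or a bare Go scalar) is broadcast against the other operand, so
// Mod([5], [3, 2]) yields [2, 1] element by element.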
assert.Equal(t, correct, res.Data()) // scalar-interface a = New(WithBacking([]float64{5})) bi := 2.0 correct = 1.0 res, err = Mod(a, bi) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestPowScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = 36.0 res, err := Pow(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{6, 4})) b = New(WithBacking([]float64{2})) correct = []float64{36, 16} res, err = Pow(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{6})) b = New(WithBacking([]float64{3, 2})) correct = []float64{216, 36} res, err = Pow(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{3, 10})) b = New(WithBacking([]float64{7, 2})) correct = []float64{2187, 100} res, err = Pow(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // interface-scalar ai := 6.0 b = New(WithBacking([]float64{2})) correct = 36.0 res, err = Pow(ai, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-interface a = New(WithBacking([]float64{6})) bi := 2.0 correct = 36.0 res, err = Pow(a, bi) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } tensor-0.9.24/api_cmp.go000066400000000000000000000226011426512615100150630ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" // public API for comparison ops // Lt performs an elementwise less-than comparison (a < b). a and b can either be float64 or *Dense. // It returns the same Tensor type as its input. // // If both operands are *Dense, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out. func Lt(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var lter Lter var ok bool switch at := a.(type) { case Tensor: lter, ok = at.Engine().(Lter) switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison if !ok { if lter, ok = bt.Engine().(Lter); !ok { return nil, errors.Errorf("Neither operands have engines that support Lt") } } return lter.Lt(at, bt, opts...) } else { var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if !ok { return nil, errors.Errorf("Engine does not support Lt") } return lter.LtScalar(at, bt, leftTensor, opts...) } default: if !ok { return nil, errors.Errorf("Engine does not support Lt") } return lter.LtScalar(at, bt, true, opts...) } default: switch bt := b.(type) { case Tensor: if lter, ok = bt.Engine().(Lter); !ok { return nil, errors.Errorf("Engine does not support Lt") } return lter.LtScalar(bt, at, false, opts...) default: return nil, errors.Errorf("Unable to perform Lt on %T and %T", a, b) } } } // Gt performs an elementwise greater-than comparison (a > b). a and b can either be float64 or *Dense. // It returns the same Tensor type as its input. // // If both operands are *Dense, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
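// A minimal usage sketch (values, shapes and dtypes are illustrative
// assumptions, not part of the documented contract):
//
//	a := New(WithBacking([]float64{1, 5, 3}))
//	b := New(WithBacking([]float64{2, 2, 2}))
//	mask, _ := Gt(a, b)               // bool-backed tensor: [false, true, true]
//	same, _ := Gt(a, b, AsSameType()) // float64-backed tensor: [0, 1, 1]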
func Gt(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var gter Gter var ok bool switch at := a.(type) { case Tensor: gter, ok = at.Engine().(Gter) switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison if !ok { if gter, ok = bt.Engine().(Gter); !ok { return nil, errors.Errorf("Neither operands have engines that support Gt") } } return gter.Gt(at, bt, opts...) } else { var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if !ok { return nil, errors.Errorf("Engine does not support Gt") } return gter.GtScalar(at, bt, leftTensor, opts...) } default: if !ok { return nil, errors.Errorf("Engine does not support Gt") } return gter.GtScalar(at, bt, true, opts...) } default: switch bt := b.(type) { case Tensor: if gter, ok = bt.Engine().(Gter); !ok { return nil, errors.Errorf("Engine does not support Gt") } return gter.GtScalar(bt, at, false, opts...) default: return nil, errors.Errorf("Unable to perform Gt on %T and %T", a, b) } } } // Lte performs an elementwise less-than-or-equal comparison (a <= b). a and b can either be float64 or *Dense. // It returns the same Tensor type as its input. // // If both operands are *Dense, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out. func Lte(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var lteer Lteer var ok bool switch at := a.(type) { case Tensor: lteer, ok = at.Engine().(Lteer) switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison if !ok { if lteer, ok = bt.Engine().(Lteer); !ok { return nil, errors.Errorf("Neither operands have engines that support Lte") } } return lteer.Lte(at, bt, opts...) } else { var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if !ok { return nil, errors.Errorf("Engine does not support Lte") } return lteer.LteScalar(at, bt, leftTensor, opts...) } default: if !ok { return nil, errors.Errorf("Engine does not support Lte") } return lteer.LteScalar(at, bt, true, opts...) } default: switch bt := b.(type) { case Tensor: if lteer, ok = bt.Engine().(Lteer); !ok { return nil, errors.Errorf("Engine does not support Lte") } return lteer.LteScalar(bt, at, false, opts...) default: return nil, errors.Errorf("Unable to perform Lte on %T and %T", a, b) } } } // Gte performs an elementwise greater-than-or-equal comparison (a >= b). a and b can either be float64 or *Dense. // It returns the same Tensor type as its input. // // If both operands are *Dense, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out. func Gte(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var gteer Gteer var ok bool switch at := a.(type) { case Tensor: gteer, ok = at.Engine().(Gteer) switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison if !ok { if gteer, ok = bt.Engine().(Gteer); !ok { return nil, errors.Errorf("Neither operands have engines that support Gte") } } return gteer.Gte(at, bt, opts...)
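// The else branch below handles the case where one operand is scalar-shaped:
// the operands are swapped if necessary so that the tensor proper always comes
// first, with leftTensor recording the original order for GteScalar.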
} else { var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if !ok { return nil, errors.Errorf("Engine does not support Gte") } return gteer.GteScalar(at, bt, leftTensor, opts...) } default: if !ok { return nil, errors.Errorf("Engine does not support Gte") } return gteer.GteScalar(at, bt, true, opts...) } default: switch bt := b.(type) { case Tensor: if gteer, ok = bt.Engine().(Gteer); !ok { return nil, errors.Errorf("Engine does not support Gte") } return gteer.GteScalar(bt, at, false, opts...) default: return nil, errors.Errorf("Unable to perform Gte on %T and %T", a, b) } } } // ElEq performs an elementwise equality comparison (a == b). a and b can either be float64 or *Dense. // It returns the same Tensor type as its input. // // If both operands are *Dense, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out. func ElEq(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var eleqer ElEqer var ok bool switch at := a.(type) { case Tensor: eleqer, ok = at.Engine().(ElEqer) switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison if !ok { if eleqer, ok = bt.Engine().(ElEqer); !ok { return nil, errors.Errorf("Neither operands have engines that support ElEq") } } return eleqer.ElEq(at, bt, opts...) } else { var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a Scalar-Tensor * b Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a Tensor * b Scalar-Tensor } if !ok { return nil, errors.Errorf("Engine does not support ElEq") } return eleqer.EqScalar(at, bt, leftTensor, opts...) } default: if !ok { return nil, errors.Errorf("Engine does not support ElEq") } return eleqer.EqScalar(at, bt, true, opts...) } default: switch bt := b.(type) { case Tensor: if eleqer, ok = bt.Engine().(ElEqer); !ok { return nil, errors.Errorf("Engine does not support ElEq") } return eleqer.EqScalar(bt, at, false, opts...) default: return nil, errors.Errorf("Unable to perform ElEq on %T and %T", a, b) } } } // ElNe performs an elementwise inequality comparison (a != b). a and b can either be float64 or *Dense. // It returns the same Tensor type as its input. // // If both operands are *Dense, shape is checked first. // Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out. func ElNe(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var eleqer ElEqer var ok bool switch at := a.(type) { case Tensor: eleqer, ok = at.Engine().(ElEqer) switch bt := b.(type) { case Tensor: if !ok { if eleqer, ok = bt.Engine().(ElEqer); !ok { return nil, errors.Errorf("Neither operands have engines that support ElNe") } } return eleqer.ElNe(at, bt, opts...) default: if !ok { return nil, errors.Errorf("Engine does not support ElNe") } return eleqer.NeScalar(at, bt, true, opts...) } default: switch bt := b.(type) { case Tensor: if eleqer, ok = bt.Engine().(ElEqer); !ok { return nil, errors.Errorf("Engine does not support ElNe") } return eleqer.NeScalar(bt, at, false, opts...) default: return nil, errors.Errorf("Unable to perform ElNe on %T and %T", a, b) } } } tensor-0.9.24/api_cmp_generated_test.go000066400000000000000000001041511426512615100201410ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT.
package tensor import ( "reflect" "testing" "testing/quick" ) func TestGt(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Gt(a, b) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gt(b, c) if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Gt(a, c) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestGte(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Gte(a, b) if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gte(b, c) if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Gte(a, c) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestLt(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Lt(a, b) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lt(b, c) if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Lt(a, c) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestLte(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := 
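// Each transFn in this file quickchecks transitivity over the boolean result
// masks: wherever (a op b)[i] and (b op c)[i] both hold, (a op c)[i] must
// hold as well, for op in {>, >=, <, <=, ==}.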
q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Lte(a, b) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lte(b, c) if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Lte(a, c) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestEq(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := ElEq(a, b) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := ElEq(b, c) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := ElEq(a, c) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := ElEq(a, b) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElEq(b, a) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElEq failed: %v", err) } } func TestNe(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := ElNe(a, b) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElNe(b, a) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElNe failed: %v", err) } } func TestGt_assame(t
*testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Gt(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gt(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Gt(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestGte_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Gte(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gte(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Gte(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestLt_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Lt(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lt(b, c, AsSameType()) if err, retEarly := 
qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Lt(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestLte_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := Lte(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lte(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := Lte(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestEq_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := ElEq(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := ElEq(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := ElEq(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := 
willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := ElEq(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElEq(b, a, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElEq failed: %v", err) } } func TestNe_assame(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := ElNe(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElNe(b, a, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElNe failed: %v", err) } } func TestGtScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Gt(a, b) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gt(b, c) if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Gt(a, c) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestGteScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Gte(a, b) if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gte(b, c) if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err
:= Gte(a, c) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestLtScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Lt(a, b) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lt(b, c) if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Lt(a, c) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestLteScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Lte(a, b) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lte(b, c) if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Lte(a, c) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestEqScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := ElEq(a, b) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := ElEq(b, c) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := ElEq(a, c) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if 
err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := ElEq(a, b) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElEq(b, a) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElEq failed: %v", err) } } func TestNeScalar(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := ElNe(a, b) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElNe(b, a) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElNe failed: %v", err) } } func TestGtScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Gt(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gt(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Gt(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestGteScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Gte(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, 
"Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Gte(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Gte(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestLtScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Lt(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lt(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Lt(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestLteScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := Lte(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := Lte(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := Lte(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { 
t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestEqScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := ElEq(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := ElEq(b, c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := ElEq(a, c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := ElEq(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElEq(b, a, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElEq failed: %v", err) } } func TestNeScalar_assame(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := ElNe(a, b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := ElNe(b, a, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElNe failed: %v", err) } } tensor-0.9.24/api_cmp_test.go000066400000000000000000000120671426512615100161270ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) // This file contains the tests for API functions that aren't generated by 
genlib func TestLtScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = false res, err := Lt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{1, 4})) b = New(WithBacking([]float64{2})) correct = []bool{true, false} res, err = Lt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{3})) b = New(WithBacking([]float64{6, 2})) correct = []bool{true, false} res, err = Lt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 2})) b = New(WithBacking([]float64{7, 10})) correct = []bool{false, true} res, err = Lt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestGtScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = true res, err := Gt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{1, 4})) b = New(WithBacking([]float64{2})) correct = []bool{false, true} res, err = Gt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{3})) b = New(WithBacking([]float64{6, 2})) correct = []bool{false, true} res, err = Gt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 2})) b = New(WithBacking([]float64{7, 10})) correct = []bool{true, false} res, err = Gt(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestLteScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = false res, err := Lte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{1, 2, 4})) b = New(WithBacking([]float64{2})) correct = []bool{true, true, false} res, err = Lte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{3})) b = New(WithBacking([]float64{6, 2})) correct = []bool{true, false} res, err = Lte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 2})) b = New(WithBacking([]float64{7, 10})) correct = []bool{false, true} res, err = Lte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestGteScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = true res, err := Gte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{1, 2, 4})) b = New(WithBacking([]float64{2})) correct = []bool{false, true, true} res, err = Gte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{3})) b = New(WithBacking([]float64{6, 3, 2})) correct = []bool{false, true, true} res, err = Gte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, 
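// Note: when both operands are scalar-shaped, the comparison collapses to a
// single value, so res.Data() yields a bare bool rather than a []bool; that
// is why `correct` alternates between bool and []bool in these tests.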
res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 31, 2})) b = New(WithBacking([]float64{7, 31, 10})) correct = []bool{true, true, false} res, err = Gte(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } func TestElEqScalarScalar(t *testing.T) { // scalar-scalar a := New(WithBacking([]float64{6})) b := New(WithBacking([]float64{2})) var correct interface{} = false res, err := ElEq(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // scalar-tensor a = New(WithBacking([]float64{1, 2, 4})) b = New(WithBacking([]float64{2})) correct = []bool{false, true, false} res, err = ElEq(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor-scalar a = New(WithBacking([]float64{3})) b = New(WithBacking([]float64{6, 3, 2})) correct = []bool{false, true, false} res, err = ElEq(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) // tensor - tensor a = New(WithBacking([]float64{21, 10})) b = New(WithBacking([]float64{7, 10})) correct = []bool{false, true} res, err = ElEq(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) } tensor-0.9.24/api_matop.go000066400000000000000000000146711426512615100154340ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) // this file handles matops. While by default most of these matops should already have been defined as part of the // Tensor interface, not all are possible(for example, concatenating a sparse tensor), hence the need for the following functions // Narrow narrows the tensor. func Narrow(t Tensor, dim, start, length int) (View, error) { dim = resolveAxis(dim, t.Dims()) slices := make([]Slice, MinInt(dim+1, t.Dims())) slices[dim] = S(start, start+length, 1) return t.Slice(slices...) } // Repeat repeats a Tensor along the axis and given the number of repeats. func Repeat(t Tensor, axis int, repeats ...int) (retVal Tensor, err error) { if r, ok := t.Engine().(Repeater); ok { return r.Repeat(t, axis, repeats...) } return nil, errors.New("Engine does not support Repeat") } // RepeatReuse repeats a Tensor along the axis and the given number of repeats, and puts the results in the provided reuse tensor. If the reuse tensor is not correctly sized, then an error will be given, but the results will still be valid. func RepeatReuse(t, reuse Tensor, axis int, repeats ...int) (retval Tensor, err error) { if r, ok := t.Engine().(Repeater); ok { return r.RepeatReuse(t, reuse, axis, repeats...) } return nil, errors.New("Engine does not support Repeat") } // T safely transposes a Tensor. It returns a tensor that is not a view of the input tensor - rather, the data is all copied. func T(t Tensor, axes ...int) (retVal Tensor, err error) { switch tt := t.(type) { case *Dense: return tt.SafeT(axes...) } panic("Unreachable") } // Transpose performs transposition of a tensor according to its axes. func Transpose(t Tensor, axes ...int) (retVal Tensor, err error) { switch tt := t.(type) { case *Dense: var ret *Dense if ret, err = tt.SafeT(axes...); err != nil { return } ret.Transpose() retVal = ret return } panic("Unreachable") } // Concat concatenates a list of Tensors. 
At the moment the operation only supports Tensors of the same type // (*Dense can only be concatenated with a bunch of *Dense, CSCs can only be concatenated with a bunch of CSC, etc) func Concat(axis int, t Tensor, others ...Tensor) (retVal Tensor, err error) { if len(others) == 0 { return t, nil } switch T := t.(type) { case *Dense: ts := make([]*Dense, len(others)) for i, o := range others { if ot, ok := o.(*Dense); ok { ts[i] = ot continue } return nil, errors.Errorf("Expected all Tensors to be *Dense") } return T.Concat(axis, ts...) } panic("Unreachable") }
// Copy copies a tensor to another. For *Dense views, only the relevant slots are copied. func Copy(dst, src Tensor) error { switch st := src.(type) { case DenseTensor: dt, ok := dst.(DenseTensor) if !ok { return errors.Errorf("Cannot copy from DenseTensor to %T", dst) } if st.RequiresIterator() || dt.RequiresIterator() { siter := st.Iterator() diter := dt.Iterator() _, err := copyDenseIter(dt, st, diter, siter) return err } copyDense(dt, st) return nil default: return errors.Errorf("NYI for Copy %T", src) } panic("Unreachable") }
// Stack stacks a list of other Tensors. At the moment the operation only supports Tensors of the same type. // (*Dense can only be stacked with *Dense... etc) func Stack(axis int, t Tensor, others ...Tensor) (retVal Tensor, err error) { if len(others) == 0 { return t, nil } switch T := t.(type) { case DenseTensor: var dts []DenseTensor if dts, err = tensorsToDenseTensors(others); err != nil { return nil, errors.Wrap(err, "Cannot convert others into a slice of DenseTensors") } return T.stackDense(axis, dts...) } panic("Unreachable") }
// Materialize takes a View and copies out the data into a new allocation. func Materialize(t Tensor) Tensor { switch tt := t.(type) { case View: return tt.Materialize() default: return t } }
// Diag performs diagonalization on the given tensor, delegating to the tensor's engine if it implements the Diager interface. func Diag(t Tensor) (retVal Tensor, err error) { if d, ok := t.Engine().(Diager); ok { return d.Diag(t) } return nil, errors.Errorf("Unable to perform diagonalization of tensor") }
// ByIndices allows for selection of values of `a` by the indices listed in the `indices` tensor. // The `indices` tensor has to be a vector-like tensor of ints. func ByIndices(a, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if axis >= a.Shape().Dims() { return nil, errors.Errorf("Cannot select by indices on axis %d. Input only has %d dims", axis, a.Shape().Dims()) } if sbi, ok := a.Engine().(ByIndiceser); ok { return sbi.SelectByIndices(a, indices, axis, opts...) } return nil, errors.Errorf("Unable to select by indices. Engine %T does not support that.", a.Engine()) }
// ByIndicesB is the backpropagation of ByIndices. func ByIndicesB(a, b, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if axis >= a.Shape().Dims() { return nil, errors.Errorf("Cannot select by indices on axis %d. Input only has %d dims", axis, a.Shape().Dims()) } if sbi, ok := a.Engine().(ByIndiceser); ok { return sbi.SelectByIndicesB(a, b, indices, axis, opts...) } return nil, errors.Errorf("Unable to select by indices. Engine %T does not support that.", a.Engine()) }
// LogSoftMax applies log softmax to the given tensor. func LogSoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if sm, ok := x.Engine().(SoftMaxer); ok { return sm.LogSoftMax(x, axis, opts...) } return nil, errors.Errorf("Unable to apply LogSoftMax. Engine %T does not support that.", x.Engine()) }
// SoftMax applies softmax to the given tensor.
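// A minimal usage sketch (illustrative values; it assumes the backing engine — e.g. the default
// engine used by New — implements SoftMaxer, which is an assumption, not a guarantee):
//	t := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	out, err := SoftMax(t, 1) // softmax along axis 1; each row of out sums to ~1
//	if err != nil {
//		// the tensor's engine does not implement SoftMaxer
//	}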
func SoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if sm, ok := x.Engine().(SoftMaxer); ok { return sm.SoftMax(x, axis, opts...) } return nil, errors.Errorf("Unable to apply SoftMax. Engine %T does not support that.", x.Engine()) }
// SoftMaxB applies the softmax backwards operation. func SoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if sm, ok := output.Engine().(SoftMaxer); ok { return sm.SoftMaxB(output, grad, axis, opts...) } return nil, errors.Errorf("Unable to apply SoftMaxB. Engine %T does not support that.", output.Engine()) }
// LogSoftMaxB applies the log softmax backwards operation. func LogSoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if sm, ok := output.Engine().(SoftMaxer); ok { return sm.LogSoftMaxB(output, grad, axis, opts...) } return nil, errors.Errorf("Unable to apply LogSoftMaxB. Engine %T does not support that.", output.Engine()) }
tensor-0.9.24/api_minmax.go000066400000000000000000000114711426512615100156000ustar00rootroot00000000000000package tensor import "github.com/pkg/errors"
// MinBetween returns the element-wise minimum of its two operands, which may be Tensors or scalar values. func MinBetween(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var minbetweener MinBetweener var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor MinBetween if oe != nil { return oe.MinBetween(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.MinBetween(at, bt, opts...) } if minbetweener, ok = at.Engine().(MinBetweener); ok { return minbetweener.MinBetween(at, bt, opts...) } if minbetweener, ok = bt.Engine().(MinBetweener); ok { return minbetweener.MinBetween(at, bt, opts...) } return nil, errors.New("Neither engines of either operand support MinBetween") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a is scalar-shaped, b is a Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a is a Tensor, b is scalar-shaped } if oe != nil { return oe.MinBetweenScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.MinBetweenScalar(at, bt, leftTensor, opts...) } if minbetweener, ok = at.Engine().(MinBetweener); ok { return minbetweener.MinBetweenScalar(at, bt, leftTensor, opts...) } if minbetweener, ok = bt.Engine().(MinBetweener); ok { return minbetweener.MinBetweenScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support MinBetween") } default: if oe != nil { return oe.MinBetweenScalar(at, bt, true, opts...) } if minbetweener, ok = at.Engine().(MinBetweener); ok { return minbetweener.MinBetweenScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support MinBetween") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.MinBetweenScalar(bt, at, false, opts...) } if minbetweener, ok = bt.Engine().(MinBetweener); ok { return minbetweener.MinBetweenScalar(bt, at, false, opts...)
} return nil, errors.New("Operand B's engine does not support MinBetween") default: return nil, errors.Errorf("Cannot perform MinBetween of %T and %T", a, b) } } panic("Unreachable") }
// MaxBetween returns the element-wise maximum of its two operands, which may be Tensors or scalar values. func MaxBetween(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var maxbetweener MaxBetweener var oe standardEngine var ok bool switch at := a.(type) { case Tensor: oe = at.standardEngine() switch bt := b.(type) { case Tensor: if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor MaxBetween if oe != nil { return oe.MaxBetween(at, bt, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.MaxBetween(at, bt, opts...) } if maxbetweener, ok = at.Engine().(MaxBetweener); ok { return maxbetweener.MaxBetween(at, bt, opts...) } if maxbetweener, ok = bt.Engine().(MaxBetweener); ok { return maxbetweener.MaxBetween(at, bt, opts...) } return nil, errors.New("Neither engines of either operand support MaxBetween") } else { // at least one of the operands is a scalar var leftTensor bool if !bt.Shape().IsScalar() { leftTensor = false // a is scalar-shaped, b is a Tensor tmp := at at = bt bt = tmp } else { leftTensor = true // a is a Tensor, b is scalar-shaped } if oe != nil { return oe.MaxBetweenScalar(at, bt, leftTensor, opts...) } if oe = bt.standardEngine(); oe != nil { return oe.MaxBetweenScalar(at, bt, leftTensor, opts...) } if maxbetweener, ok = at.Engine().(MaxBetweener); ok { return maxbetweener.MaxBetweenScalar(at, bt, leftTensor, opts...) } if maxbetweener, ok = bt.Engine().(MaxBetweener); ok { return maxbetweener.MaxBetweenScalar(at, bt, leftTensor, opts...) } return nil, errors.New("Neither engines of either operand support MaxBetween") } default: if oe != nil { return oe.MaxBetweenScalar(at, bt, true, opts...) } if maxbetweener, ok = at.Engine().(MaxBetweener); ok { return maxbetweener.MaxBetweenScalar(at, bt, true, opts...) } return nil, errors.New("Operand A's engine does not support MaxBetween") } default: switch bt := b.(type) { case Tensor: if oe = bt.standardEngine(); oe != nil { return oe.MaxBetweenScalar(bt, at, false, opts...) } if maxbetweener, ok = bt.Engine().(MaxBetweener); ok { return maxbetweener.MaxBetweenScalar(bt, at, false, opts...) } return nil, errors.New("Operand B's engine does not support MaxBetween") default: return nil, errors.Errorf("Cannot perform MaxBetween of %T and %T", a, b) } } panic("Unreachable") }
tensor-0.9.24/api_reduction.go000066400000000000000000000015101426512615100162740ustar00rootroot00000000000000package tensor import "github.com/pkg/errors"
// Sum sums a Tensor along the given axes func Sum(t Tensor, along ...int) (retVal Tensor, err error) { if sumer, ok := t.Engine().(Sumer); ok { return sumer.Sum(t, along...) } return nil, errors.New("Engine does not support Sum()") }
// Argmax finds the index of the max value along the axis provided func Argmax(t Tensor, axis int) (retVal Tensor, err error) { if argmaxer, ok := t.Engine().(Argmaxer); ok { return argmaxer.Argmax(t, axis) } return nil, errors.New("Engine does not support Argmax()") }
// Argmin finds the index of the min value along the axis provided func Argmin(t Tensor, axis int) (retVal Tensor, err error) { if argminer, ok := t.Engine().(Argminer); ok { return argminer.Argmin(t, axis) } return nil, errors.New("Engine does not support Argmin()") }
tensor-0.9.24/api_unary.go000066400000000000000000000065231426512615100154470ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT.
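// Every function in this file follows the same dispatch pattern: type-assert the tensor's engine
// to the per-op capability interface (Neger, Exper, Sqrter, ...) and delegate, erroring out when
// the engine lacks the op. A caller-side sketch (values are illustrative; it assumes the tensor's
// engine implements Neger, as the default engine behind New is expected to):
//	a := New(WithBacking([]float64{1, -2, 3}))
//	b, _ := Neg(a)             // fresh tensor backed by []float64{-1, 2, -3}
//	_, _ = Neg(a, UseUnsafe()) // same op, but mutates a's backing data in place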
package tensor import "github.com/pkg/errors" func Neg(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if neger, ok := e.(Neger); ok { return neger.Neg(a, opts...) } err = errors.Errorf("Engine does not perform Neg") return } func Inv(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if inver, ok := e.(Inver); ok { return inver.Inv(a, opts...) } err = errors.Errorf("Engine does not perform Inv") return } func Square(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if squarer, ok := e.(Squarer); ok { return squarer.Square(a, opts...) } err = errors.Errorf("Engine does not perform Square") return } func Cube(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if cuber, ok := e.(Cuber); ok { return cuber.Cube(a, opts...) } err = errors.Errorf("Engine does not perform Cube") return } func Exp(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if exper, ok := e.(Exper); ok { return exper.Exp(a, opts...) } err = errors.Errorf("Engine does not perform Exp") return } func Tanh(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if tanher, ok := e.(Tanher); ok { return tanher.Tanh(a, opts...) } err = errors.Errorf("Engine does not perform Tanh") return } func Log(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if loger, ok := e.(Loger); ok { return loger.Log(a, opts...) } err = errors.Errorf("Engine does not perform Log") return } func Log2(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if log2er, ok := e.(Log2er); ok { return log2er.Log2(a, opts...) } err = errors.Errorf("Engine does not perform Log2") return } func Log10(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if log10er, ok := e.(Log10er); ok { return log10er.Log10(a, opts...) } err = errors.Errorf("Engine does not perform Log10") return } func Sqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if sqrter, ok := e.(Sqrter); ok { return sqrter.Sqrt(a, opts...) } err = errors.Errorf("Engine does not perform Sqrt") return } func Cbrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if cbrter, ok := e.(Cbrter); ok { return cbrter.Cbrt(a, opts...) } err = errors.Errorf("Engine does not perform Cbrt") return } func InvSqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if invsqrter, ok := e.(InvSqrter); ok { return invsqrter.InvSqrt(a, opts...) } err = errors.Errorf("Engine does not perform InvSqrt") return } func Abs(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if abser, ok := e.(Abser); ok { return abser.Abs(a, opts...) } err = errors.Errorf("Engine does not perform Abs") return } func Sign(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if signer, ok := e.(Signer); ok { return signer.Sign(a, opts...) } err = errors.Errorf("Engine does not perform Sign") return } func Clamp(a Tensor, min interface{}, max interface{}, opts ...FuncOpt) (retVal Tensor, err error) { e := a.Engine() if clamper, ok := e.(Clamper); ok { return clamper.Clamp(a, min, max, opts...) } err = errors.Errorf("Engine does not perform Clamp") return } tensor-0.9.24/api_unary_generated_test.go000066400000000000000000000564131426512615100205270ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
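// The generated tests below all share one quickcheck property: apply an op, then an (approximate)
// inverse, and check that the input round-trips — Neg∘Neg, Sqrt∘Square, Log∘Exp, and so on.
// Distilled to its skeleton (op, inv and allClose are illustrative stand-ins, not package names):
//	prop := func(q *Dense) bool {
//		a := q.Clone().(*Dense)
//		correct := a.Clone().(*Dense)
//		ret, err := op(a) // e.g. Square
//		if err != nil {
//			return true // expected-error cases bail out early via qcErrCheck
//		}
//		inv(ret, UseUnsafe()) // e.g. Sqrt
//		return allClose(correct.Data(), ret.Data())
//	}
//	quick.Check(prop, nil)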
package tensor import ( "testing" "testing/quick" ) func TestNeg(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Neger) we = we || !ok ret, err := Neg(a) if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly { if err != nil { return false } return true } Neg(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Neg failed: %v", err) } } func TestSquare(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Squarer) we = we || !ok ret, err := Square(a) if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil { return true // uninvertible due to type class implementation issues } Sqrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Square failed: %v", err) } } func TestCube(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Cuber) we = we || !ok ret, err := Cube(a) if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true // uninvertible due to type class implementation issues } Cbrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cube failed: %v", err) } } func TestExp(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Exper) we = we || !ok ret, err := Exp(a) if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly { if err != nil { return false } return true } Log(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Exp failed: %v", err) } } func TestLog(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Loger) we = we || !ok ret, err := Log(a) if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly { if err != nil { return false } return true } Exp(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Log failed: %v", err) } } func TestSqrt(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, 
ok := q.Engine().(Sqrter) we = we || !ok ret, err := Sqrt(a) if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly { if err != nil { return false } return true } Square(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Sqrt failed: %v", err) } } func TestCbrt(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Cbrter) we = we || !ok ret, err := Cbrt(a) if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly { if err != nil { return false } return true } Cube(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cbrt failed: %v", err) } } func TestNeg_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Neger) we = we || !ok ret, err := Neg(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly { if err != nil { return false } return true } Neg(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Neg failed: %v", err) } } func TestSquare_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Squarer) we = we || !ok ret, err := Square(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil { return true // uninvertible due to type class implementation issues } Sqrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Square failed: %v", err) } } func TestCube_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Cuber) we = we || !ok ret, err := Cube(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true // uninvertible due to type class implementation issues } Cbrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cube failed: %v", err) } } func TestExp_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct 
:= a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Exper) we = we || !ok ret, err := Exp(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly { if err != nil { return false } return true } Log(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Exp failed: %v", err) } } func TestLog_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Loger) we = we || !ok ret, err := Log(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly { if err != nil { return false } return true } Exp(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Log failed: %v", err) } } func TestSqrt_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Sqrter) we = we || !ok ret, err := Sqrt(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly { if err != nil { return false } return true } Square(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Sqrt failed: %v", err) } } func TestCbrt_unsafe(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Cbrter) we = we || !ok ret, err := Cbrt(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly { if err != nil { return false } return true } Cube(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cbrt failed: %v", err) } } func TestNeg_reuse(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Neger) we = we || !ok ret, err := Neg(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly { if err != nil { return false } return true } Neg(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Neg failed: %v", err) } } func TestSquare_reuse(t *testing.T) { invFn := func(q 
*Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Squarer) we = we || !ok ret, err := Square(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil { return true // uninvertible due to type class implementation issues } Sqrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Square failed: %v", err) } } func TestCube_reuse(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Cuber) we = we || !ok ret, err := Cube(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true // uninvertible due to type class implementation issues } Cbrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cube failed: %v", err) } } func TestExp_reuse(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Exper) we = we || !ok ret, err := Exp(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly { if err != nil { return false } return true } Log(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Exp failed: %v", err) } } func TestLog_reuse(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Loger) we = we || !ok ret, err := Log(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly { if err != nil { return false } return true } Exp(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Log failed: %v", err) } } func TestSqrt_reuse(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, nil) 
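// (note: willerr is a package-level test helper defined elsewhere in this repo; from its use here,
// `we` appears to flag dtype/type-class combinations that are expected to error, and `willFailEq`
// appears to flag dtypes where exact equality checks are unreliable, e.g. float rounding — both
// are consumed by qcErrCheck/qcEqCheck further down)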
_, ok := q.Engine().(Sqrter) we = we || !ok ret, err := Sqrt(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly { if err != nil { return false } return true } Square(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Sqrt failed: %v", err) } } func TestCbrt_reuse(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Cbrter) we = we || !ok ret, err := Cbrt(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly { if err != nil { return false } return true } Cube(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cbrt failed: %v", err) } } func TestNeg_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Neger) we = we || !ok ret, err := Neg(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Neg(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Neg failed: %v", err) } } func TestSquare_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Squarer) we = we || !ok ret, err := Square(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil { return true // uninvertible due to type class implementation issues } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Sqrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Square failed: %v", err) } } func TestCube_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, 
ok := q.Engine().(Cuber) we = we || !ok ret, err := Cube(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly { if err != nil { return false } return true } if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true // uninvertible due to type class implementation issues } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Cbrt(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cube failed: %v", err) } } func TestExp_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Exper) we = we || !ok ret, err := Exp(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Log(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Exp failed: %v", err) } } func TestLog_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Loger) we = we || !ok ret, err := Log(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Exp(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Log failed: %v", err) } } func TestSqrt_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatcmplxTypes, nil) _, ok := q.Engine().(Sqrter) we = we || !ok ret, err := Sqrt(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Square(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Sqrt failed: %v", err) } } func TestCbrt_incr(t *testing.T) { invFn := func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), 
WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Cbrter) we = we || !ok ret, err := Cbrt(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Cube(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv tests for Cbrt failed: %v", err) } } tensor-0.9.24/api_unary_test.go000066400000000000000000000766631426512615100165220ustar00rootroot00000000000000package tensor import ( "math/rand" "testing" "testing/quick" "time" "math" "github.com/stretchr/testify/assert" "github.com/chewxy/math32" ) /* GENERATED FILE BY Genlib V1. DO NOT EDIT */ var clampTests = []struct { a, reuse interface{} min, max interface{} correct interface{} correctSliced interface{} }{ {[]int{1, 2, 3, 4}, []int{10, 20, 30, 40}, int(2), int(3), []int{2, 2, 3, 3}, []int{2, 2, 3}}, {[]int8{1, 2, 3, 4}, []int8{10, 20, 30, 40}, int8(2), int8(3), []int8{2, 2, 3, 3}, []int8{2, 2, 3}}, {[]int16{1, 2, 3, 4}, []int16{10, 20, 30, 40}, int16(2), int16(3), []int16{2, 2, 3, 3}, []int16{2, 2, 3}}, {[]int32{1, 2, 3, 4}, []int32{10, 20, 30, 40}, int32(2), int32(3), []int32{2, 2, 3, 3}, []int32{2, 2, 3}}, {[]int64{1, 2, 3, 4}, []int64{10, 20, 30, 40}, int64(2), int64(3), []int64{2, 2, 3, 3}, []int64{2, 2, 3}}, {[]uint{1, 2, 3, 4}, []uint{10, 20, 30, 40}, uint(2), uint(3), []uint{2, 2, 3, 3}, []uint{2, 2, 3}}, {[]uint8{1, 2, 3, 4}, []uint8{10, 20, 30, 40}, uint8(2), uint8(3), []uint8{2, 2, 3, 3}, []uint8{2, 2, 3}}, {[]uint16{1, 2, 3, 4}, []uint16{10, 20, 30, 40}, uint16(2), uint16(3), []uint16{2, 2, 3, 3}, []uint16{2, 2, 3}}, {[]uint32{1, 2, 3, 4}, []uint32{10, 20, 30, 40}, uint32(2), uint32(3), []uint32{2, 2, 3, 3}, []uint32{2, 2, 3}}, {[]uint64{1, 2, 3, 4}, []uint64{10, 20, 30, 40}, uint64(2), uint64(3), []uint64{2, 2, 3, 3}, []uint64{2, 2, 3}}, {[]float32{1, 2, 3, 4}, []float32{10, 20, 30, 40}, float32(2), float32(3), []float32{2, 2, 3, 3}, []float32{2, 2, 3}}, {[]float64{1, 2, 3, 4}, []float64{10, 20, 30, 40}, float64(2), float64(3), []float64{2, 2, 3, 3}, []float64{2, 2, 3}}, } var clampTestsMasked = []struct { a, reuse interface{} min, max interface{} correct interface{} correctSliced interface{} }{ {[]int{1, 2, 3, 4}, []int{1, 20, 30, 40}, int(2), int(3), []int{1, 2, 3, 3}, []int{1, 2, 3}}, {[]int8{1, 2, 3, 4}, []int8{1, 20, 30, 40}, int8(2), int8(3), []int8{1, 2, 3, 3}, []int8{1, 2, 3}}, {[]int16{1, 2, 3, 4}, []int16{1, 20, 30, 40}, int16(2), int16(3), []int16{1, 2, 3, 3}, []int16{1, 2, 3}}, {[]int32{1, 2, 3, 4}, []int32{1, 20, 30, 40}, int32(2), int32(3), []int32{1, 2, 3, 3}, []int32{1, 2, 3}}, {[]int64{1, 2, 3, 4}, []int64{1, 20, 30, 40}, int64(2), int64(3), []int64{1, 2, 3, 3}, []int64{1, 2, 3}}, {[]uint{1, 2, 3, 4}, []uint{1, 20, 30, 40}, uint(2), uint(3), []uint{1, 2, 3, 3}, []uint{1, 2, 3}}, {[]uint8{1, 2, 3, 4}, []uint8{1, 20, 30, 40}, uint8(2), uint8(3), []uint8{1, 2, 3, 3}, []uint8{1, 2, 3}}, {[]uint16{1, 2, 3, 4}, []uint16{1, 20, 30, 40}, uint16(2), uint16(3), []uint16{1, 2, 3, 3}, []uint16{1, 2, 3}}, {[]uint32{1, 2, 3, 4}, []uint32{1, 20, 30, 40}, uint32(2), uint32(3), []uint32{1, 2, 3, 3}, 
[]uint32{1, 2, 3}}, {[]uint64{1, 2, 3, 4}, []uint64{1, 20, 30, 40}, uint64(2), uint64(3), []uint64{1, 2, 3, 3}, []uint64{1, 2, 3}}, {[]float32{1, 2, 3, 4}, []float32{1, 20, 30, 40}, float32(2), float32(3), []float32{1, 2, 3, 3}, []float32{1, 2, 3}}, {[]float64{1, 2, 3, 4}, []float64{1, 20, 30, 40}, float64(2), float64(3), []float64{1, 2, 3, 3}, []float64{1, 2, 3}}, } func TestClamp(t *testing.T) { assert := assert.New(t) var got, sliced Tensor var T, reuse *Dense var err error for _, ct := range clampTests { T = New(WithBacking(ct.a)) // safe if got, err = Clamp(T, ct.min, ct.max); err != nil { t.Error(err) continue } if got == T { t.Error("expected got != T") continue } assert.Equal(ct.correct, got.Data()) // sliced safe if sliced, err = T.Slice(makeRS(0, 3)); err != nil { t.Error("Unable to slice T") continue } if got, err = Clamp(sliced, ct.min, ct.max); err != nil { t.Error(err) continue } // reuse reuse = New(WithBacking(ct.reuse)) if got, err = Clamp(T, ct.min, ct.max, WithReuse(reuse)); err != nil { t.Error(err) continue } if got != reuse { t.Error("expected got == reuse") continue } assert.Equal(ct.correct, got.Data()) // unsafe if got, err = Clamp(T, ct.min, ct.max, UseUnsafe()); err != nil { t.Error(err) continue } if got != T { t.Error("expected got == T") continue } assert.Equal(ct.correct, got.Data()) } } func TestClampMasked(t *testing.T) { assert := assert.New(t) var got, sliced Tensor var T, reuse *Dense var err error for _, ct := range clampTestsMasked { T = New(WithBacking(ct.a, []bool{true, false, false, false})) // safe if got, err = Clamp(T, ct.min, ct.max); err != nil { t.Error(err) continue } if got == T { t.Error("expected got != T") continue } assert.Equal(ct.correct, got.Data()) // sliced safe if sliced, err = T.Slice(makeRS(0, 3)); err != nil { t.Error("Unable to slice T") continue } if got, err = Clamp(sliced, ct.min, ct.max); err != nil { t.Error(err) continue } // reuse reuse = New(WithBacking(ct.reuse, []bool{true, false, false, false})) if got, err = Clamp(T, ct.min, ct.max, WithReuse(reuse)); err != nil { t.Error(err) continue } if got != reuse { t.Error("expected got == reuse") continue } assert.Equal(ct.correct, got.Data()) // unsafe if got, err = Clamp(T, ct.min, ct.max, UseUnsafe()); err != nil { t.Error(err) continue } if got != T { t.Error("expected got == T") continue } assert.Equal(ct.correct, got.Data()) } } var signTests = []struct { a, reuse interface{} correct interface{} correctSliced interface{} }{ {[]int{0, 1, 2, -2, -1}, []int{100, 10, 20, 30, 40}, []int{0, 1, 1, -1, -1}, []int{0, 1, 1, -1}}, {[]int8{0, 1, 2, -2, -1}, []int8{100, 10, 20, 30, 40}, []int8{0, 1, 1, -1, -1}, []int8{0, 1, 1, -1}}, {[]int16{0, 1, 2, -2, -1}, []int16{100, 10, 20, 30, 40}, []int16{0, 1, 1, -1, -1}, []int16{0, 1, 1, -1}}, {[]int32{0, 1, 2, -2, -1}, []int32{100, 10, 20, 30, 40}, []int32{0, 1, 1, -1, -1}, []int32{0, 1, 1, -1}}, {[]int64{0, 1, 2, -2, -1}, []int64{100, 10, 20, 30, 40}, []int64{0, 1, 1, -1, -1}, []int64{0, 1, 1, -1}}, {[]float32{0, 1, 2, -2, -1}, []float32{100, 10, 20, 30, 40}, []float32{0, 1, 1, -1, -1}, []float32{0, 1, 1, -1}}, {[]float64{0, 1, 2, -2, -1}, []float64{100, 10, 20, 30, 40}, []float64{0, 1, 1, -1, -1}, []float64{0, 1, 1, -1}}, } var signTestsMasked = []struct { a, reuse interface{} correct interface{} // correctSliced interface{} }{ {[]int{1, 2, -2, -1}, []int{10, 20, 30, 40}, []int{1, 1, -2, -1}}, {[]int8{1, 2, -2, -1}, []int8{10, 20, 30, 40}, []int8{1, 1, -2, -1}}, {[]int16{1, 2, -2, -1}, []int16{10, 20, 30, 40}, []int16{1, 1, -2, -1}}, 
{[]int32{1, 2, -2, -1}, []int32{10, 20, 30, 40}, []int32{1, 1, -2, -1}}, {[]int64{1, 2, -2, -1}, []int64{10, 20, 30, 40}, []int64{1, 1, -2, -1}}, {[]float32{1, 2, -2, -1}, []float32{10, 20, 30, 40}, []float32{1, 1, -2, -1}}, {[]float64{1, 2, -2, -1}, []float64{10, 20, 30, 40}, []float64{1, 1, -2, -1}}, } func TestSign(t *testing.T) { assert := assert.New(t) var got, sliced Tensor var T, reuse *Dense var err error for _, st := range signTests { T = New(WithBacking(st.a)) // safe if got, err = Sign(T); err != nil { t.Error(err) continue } if got == T { t.Error("expected got != T") continue } assert.Equal(st.correct, got.Data()) // sliced safe if sliced, err = T.Slice(makeRS(0, 4)); err != nil { t.Error("Unable to slice T") continue } if got, err = Sign(sliced); err != nil { t.Error(err) continue } assert.Equal(st.correctSliced, got.Data()) // reuse reuse = New(WithBacking(st.reuse)) if got, err = Sign(T, WithReuse(reuse)); err != nil { t.Error(err) continue } if got != reuse { t.Error("expected got == reuse") continue } assert.Equal(st.correct, got.Data()) // unsafe if got, err = Sign(T, UseUnsafe()); err != nil { t.Error(err) continue } if got != T { t.Error("expected got == T") continue } assert.Equal(st.correct, got.Data()) } } func TestSignMasked(t *testing.T) { assert := assert.New(t) var got Tensor var T, reuse *Dense var err error for _, st := range signTestsMasked { T = New(WithBacking(st.a, []bool{false, false, true, false})) // safe if got, err = Sign(T); err != nil { t.Error(err) continue } if got == T { t.Error("expected got != T") continue } assert.Equal(st.correct, got.Data()) // reuse reuse = New(WithBacking(st.reuse, []bool{false, false, true, false})) if got, err = Sign(T, WithReuse(reuse)); err != nil { t.Error(err) continue } if got != reuse { t.Error("expected got == reuse") continue } assert.Equal(st.correct, got.Data()) // unsafe if got, err = Sign(T, UseUnsafe()); err != nil { t.Error(err) continue } if got != T { t.Error("expected got == T") continue } assert.Equal(st.correct, got.Data()) } } var negTestsMasked = []struct { a, reuse interface{} correct interface{} }{ {[]int{1, 2, -2, -1}, []int{10, 20, 30, 40}, []int{-1, -2, -2, 1}}, {[]int8{1, 2, -2, -1}, []int8{10, 20, 30, 40}, []int8{-1, -2, -2, 1}}, {[]int16{1, 2, -2, -1}, []int16{10, 20, 30, 40}, []int16{-1, -2, -2, 1}}, {[]int32{1, 2, -2, -1}, []int32{10, 20, 30, 40}, []int32{-1, -2, -2, 1}}, {[]int64{1, 2, -2, -1}, []int64{10, 20, 30, 40}, []int64{-1, -2, -2, 1}}, {[]float32{1, 2, -2, -1}, []float32{10, 20, 30, 40}, []float32{-1, -2, -2, 1}}, {[]float64{1, 2, -2, -1}, []float64{10, 20, 30, 40}, []float64{-1, -2, -2, 1}}, } func TestNegMasked(t *testing.T) { assert := assert.New(t) var got Tensor var T, reuse *Dense var err error for _, st := range negTestsMasked { T = New(WithBacking(st.a, []bool{false, false, true, false})) // safe if got, err = Neg(T); err != nil { t.Error(err) continue } if got == T { t.Error("expected got != T") continue } assert.Equal(st.correct, got.Data()) // reuse reuse = New(WithBacking(st.reuse, []bool{false, false, true, false})) if got, err = Neg(T, WithReuse(reuse)); err != nil { t.Error(err) continue } if got != reuse { t.Error("expected got == reuse") continue } assert.Equal(st.correct, got.Data()) // unsafe if got, err = Neg(T, UseUnsafe()); err != nil { t.Error(err) continue } if got != T { t.Error("expected got == T") continue } assert.Equal(st.correct, got.Data()) } } func TestInvSqrt(t *testing.T) { var r *rand.Rand invFn := func(q *Dense) bool { a := q.Clone().(*Dense) b := 
q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(InvSqrter) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := InvSqrt(a) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } Sqrt(b, UseUnsafe()) Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests for InvSqrt failed: %v", err) } // unsafe invFn = func(q *Dense) bool { a := q.Clone().(*Dense) b := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(InvSqrter) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := InvSqrt(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } Sqrt(b, UseUnsafe()) Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for InvSqrt failed: %v", err) } // reuse invFn = func(q *Dense) bool { a := q.Clone().(*Dense) b := q.Clone().(*Dense) reuse := q.Clone().(*Dense) reuse.Zero() correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(InvSqrter) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := InvSqrt(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } Sqrt(b, UseUnsafe()) Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != reuse { t.Errorf("Expected ret to be the same as reuse") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests with reuse for InvSqrt failed: %v", err) } // incr invFn = func(q *Dense) bool { a := q.Clone().(*Dense) b := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(InvSqrter) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := InvSqrt(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Sqrt(b, UseUnsafe()) Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != incr { t.Errorf("Expected ret to be the same as incr") return false } return true } r = 
rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests with incr for InvSqrt failed: %v", err) } } func TestInv(t *testing.T) { var r *rand.Rand invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Inver) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Inv(a) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } Mul(ret, a, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests for Inv failed: %v", err) } // unsafe invFn = func(q *Dense) bool { a := q.Clone().(*Dense) b := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Inver) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Inv(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } Mul(ret, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Inv failed: %v", err) } // reuse invFn = func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) reuse := a.Clone().(*Dense) reuse.Zero() we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Inver) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Inv(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } Mul(ret, a, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != reuse { t.Errorf("Expected ret to be the same as reuse") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Inv failed: %v", err) } // incr invFn = func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Inver) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Inv(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } Mul(ret, a, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != incr { t.Errorf("Expected ret to be the same as 
incr") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Inv failed: %v", err) } } func TestLog10(t *testing.T) { var r *rand.Rand // default invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log10er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log10(a) if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly { if err != nil { return false } return true } ten := identityVal(10, a.Dtype()) Pow(ten, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests for Log10 failed: %v", err) } // unsafe invFn = func(q *Dense) bool { a := q.Clone().(*Dense) b := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log10er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log10(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly { if err != nil { return false } return true } ten := identityVal(10, a.Dtype()) Pow(ten, b, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Log10 failed: %v", err) } // reuse invFn = func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) reuse := a.Clone().(*Dense) reuse.Zero() we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log10er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log10(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly { if err != nil { return false } return true } ten := identityVal(10, a.Dtype()) Pow(ten, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != reuse { t.Errorf("Expected ret to be the same as reuse") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Log10 failed: %v", err) } // incr invFn = func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log10er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log10(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return 
false } ten := identityVal(10, a.Dtype()) Pow(ten, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != incr { t.Errorf("Expected ret to be the same as incr") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Log10 failed: %v", err) } } func TestAbs(t *testing.T) { var r *rand.Rand absFn := func(q *Dense) bool { a := q.Clone().(*Dense) zeros := New(Of(q.Dtype()), WithShape(q.Shape().Clone()...)) correct := New(Of(Bool), WithShape(q.Shape().Clone()...)) correct.Memset(true) // we'll exclude everything other than ordtypes because complex numbers cannot be abs'd if err := typeclassCheck(a.Dtype(), ordTypes); err != nil { return true } we, willFailEq := willerr(a, signedTypes, nil) _, ok := q.Engine().(Abser) we = we || !ok ret, err := Abs(a) if err, retEarly := qcErrCheck(t, "Abs", a, nil, we, err); retEarly { if err != nil { return false } return true } check, _ := Gte(ret, zeros) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), check.Data()) { return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(absFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests for Abs failed: %v", err) } } func TestTanh(t *testing.T) { var r *rand.Rand // default invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Tanher) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Tanh(a) if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly { if err != nil { return false } return true } switch a.Dtype() { case Float64: if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil { t.Error(err) return false } case Float32: if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil { t.Error(err) return false } } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests for Tanh failed: %v", err) } // unsafe invFn = func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Tanher) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Tanh(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly { if err != nil { return false } return true } switch a.Dtype() { case Float64: if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil { t.Error(err) return false } case Float32: if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil { t.Error(err) return false } } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Tanh failed: %v", err) } // reuse invFn = func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) reuse := a.Clone().(*Dense) reuse.Zero() we, willFailEq := willerr(a, floatTypes, nil) 
_, ok := q.Engine().(Tanher) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Tanh(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly { if err != nil { return false } return true } switch a.Dtype() { case Float64: if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil { t.Error(err) return false } case Float32: if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil { t.Error(err) return false } } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != reuse { t.Errorf("Expected ret to be the same as reuse") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Tanh failed: %v", err) } // incr invFn = func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Tanher) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Tanh(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } switch a.Dtype() { case Float64: if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil { t.Error(err) return false } case Float32: if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil { t.Error(err) return false } } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != incr { t.Errorf("Expected ret to be the same as incr") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Tanh failed: %v", err) } } func TestLog2(t *testing.T) { var r *rand.Rand // default invFn := func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log2er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log2(a) if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly { if err != nil { return false } return true } two := identityVal(2, a.Dtype()) Pow(two, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests for Log2 failed: %v", err) } // unsafe invFn = func(q *Dense) bool { a := q.Clone().(*Dense) b := q.Clone().(*Dense) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log2er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log2(a, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly { if err != nil { return false } return true } two := identityVal(2, a.Dtype()) Pow(two, b, UseUnsafe()) 
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Log2 failed: %v", err) } // reuse invFn = func(q *Dense) bool { a := q.Clone().(*Dense) correct := a.Clone().(*Dense) reuse := a.Clone().(*Dense) reuse.Zero() we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log2er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log2(a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly { if err != nil { return false } return true } two := identityVal(2, a.Dtype()) Pow(two, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != reuse { t.Errorf("Expected ret to be the same as reuse") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Log2 failed: %v", err) } // incr invFn = func(q *Dense) bool { a := q.Clone().(*Dense) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatTypes, nil) _, ok := q.Engine().(Log2er) we = we || !ok // we'll exclude everything other than floats if err := typeclassCheck(a.Dtype(), floatTypes); err != nil { return true } ret, err := Log2(a, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly { if err != nil { return false } return true } if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil { t.Errorf("err while subtracting incr: %v", err) return false } two := identityVal(2, a.Dtype()) Pow(two, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != incr { t.Errorf("Expected ret to be the same as incr") } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil { t.Errorf("Inv tests using unsafe for Log2 failed: %v", err) } }tensor-0.9.24/api_utils.go000066400000000000000000000037621426512615100154530ustar00rootroot00000000000000package tensor import ( "log" "math" "math/rand" "reflect" "sort" "github.com/chewxy/math32" ) // SortIndex is similar to numpy's argsort // TODO: tidy this up func SortIndex(in interface{}) (out []int) { switch list := in.(type) { case []int: orig := make([]int, len(list)) out = make([]int, len(list)) copy(orig, list) sort.Ints(list) for i, s := range list { for j, o := range orig { if o == s { out[i] = j break } } } case []float64: orig := make([]float64, len(list)) out = make([]int, len(list)) copy(orig, list) sort.Float64s(list) for i, s := range list { for j, o := range orig { if o == s { out[i] = j break } } } case sort.Interface: sort.Sort(list) log.Printf("TODO: SortIndex for sort.Interface not yet done.") } return } // SampleIndex samples a slice or a Tensor. // TODO: tidy this up. 
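// A minimal usage sketch (not from the original source; values are
// illustrative), treating the input as an already-normalised probability
// distribution:
//	probs := []float64{0.1, 0.2, 0.7} // should sum to 1 for a sensible draw
//	i := SampleIndex(probs)           // weighted draw; note the i > 0 guard
//	                                  // below means index 0 is never returned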
func SampleIndex(in interface{}) int { // var l int switch list := in.(type) { case []int: var sum, i int // l = len(list) r := rand.Int() for { sum += list[i] if sum > r && i > 0 { return i } i++ } case []float64: var sum float64 var i int // l = len(list) r := rand.Float64() for { sum += list[i] if sum > r && i > 0 { return i } i++ } case *Dense: var i int switch list.t.Kind() { case reflect.Float64: var sum float64 r := rand.Float64() data := list.Float64s() // l = len(data) for { datum := data[i] if math.IsNaN(datum) || math.IsInf(datum, 0) { return i } sum += datum if sum > r && i > 0 { return i } i++ } case reflect.Float32: var sum float32 r := rand.Float32() data := list.Float32s() // l = len(data) for { datum := data[i] if math32.IsNaN(datum) || math32.IsInf(datum, 0) { return i } sum += datum if sum > r && i > 0 { return i } i++ } default: panic("not yet implemented") } default: panic("Not yet implemented") } return -1 } tensor-0.9.24/array.go000066400000000000000000000247771426512615100146110ustar00rootroot00000000000000package tensor import ( "fmt" "reflect" "sync" "unsafe" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) // array is the underlying generic array. type array struct { storage.Header // the header - the Go representation (a slice) t Dtype // the element type } // makeArray makes an array. The memory allocation is handled by Go func makeArray(t Dtype, length int) array { v := malloc(t, length) hdr := storage.Header{ Raw: v, } return array{ Header: hdr, t: t, } } // arrayFromSlice creates an array from a slice. If x is not a slice, it will panic. func arrayFromSlice(x interface{}) array { xT := reflect.TypeOf(x) if xT.Kind() != reflect.Slice { panic("Expected a slice") } elT := xT.Elem() return array{ Header: storage.Header{ Raw: storage.AsByteSlice(x), }, t: Dtype{elT}, } } func (a *array) Len() int { return a.Header.TypedLen(a.t.Type) } func (a *array) Cap() int { return a.Header.TypedLen(a.t.Type) } // fromSlice populates the value from a slice func (a *array) fromSlice(x interface{}) { xT := reflect.TypeOf(x) if xT.Kind() != reflect.Slice { panic("Expected a slice") } elT := xT.Elem() a.Raw = storage.AsByteSlice(x) a.t = Dtype{elT} } // fromSliceOrTensor populates the value from a slice or anything that can form an array func (a *array) fromSliceOrArrayer(x interface{}) { if T, ok := x.(arrayer); ok { xp := T.arrPtr() // if the underlying array hasn't been allocated, or not enough has been allocated if a.Header.Raw == nil { a.Header.Raw = malloc(xp.t, xp.Len()) } a.t = xp.t copyArray(a, T.arrPtr()) return } a.fromSlice(x) } // byteSlice casts the underlying slice into a byte slice. Useful for copying and zeroing, but not much else func (a array) byteSlice() []byte { return a.Header.Raw } // sliceInto creates a slice. Instead of returning an array, which would cause a lot of reallocations, sliceInto expects a array to // already have been created. 
This allows repetitive actions to be done without having to have many pointless allocations func (a *array) sliceInto(i, j int, res *array) { c := a.Cap() if i < 0 || j < i || j > c { panic(fmt.Sprintf("Cannot slice %v - index %d:%d is out of bounds", a, i, j)) } s := i * int(a.t.Size()) e := j * int(a.t.Size()) c = c - i res.Raw = a.Raw[s:e] } // slice slices an array func (a array) slice(start, end int) array { if end > a.Len() { panic("Index out of range") } if end < start { panic("Index out of range") } s := start * int(a.t.Size()) e := end * int(a.t.Size()) return array{ Header: storage.Header{Raw: a.Raw[s:e]}, t: a.t, } } // swap swaps the elements i and j in the array func (a *array) swap(i, j int) { if a.t == String { ss := a.hdr().Strings() ss[i], ss[j] = ss[j], ss[i] return } if !isParameterizedKind(a.t.Kind()) { switch a.t.Size() { case 8: us := a.hdr().Uint64s() us[i], us[j] = us[j], us[i] case 4: us := a.hdr().Uint32s() us[i], us[j] = us[j], us[i] case 2: us := a.hdr().Uint16s() us[i], us[j] = us[j], us[i] case 1: us := a.hdr().Uint8s() us[i], us[j] = us[j], us[i] } return } size := int(a.t.Size()) tmp := make([]byte, size) bs := a.byteSlice() is := i * size ie := is + size js := j * size je := js + size copy(tmp, bs[is:ie]) copy(bs[is:ie], bs[js:je]) copy(bs[js:je], tmp) } /* *Array is a Memory */ // Uintptr returns the pointer of the first value of the slab func (a *array) Uintptr() uintptr { return uintptr(unsafe.Pointer(&a.Header.Raw[0])) } // MemSize returns how big the slice is in bytes func (a *array) MemSize() uintptr { return uintptr(len(a.Header.Raw)) } // Data returns the representation of a slice. func (a array) Data() interface{} { // build a type of []T shdr := reflect.SliceHeader{ Data: a.Uintptr(), Len: a.Len(), Cap: a.Cap(), } sliceT := reflect.SliceOf(a.t.Type) ptr := unsafe.Pointer(&shdr) val := reflect.Indirect(reflect.NewAt(sliceT, ptr)) return val.Interface() } // Zero zeroes out the underlying array of the *Dense tensor. func (a array) Zero() { if a.t.Kind() == reflect.String { ss := a.Strings() for i := range ss { ss[i] = "" } return } if !isParameterizedKind(a.t.Kind()) { ba := a.byteSlice() for i := range ba { ba[i] = 0 } return } l := a.Len() for i := 0; i < l; i++ { val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(reflect.Zero(a.t)) } } func (a *array) hdr() *storage.Header { return &a.Header } func (a *array) rtype() reflect.Type { return a.t.Type } /* MEMORY MOVEMENT STUFF */ // malloc is standard Go allocation of a block of memory - the plus side is that Go manages the memory func malloc(t Dtype, length int) []byte { size := int(calcMemSize(t, length)) return make([]byte, size) } // calcMemSize calculates the memory size of an array (given its size) func calcMemSize(dt Dtype, size int) int64 { return int64(dt.Size()) * int64(size) } // copyArray copies an array.
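// Both operands must share the same Dtype (it panics otherwise), and the
// number of elements copied is returned. A sketch of internal use with the
// unexported makeArray helper defined above (illustrative only):
//	dst := makeArray(Float64, 10)
//	src := makeArray(Float64, 10)
//	n := copyArray(&dst, &src) // n == 10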
func copyArray(dst, src *array) int { if dst.t != src.t { panic("Cannot copy arrays of different types.") } return storage.Copy(dst.t.Type, &dst.Header, &src.Header) } func copyArraySliced(dst array, dstart, dend int, src array, sstart, send int) int { if dst.t != src.t { panic("Cannot copy arrays of different types.") } return storage.CopySliced(dst.t.Type, &dst.Header, dstart, dend, &src.Header, sstart, send) } // copyDense copies a DenseTensor func copyDense(dst, src DenseTensor) int { if dst.Dtype() != src.Dtype() { panic("Cannot copy DenseTensors of different types") } if ms, ok := src.(MaskedTensor); ok && ms.IsMasked() { if md, ok := dst.(MaskedTensor); ok { dmask := md.Mask() smask := ms.Mask() if cap(dmask) < len(smask) { dmask = make([]bool, len(smask)) copy(dmask, md.Mask()) md.SetMask(dmask) } copy(dmask, smask) } } e := src.Engine() if err := e.Memcpy(dst.arrPtr(), src.arrPtr()); err != nil { panic(err) } return dst.len() // return copyArray(dst.arr(), src.arr()) } // copyDenseSliced copies a DenseTensor, but both are sliced func copyDenseSliced(dst DenseTensor, dstart, dend int, src DenseTensor, sstart, send int) int { if dst.Dtype() != src.Dtype() { panic("Cannot copy DenseTensors of different types") } if ms, ok := src.(MaskedTensor); ok && ms.IsMasked() { if md, ok := dst.(MaskedTensor); ok { dmask := md.Mask() smask := ms.Mask() if cap(dmask) < dend { dmask = make([]bool, dend) copy(dmask, md.Mask()) md.SetMask(dmask) } copy(dmask[dstart:dend], smask[sstart:send]) } } if e := src.Engine(); e != nil { darr := dst.arr() sarr := src.arr() da := darr.slice(dstart, dend) sa := sarr.slice(sstart, send) switch e.(type) { case NonStdEngine: if err := e.Memcpy(&da, &sa); err != nil { panic(err) } default: // THIS IS AN OPTIMIZATION. REVISIT WHEN NEEDED. // // THE PURPOSE of this optimization is to make this perform better under // default circumstances. // // The original code simply uses t.Engine().Memcpy(&dSlice, &tSlice). // A variant can still be seen in the NonStdEngine case above. // // The `array.slice()` method has been optimized to return `array2`, which is a // non-heap allocated type. // a value of `array2` cannot have its address taken - e.g. // var a array2 // doSomething(&a) // ← this cannot be done // // We *could* make `array2` implement Memory. But then a lot of runtime.convT2I and // runtime.convI2T would be called. Which defeats the purpose of making things fast. // // So instead, we check to see if the Engine uses standard allocation methods. // Typically this means `StdEng`. 
// // If so, we directly use storage.Copy instead of using the engine storage.Copy(da.t.Type, &da.Header, &sa.Header) } return da.Len() } return copyArraySliced(dst.arr(), dstart, dend, src.arr(), sstart, send) } // copyDenseIter copies a DenseTensor, with iterator func copyDenseIter(dst, src DenseTensor, diter, siter Iterator) (int, error) { if dst.Dtype() != src.Dtype() { panic("Cannot copy Dense arrays of different types") } // if they all don't need iterators, and have the same data order if !dst.RequiresIterator() && !src.RequiresIterator() && dst.DataOrder().HasSameOrder(src.DataOrder()) { return copyDense(dst, src), nil } if !dst.IsNativelyAccessible() { return 0, errors.Errorf(inaccessibleData, dst) } if !src.IsNativelyAccessible() { return 0, errors.Errorf(inaccessibleData, src) } if diter == nil { diter = FlatIteratorFromDense(dst) } if siter == nil { siter = FlatIteratorFromDense(src) } // if it's a masked tensor, we copy the mask as well if ms, ok := src.(MaskedTensor); ok && ms.IsMasked() { if md, ok := dst.(MaskedTensor); ok { dmask := md.Mask() smask := ms.Mask() if cap(dmask) < len(smask) { dmask = make([]bool, len(smask)) copy(dmask, md.Mask()) md.SetMask(dmask) } copy(dmask, smask) } } return storage.CopyIter(dst.rtype(), dst.hdr(), src.hdr(), diter, siter), nil } type scalarPtrCount struct { Ptr unsafe.Pointer Count int } // scalarRCLock is a lock for the reference counting list. var scalarRCLock sync.Mutex // scalarRC is a bunch of reference counted pointers to scalar values var scalarRC = make(map[uintptr]*sync.Pool) // uintptr is the size, the pool stores []byte func scalarPool(size uintptr) *sync.Pool { scalarRCLock.Lock() pool, ok := scalarRC[size] if !ok { pool = &sync.Pool{ New: func() interface{} { return make([]byte, size) }, } scalarRC[size] = pool } scalarRCLock.Unlock() return pool } func allocScalar(a interface{}) []byte { atype := reflect.TypeOf(a) size := atype.Size() pool := scalarPool(size) return pool.Get().([]byte) } func freeScalar(bs []byte) { if bs == nil { return } // zero out for i := range bs { bs[i] = 0 } size := uintptr(len(bs)) // put it back into pool pool := scalarPool(size) pool.Put(bs) } // scalarToHeader creates a Header from a scalar value func scalarToHeader(a interface{}) (hdr *storage.Header, newAlloc bool) { var raw []byte switch at := a.(type) { case Memory: raw = storage.FromMemory(at.Uintptr(), at.MemSize()) default: raw = allocScalar(a) newAlloc = true } hdr = borrowHeader() hdr.Raw = raw if newAlloc { copyScalarToPrealloc(a, hdr.Raw) } return hdr, newAlloc } func copyScalarToPrealloc(a interface{}, bs []byte) { xV := reflect.ValueOf(a) xT := reflect.TypeOf(a) p := unsafe.Pointer(&bs[0]) v := reflect.NewAt(xT, p) reflect.Indirect(v).Set(xV) return } tensor-0.9.24/array_getset.go000066400000000000000000000360051426512615100161470ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "reflect" "unsafe" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) // Set sets the value of the underlying array at the index i. 
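// The dynamic type of x must match the array's Dtype; the type assertions
// below panic otherwise. A sketch using the unexported helpers from array.go
// (illustrative only):
//	arr := arrayFromSlice([]float64{1, 2, 3})
//	arr.Set(1, float64(10)) // backing slice is now [1, 10, 3]
//	v := arr.Get(1)         // v.(float64) == 10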
func (a *array) Set(i int, x interface{}) { switch a.t.Kind() { case reflect.Bool: xv := x.(bool) a.SetB(i, xv) case reflect.Int: xv := x.(int) a.SetI(i, xv) case reflect.Int8: xv := x.(int8) a.SetI8(i, xv) case reflect.Int16: xv := x.(int16) a.SetI16(i, xv) case reflect.Int32: xv := x.(int32) a.SetI32(i, xv) case reflect.Int64: xv := x.(int64) a.SetI64(i, xv) case reflect.Uint: xv := x.(uint) a.SetU(i, xv) case reflect.Uint8: xv := x.(uint8) a.SetU8(i, xv) case reflect.Uint16: xv := x.(uint16) a.SetU16(i, xv) case reflect.Uint32: xv := x.(uint32) a.SetU32(i, xv) case reflect.Uint64: xv := x.(uint64) a.SetU64(i, xv) case reflect.Uintptr: xv := x.(uintptr) a.SetUintptr(i, xv) case reflect.Float32: xv := x.(float32) a.SetF32(i, xv) case reflect.Float64: xv := x.(float64) a.SetF64(i, xv) case reflect.Complex64: xv := x.(complex64) a.SetC64(i, xv) case reflect.Complex128: xv := x.(complex128) a.SetC128(i, xv) case reflect.String: xv := x.(string) a.SetStr(i, xv) case reflect.UnsafePointer: xv := x.(unsafe.Pointer) a.SetUnsafePointer(i, xv) default: xv := reflect.ValueOf(x) val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } } // Get returns the ith element of the underlying array of the *Dense tensor. func (a *array) Get(i int) interface{} { switch a.t.Kind() { case reflect.Bool: return a.GetB(i) case reflect.Int: return a.GetI(i) case reflect.Int8: return a.GetI8(i) case reflect.Int16: return a.GetI16(i) case reflect.Int32: return a.GetI32(i) case reflect.Int64: return a.GetI64(i) case reflect.Uint: return a.GetU(i) case reflect.Uint8: return a.GetU8(i) case reflect.Uint16: return a.GetU16(i) case reflect.Uint32: return a.GetU32(i) case reflect.Uint64: return a.GetU64(i) case reflect.Uintptr: return a.GetUintptr(i) case reflect.Float32: return a.GetF32(i) case reflect.Float64: return a.GetF64(i) case reflect.Complex64: return a.GetC64(i) case reflect.Complex128: return a.GetC128(i) case reflect.String: return a.GetStr(i) case reflect.UnsafePointer: return a.GetUnsafePointer(i) default: val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) return val.Interface() } } // Memset sets all values in the array. 
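// A matching Dtype takes the fast typed path below; anything else falls
// through to the reflect-based loop. A sketch (illustrative only):
//	arr := arrayFromSlice(make([]int, 4))
//	_ = arr.Memset(7) // backing slice is now [7, 7, 7, 7]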
func (a *array) Memset(x interface{}) error { switch a.t { case Bool: if xv, ok := x.(bool); ok { data := a.Bools() for i := range data { data[i] = xv } return nil } case Int: if xv, ok := x.(int); ok { data := a.Ints() for i := range data { data[i] = xv } return nil } case Int8: if xv, ok := x.(int8); ok { data := a.Int8s() for i := range data { data[i] = xv } return nil } case Int16: if xv, ok := x.(int16); ok { data := a.Int16s() for i := range data { data[i] = xv } return nil } case Int32: if xv, ok := x.(int32); ok { data := a.Int32s() for i := range data { data[i] = xv } return nil } case Int64: if xv, ok := x.(int64); ok { data := a.Int64s() for i := range data { data[i] = xv } return nil } case Uint: if xv, ok := x.(uint); ok { data := a.Uints() for i := range data { data[i] = xv } return nil } case Uint8: if xv, ok := x.(uint8); ok { data := a.Uint8s() for i := range data { data[i] = xv } return nil } case Uint16: if xv, ok := x.(uint16); ok { data := a.Uint16s() for i := range data { data[i] = xv } return nil } case Uint32: if xv, ok := x.(uint32); ok { data := a.Uint32s() for i := range data { data[i] = xv } return nil } case Uint64: if xv, ok := x.(uint64); ok { data := a.Uint64s() for i := range data { data[i] = xv } return nil } case Uintptr: if xv, ok := x.(uintptr); ok { data := a.Uintptrs() for i := range data { data[i] = xv } return nil } case Float32: if xv, ok := x.(float32); ok { data := a.Float32s() for i := range data { data[i] = xv } return nil } case Float64: if xv, ok := x.(float64); ok { data := a.Float64s() for i := range data { data[i] = xv } return nil } case Complex64: if xv, ok := x.(complex64); ok { data := a.Complex64s() for i := range data { data[i] = xv } return nil } case Complex128: if xv, ok := x.(complex128); ok { data := a.Complex128s() for i := range data { data[i] = xv } return nil } case String: if xv, ok := x.(string); ok { data := a.Strings() for i := range data { data[i] = xv } return nil } case UnsafePointer: if xv, ok := x.(unsafe.Pointer); ok { data := a.UnsafePointers() for i := range data { data[i] = xv } return nil } } xv := reflect.ValueOf(x) l := a.Len() for i := 0; i < l; i++ { val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } return nil } func (a *array) memsetIter(x interface{}, it Iterator) (err error) { var i int switch a.t { case Bool: xv, ok := x.(bool) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Bools() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Int: xv, ok := x.(int) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Ints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Int8: xv, ok := x.(int8) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Int8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Int16: xv, ok := x.(int16) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Int16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Int32: xv, ok := x.(int32) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Int32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Int64: xv, ok := x.(int64) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Int64s() for i, err = it.Next(); err == nil; i, err 
= it.Next() { data[i] = xv } err = handleNoOp(err) case Uint: xv, ok := x.(uint) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Uints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Uint8: xv, ok := x.(uint8) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Uint8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Uint16: xv, ok := x.(uint16) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Uint16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Uint32: xv, ok := x.(uint32) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Uint32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Uint64: xv, ok := x.(uint64) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Uint64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Uintptr: xv, ok := x.(uintptr) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Uintptrs() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Float32: xv, ok := x.(float32) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Float32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Float64: xv, ok := x.(float64) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Float64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Complex64: xv, ok := x.(complex64) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Complex64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case Complex128: xv, ok := x.(complex128) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Complex128s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case String: xv, ok := x.(string) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.Strings() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) case UnsafePointer: xv, ok := x.(unsafe.Pointer) if !ok { return errors.Errorf(dtypeMismatch, a.t, x) } data := a.UnsafePointers() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = xv } err = handleNoOp(err) default: xv := reflect.ValueOf(x) for i, err = it.Next(); err == nil; i, err = it.Next() { val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(xv) } err = handleNoOp(err) } return } // Eq checks that any two arrays are equal func (a array) Eq(other interface{}) bool { if oa, ok := other.(*array); ok { if oa.t != a.t { return false } if oa.Len() != a.Len() { return false } /* if oa.C != a.C { return false } */ // same exact thing if uintptr(unsafe.Pointer(&oa.Header.Raw[0])) == uintptr(unsafe.Pointer(&a.Header.Raw[0])) { return true } switch a.t.Kind() { case reflect.Bool: for i, v := range a.Bools() { if oa.GetB(i) != v { return false } } case reflect.Int: for i, v := range a.Ints() { if oa.GetI(i) != v { return false } } case reflect.Int8: for i, v := range a.Int8s() { if oa.GetI8(i) != v { return false } } case reflect.Int16: for i, v := range a.Int16s() { if oa.GetI16(i) != v { return false } } case reflect.Int32: for i, v 
:= range a.Int32s() { if oa.GetI32(i) != v { return false } } case reflect.Int64: for i, v := range a.Int64s() { if oa.GetI64(i) != v { return false } } case reflect.Uint: for i, v := range a.Uints() { if oa.GetU(i) != v { return false } } case reflect.Uint8: for i, v := range a.Uint8s() { if oa.GetU8(i) != v { return false } } case reflect.Uint16: for i, v := range a.Uint16s() { if oa.GetU16(i) != v { return false } } case reflect.Uint32: for i, v := range a.Uint32s() { if oa.GetU32(i) != v { return false } } case reflect.Uint64: for i, v := range a.Uint64s() { if oa.GetU64(i) != v { return false } } case reflect.Uintptr: for i, v := range a.Uintptrs() { if oa.GetUintptr(i) != v { return false } } case reflect.Float32: for i, v := range a.Float32s() { if oa.GetF32(i) != v { return false } } case reflect.Float64: for i, v := range a.Float64s() { if oa.GetF64(i) != v { return false } } case reflect.Complex64: for i, v := range a.Complex64s() { if oa.GetC64(i) != v { return false } } case reflect.Complex128: for i, v := range a.Complex128s() { if oa.GetC128(i) != v { return false } } case reflect.String: for i, v := range a.Strings() { if oa.GetStr(i) != v { return false } } case reflect.UnsafePointer: for i, v := range a.UnsafePointers() { if oa.GetUnsafePointer(i) != v { return false } } default: for i := 0; i < a.Len(); i++ { if !reflect.DeepEqual(a.Get(i), oa.Get(i)) { return false } } } return true } return false } func (a *array) zeroIter(it Iterator) (err error) { var i int switch a.t { case Bool: data := a.Bools() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = false } err = handleNoOp(err) case Int: data := a.Ints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int8: data := a.Int8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int16: data := a.Int16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int32: data := a.Int32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Int64: data := a.Int64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint: data := a.Uints() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint8: data := a.Uint8s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint16: data := a.Uint16s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint32: data := a.Uint32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uint64: data := a.Uint64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Uintptr: data := a.Uintptrs() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Float32: data := a.Float32s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Float64: data := a.Float64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Complex64: data := a.Complex64s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case Complex128: data := a.Complex128s() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = 0 } err = handleNoOp(err) case String: data := 
a.Strings() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = "" } err = handleNoOp(err) case UnsafePointer: data := a.UnsafePointers() for i, err = it.Next(); err == nil; i, err = it.Next() { data[i] = nil } err = handleNoOp(err) default: for i, err = it.Next(); err == nil; i, err = it.Next() { val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(reflect.Zero(a.t)) } err = handleNoOp(err) } return } tensor-0.9.24/bench.sh000077500000000000000000000010151426512615100145360ustar00rootroot00000000000000#!/bin/sh old=$1; new=$2; git checkout $old # https://stackoverflow.com/a/2111099 branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,') echo "Benchmarking $branch (old)" go test -run=$^ -bench=. > ${branch}.bench for i in {1..10} do go test -run=$^ -bench=. >> ${branch}.bench done git checkout $new branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,') echo "Benchmarking $branch (new)" go test -run=$^ -bench=. > ${branch}.bench for i in {1..10} do go test -run=$^ -bench=. >> ${branch}.bench done tensor-0.9.24/benchmark_dense_arith_test.go000066400000000000000000000042531426512615100210140ustar00rootroot00000000000000package tensor import ( "testing" "gorgonia.org/vecf64" ) func BenchmarkDense_Mul_Unsafe(b *testing.B) { A := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 0, 100*100*2))) B := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 1, (100*100*2)+1))) for i := 0; i < b.N; i++ { A.Mul(B, UseUnsafe()) } } func BenchmarkNative_Mul_Unsafe(b *testing.B) { A := Range(Float64, 0, 100*100*2).([]float64) B := Range(Float64, 1, (100*100*2)+1).([]float64) f := func(a, b []float64) { for i, v := range a { a[i] = v * b[i] } } for i := 0; i < b.N; i++ { f(A, B) } } func BenchmarkNative_Mul_Unsafe_vec(b *testing.B) { A := Range(Float64, 0, 100*100*2).([]float64) B := Range(Float64, 1, (100*100*2)+1).([]float64) for i := 0; i < b.N; i++ { vecf64.Mul(A, B) } } func BenchmarkAPI_Mul_Unsafe(b *testing.B) { A := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 0, 100*100*2))) B := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 1, (100*100*2)+1))) for i := 0; i < b.N; i++ { Mul(A, B, UseUnsafe()) } } func BenchmarkDense_ContiguousSliced_Mul_Unsafe(b *testing.B) { A := New(WithShape(4, 100, 100), WithBacking(Range(Float64, 0, 100*100*4))) B := New(WithShape(2, 100, 100), WithBacking(Range(Float64, 1, (100*100*2)+1))) Sliced, _ := A.Slice(makeRS(1, 3)) // result should be contiguous for i := 0; i < b.N; i++ { Mul(Sliced, B, UseUnsafe()) } } func BenchmarkDense_NonContiguousSliced_Mul_Unsafe(b *testing.B) { A := New(WithShape(100, 4, 100), WithBacking(Range(Float64, 0, 100*100*4))) B := New(WithShape(100, 2, 100), WithBacking(Range(Float64, 1, (100*100*2)+1))) Sliced, _ := A.Slice(nil, makeRS(1, 3)) // result should be non-contiguous for i := 0; i < b.N; i++ { Mul(Sliced, B, UseUnsafe()) } } func BenchmarkAPI_MulScalar_Unsafe(b *testing.B) { A := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 0, 100*100*2))) B := 3.141 for i := 0; i < b.N; i++ { Mul(A, B, UseUnsafe()) } } func BenchmarkNative_MulScalar_Unsafe(b *testing.B) { A := Range(Float64, 0, 100*100*2).([]float64) B := 3.141 f := func(a []float64, b float64) { for i := range a { a[i] *= b } } for i := 0; i < b.N; i++ { f(A, B) } } tensor-0.9.24/benchmark_dense_matop_test.go000066400000000000000000000052241426512615100210240ustar00rootroot00000000000000package tensor import ( "math/rand" "testing" ) func 
BenchmarkDense_Transpose(b *testing.B) { T := New(WithShape(100, 100, 2), WithBacking(Range(Byte, 0, 100*100*2))) for i := 0; i < b.N; i++ { T.T() T.Transpose() } } func BenchmarkNativeSet(b *testing.B) { T := New(WithShape(10000), Of(Float64)) data := T.Data().([]float64) for i := 0; i < b.N; i++ { for next := 0; next < 10000; next++ { data[next] = float64(next + 1) } } } func BenchmarkSetMethod(b *testing.B) { T := New(WithShape(10000), Of(Float64)) for i := 0; i < b.N; i++ { for next := 0; next < 10000; next++ { T.Set(next, float64(next+1)) } } } func BenchmarkNativeGet(b *testing.B) { T := New(WithShape(10000), Of(Float64)) data := T.Data().([]float64) var f float64 for i := 0; i < b.N; i++ { for next := 0; next < 10000; next++ { f = data[next] } } _ = f } func BenchmarkGetMethod(b *testing.B) { T := New(WithShape(10000), Of(Float64)) var f float64 for i := 0; i < b.N; i++ { for next := 0; next < 10000; next++ { f = T.Get(next).(float64) } } _ = f } func BenchmarkGetWithIterator(b *testing.B) { T := New(WithShape(100, 100), Of(Float64)) var f float64 data := T.Data().([]float64) for i := 0; i < b.N; i++ { it := IteratorFromDense(T) var next int var err error for next, err = it.Start(); err == nil; next, err = it.Next() { f = data[next] } if _, ok := err.(NoOpError); !ok { b.Errorf("Error: %v", err) } } _ = f } func BenchmarkComplicatedGet(b *testing.B) { T := New(WithShape(101, 1, 36, 5), Of(Float64)) T.T(0, 2, 1, 3) data := T.Data().([]float64) var f float64 b.ResetTimer() for i := 0; i < b.N; i++ { it := IteratorFromDense(T) var next int var err error for next, err = it.Start(); err == nil; next, err = it.Next() { f = data[next] } if _, ok := err.(NoOpError); !ok { b.Errorf("Error: %v", err) } } _ = f } var atCoords [10000][2]int func init() { for i := range atCoords { atCoords[i][0] = rand.Intn(100) atCoords[i][1] = rand.Intn(100) } } var at1, at2 float64 // func BenchmarkAtWithNativeIterator(b *testing.B) { // T := New(WithShape(100, 100), Of(Float64)) // it, err := NativeMatrixF64(T) // if err != nil { // b.Fatalf("Error: %v", err) // } // var j int // for i := 0; i < b.N; i++ { // if j >= len(atCoords) { // j = 0 // } // at := atCoords[j] // at1 = it[at[0]][at[1]] // j++ // } // } func BenchmarkAt(b *testing.B) { T := New(WithShape(100, 100), Of(Float64)) var j int for i := 0; i < b.N; i++ { if j >= len(atCoords) { j = 0 } at := atCoords[j] _, err := T.At(at[0], at[1]) if err != nil { b.Errorf("Error: %v", err) } j++ } } tensor-0.9.24/benchmark_dense_repeat_test.go000066400000000000000000000004001426512615100211530ustar00rootroot00000000000000package tensor import "testing" func BenchmarkDenseRepeat(b *testing.B) { for _, tst := range repeatTests { tst := tst b.Run(tst.name, func(b *testing.B) { for n := 0; n < b.N; n++ { tst.tensor.Repeat(tst.axis, tst.repeats...) } }) } } tensor-0.9.24/bitmap.go000066400000000000000000000022451426512615100147310ustar00rootroot00000000000000package tensor // BitMap is a very simple bitmap. It only supports Set, IsSet and Clear methods. It's mostly used for tracking which element has been set type BitMap struct { n []uint64 max int } // NewBitMap creates a new BitMap. func NewBitMap(size int) *BitMap { q, r := divmod(size, 64) if r > 0 { q++ } return &BitMap{ n: make([]uint64, q), max: size, } } // Set sets the ith bit of the bit map to 1. 
It panics if i is greater or equal to the defined max func (bm *BitMap) Set(i int) { if i >= bm.max || i < 0 { panic("Index out of range") } block, pos := divmod(i, 64) bm.n[block] |= uint64(1) << uint64(pos) } // IsSet returns true if the ith bit is set. It panics if the i is greater or equal to the defined max func (bm *BitMap) IsSet(i int) bool { if i >= bm.max || i < 0 { panic("Index out of range") } block, pos := divmod(i, 64) return bm.n[block]>>uint64(pos)&uint64(1) == uint64(1) } // Clear clears the ith bit. It panics if i is greater or equal to the defined max func (bm *BitMap) Clear(i int) { if i >= bm.max || i < 0 { panic("Index out of range") } block, pos := divmod(i, 64) bm.n[block] &= ^(uint64(1) << uint64(pos)) } tensor-0.9.24/bitmap_test.go000066400000000000000000000031721426512615100157700ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) func TestBitMap(t *testing.T) { assert := assert.New(t) bm := NewBitMap(64) assert.Equal(1, len(bm.n)) track := uint64(0) for i := 0; i < 64; i++ { bm.Set(i) track |= uint64(1) << uint64(i) assert.Equal(track, bm.n[0]) assert.Equal(true, bm.IsSet(i)) if i < 63 { assert.Equal(false, bm.IsSet(i+1)) } else { fails := func() { bm.IsSet(i + 1) } assert.Panics(fails) } } for i := 0; i < 64; i++ { bm.Clear(i) track &= ^(uint64(1) << uint64(i)) assert.Equal(track, bm.n[0]) assert.Equal(false, bm.IsSet(i)) } bm = NewBitMap(124) assert.Equal(2, len(bm.n)) track0 := uint64(0) track1 := uint64(0) for i := 0; i < 128; i++ { if i < 124 { bm.Set(i) } else { fails := func() { bm.Set(i) } assert.Panics(fails) } if i < 64 { track0 |= uint64(1) << uint64(i) assert.Equal(track0, bm.n[0]) assert.Equal(true, bm.IsSet(i)) } else if i > 123 { fails := func() { bm.IsSet(i) } assert.Panics(fails) } else { track1 |= uint64(1) << uint64(i-64) assert.Equal(track1, bm.n[1]) assert.Equal(true, bm.IsSet(i)) } if i < 123 { assert.Equal(false, bm.IsSet(i+1)) } else { fails := func() { bm.IsSet(i + 1) } assert.Panics(fails) } } for i := 48; i < 70; i++ { bm.Clear(i) } for i := 48; i < 70; i++ { assert.Equal(false, bm.IsSet(i)) } fails := func() { bm.Clear(125) } assert.Panics(fails) // idiots section! bm = NewBitMap(3) fails = func() { bm.Set(-1) } assert.Panics(fails) fails = func() { bm.Set(3) } assert.Panics(fails) } tensor-0.9.24/blas.go000066400000000000000000000024541426512615100144000ustar00rootroot00000000000000package tensor import ( "sync" "gonum.org/v1/gonum/blas" "gonum.org/v1/gonum/blas/gonum" ) var blasdoor sync.Mutex var whichblas BLAS // BLAS represents all the possible implementations of BLAS. // The default is Gonum's Native type BLAS interface { blas.Float32 blas.Float64 blas.Complex64 blas.Complex128 } // only blastoise.Implementation() and cubone.Implementation() are batchedBLAS - // they both batch cgo calls (and cubone batches cuda calls) type batchedBLAS interface { WorkAvailable() int DoWork() BLAS } // Use defines which BLAS implementation gorgonia should use. // The default is Gonum's Native. These are the other options: // Use(blastoise.Implementation()) // Use(cubone.Implementation()) // Use(cgo.Implementation) // Note the differences in the brackets. The blastoise and cubone ones are functions. func Use(b BLAS) { // close the blast door! close the blast door! blasdoor.Lock() // open the blast door! open the blast door! defer blasdoor.Unlock() // those lines were few of the better additions to the Special Edition. There, I said it. The Special Edition is superior. 
Except Han still shot first in my mind. whichblas = b } // WhichBLAS returns the BLAS that gorgonia uses. func WhichBLAS() BLAS { return whichblas } func init() { whichblas = gonum.Implementation{} } tensor-0.9.24/collections.go000066400000000000000000000013121426512615100157650ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" func densesToTensors(a []*Dense) []Tensor { retVal := make([]Tensor, len(a)) for i, t := range a { retVal[i] = t } return retVal } func densesToDenseTensors(a []*Dense) []DenseTensor { retVal := make([]DenseTensor, len(a)) for i, t := range a { retVal[i] = t } return retVal } func tensorsToDenseTensors(a []Tensor) ([]DenseTensor, error) { retVal := make([]DenseTensor, len(a)) var ok bool for i, t := range a { if retVal[i], ok = t.(DenseTensor); !ok { return nil, errors.Errorf("can only convert Tensors of the same type to DenseTensors. Trying to convert %T (#%d in slice)", t, i) } } return retVal, nil } tensor-0.9.24/consopt.go000066400000000000000000000144241426512615100151440ustar00rootroot00000000000000package tensor import ( "reflect" "gorgonia.org/tensor/internal/storage" ) // ConsOpt is a tensor construction option. type ConsOpt func(Tensor) // Of is a construction option for a Tensor. func Of(a Dtype) ConsOpt { Register(a) f := func(t Tensor) { switch tt := t.(type) { case *Dense: tt.t = a case *CS: tt.t = a default: panic("Unsupported Tensor type") } } return f } // WithBacking is a construction option for a Tensor // Use it as such: // backing := []float64{1,2,3,4} // t := New(WithBacking(backing)) // It can be used with other construction options like WithShape func WithBacking(x interface{}, argMask ...[]bool) ConsOpt { var mask []bool if len(argMask) > 0 { mask = argMask[0] } f := func(t Tensor) { if x == nil { return } switch tt := t.(type) { case *Dense: tt.fromSlice(x) if len(argMask) > 0 { tt.addMask(mask) } default: panic("Unsupported Tensor type") } } return f } // WithMask is a construction option for a Tensor // Use it as such: // mask := []bool{true,true,false,false} // t := New(WithBacking(backing), WithMask(mask)) // It can be used with other construction options like WithShape // The supplied mask can be any type. If non-boolean, then tensor mask is set to true // wherever non-zero value is obtained func WithMask(x interface{}) ConsOpt { f := func(t Tensor) { if x == nil { return } switch tt := t.(type) { case *Dense: tt.MaskFromSlice(x) default: panic("Unsupported Tensor type") } } return f } // WithShape is a construction option for a Tensor. It creates the ndarray in the required shape. func WithShape(dims ...int) ConsOpt { f := func(t Tensor) { switch tt := t.(type) { case *Dense: throw := BorrowInts(len(dims)) copy(throw, dims) tt.setShape(throw...) 
case *CS: if len(dims) != 2 { panic("Only sparse matrices are supported") } throw := BorrowInts(len(dims)) copy(throw, dims) tt.s = throw default: panic("Unsupported Tensor type") } } return f } // FromScalar is a construction option for representing a scalar value as a Tensor func FromScalar(x interface{}, argMask ...[]bool) ConsOpt { var mask []bool if len(argMask) > 0 { mask = argMask[0] } f := func(t Tensor) { switch tt := t.(type) { case *Dense: xT := reflect.TypeOf(x) sxT := reflect.SliceOf(xT) xv := reflect.MakeSlice(sxT, 1, 1) // []T xv0 := xv.Index(0) // xv[0] xv0.Set(reflect.ValueOf(x)) tt.array.Header.Raw = storage.AsByteSlice(xv.Interface()) tt.t = Dtype{xT} tt.mask = mask default: panic("Unsupported Tensor Type") } } return f } // FromMemory is a construction option for creating a *Dense (for now) from memory location. This is a useful // option for super large tensors that don't fit into memory - the user may need to `mmap` a file the tensor. // // Bear in mind that at the current stage of the ConsOpt design, the order of the ConsOpt is important. // FromMemory requires the *Dense's Dtype be set already. // This would fail (and panic): // New(FromMemory(ptr, size), Of(Float64)) // This would not: // New(Of(Float64), FromMemory(ptr, size)) // This behaviour of requiring the ConsOpts to be in order might be changed in the future. // // Memory must be manually managed by the caller. // Tensors called with this construction option will not be returned to any pool - rather, all references to the pointers will be null'd. // Use with caution. //go:nocheckptr func FromMemory(ptr uintptr, memsize uintptr) ConsOpt { f := func(t Tensor) { switch tt := t.(type) { case *Dense: tt.Header.Raw = nil // GC anything if needed tt.Header.Raw = storage.FromMemory(ptr, memsize) tt.flag = MakeMemoryFlag(tt.flag, ManuallyManaged) default: panic("Unsupported Tensor type") } } return f } // WithEngine is a construction option that would cause a Tensor to be linked with an execution engine. func WithEngine(e Engine) ConsOpt { f := func(t Tensor) { switch tt := t.(type) { case *Dense: tt.e = e if e != nil && !e.AllocAccessible() { tt.flag = MakeMemoryFlag(tt.flag, NativelyInaccessible) } tt.oe = nil if oe, ok := e.(standardEngine); ok { tt.oe = oe } case *CS: tt.e = e if e != nil && !e.AllocAccessible() { tt.f = MakeMemoryFlag(tt.f, NativelyInaccessible) } } } return f } // AsFortran creates a *Dense with a col-major layout. // If the optional backing argument is passed, the backing is assumed to be C-order (row major), and // it will be transposed before being used. 
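// A sketch (illustrative values): the row-major backing below represents
// [[1 2] [3 4]], which AsFortran stores column-major as 1 3 2 4.
//	T := New(WithShape(2, 2), AsFortran([]float64{1, 2, 3, 4}))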
func AsFortran(backing interface{}, argMask ...[]bool) ConsOpt { var mask []bool if len(argMask) > 0 { mask = argMask[0] } f := func(t Tensor) { switch tt := t.(type) { case *Dense: if backing != nil { // put the data into the tensor, then make a clone tensor to transpose tt.fromSliceOrArrayer(backing) // create a temporary tensor, to which the transpose will be done tmp := NewDense(tt.Dtype(), tt.shape.Clone()) copyArray(tmp.arrPtr(), tt.arrPtr()) tmp.SetMask(mask) tmp.T() tmp.Transpose() // copy the data back to the current tensor copyArray(tt.arrPtr(), tmp.arrPtr()) tt.SetMask(tmp.Mask()) // cleanup: return the temporary tensor back to the pool ReturnTensor(tmp) } tt.AP.o = MakeDataOrder(tt.AP.o, ColMajor) if tt.AP.shape != nil { ReturnInts(tt.AP.strides) tt.AP.strides = nil tt.AP.strides = tt.AP.calcStrides() } case *CS: panic("AsFortran is not an available option for Compressed Sparse layouts") } } return f } func AsDenseDiag(backing interface{}) ConsOpt { f := func(t Tensor) { switch tt := t.(type) { case *Dense: if bt, ok := backing.(Tensor); ok { backing = bt.Data() } xT := reflect.TypeOf(backing) if xT.Kind() != reflect.Slice { panic("Expected a slice") } xV := reflect.ValueOf(backing) l := xV.Len() // elT := xT.Elem() sli := reflect.MakeSlice(xT, l*l, l*l) shape := Shape{l, l} strides := shape.CalcStrides() for i := 0; i < l; i++ { idx, err := Ltoi(shape, strides, i, i) if err != nil { panic(err) } at := sli.Index(idx) xi := xV.Index(i) at.Set(xi) } tt.fromSliceOrArrayer(sli.Interface()) tt.setShape(l, l) default: panic("AsDenseDiag is not available as an option for CS") } } return f } tensor-0.9.24/consopt_test.go000066400000000000000000000043211426512615100161760ustar00rootroot00000000000000// +build linux package tensor import ( "fmt" "io/ioutil" "os" "syscall" "testing" "testing/quick" "unsafe" "github.com/stretchr/testify/assert" ) type F64 float64 func newF64(f float64) *F64 { r := F64(f); return &r } func (f *F64) Uintptr() uintptr { return uintptr(unsafe.Pointer(f)) } func (f *F64) MemSize() uintptr { return 8 } func (f *F64) Pointer() unsafe.Pointer { return unsafe.Pointer(f) } func Test_FromMemory(t *testing.T) { fn := func(F float64) bool { f := newF64(F) T := New(WithShape(), Of(Float64), FromMemory(f.Uintptr(), f.MemSize())) data := T.Data().(float64) if data != F { return false } return true } if err := quick.Check(fn, &quick.Config{MaxCount: 1000000}); err != nil { t.Logf("%v", err) } f, err := ioutil.TempFile("", "test") if err != nil { t.Fatal(err) } // fill in with fake data backing := make([]byte, 8*1024*1024) // 1024*1024 matrix of float64 asFloats := *(*[]float64)(unsafe.Pointer(&backing)) asFloats = asFloats[: 1024*1024 : 1024*1024] asFloats[0] = 3.14 asFloats[2] = 6.28 asFloats[1024*1024-1] = 3.14 asFloats[1024*1024-3] = 6.28 f.Write(backing) // defer cleanup defer os.Remove(f.Name()) // do the mmap stuff stat, err := f.Stat() if err != nil { t.Fatal(err) } size := int(stat.Size()) fd := int(f.Fd()) bs, err := syscall.Mmap(fd, 0, size, syscall.PROT_READ, syscall.MAP_SHARED) if err != nil { t.Fatal(err) } defer func() { if err := syscall.Munmap(bs); err != nil { t.Error(err) } }() T := New(WithShape(1024, 1024), Of(Float64), FromMemory(uintptr(unsafe.Pointer(&bs[0])), uintptr(size))) s := fmt.Sprintf("%v", T) expected := `⎡3.14 0 6.28 0 ... 0 0 0 0⎤ ⎢ 0 0 0 0 ... 0 0 0 0⎥ ⎢ 0 0 0 0 ... 0 0 0 0⎥ ⎢ 0 0 0 0 ... 0 0 0 0⎥ . . . ⎢ 0 0 0 0 ... 0 0 0 0⎥ ⎢ 0 0 0 0 ... 0 0 0 0⎥ ⎢ 0 0 0 0 ... 0 0 0 0⎥ ⎣ 0 0 0 0 ... 
0 6.28 0 3.14⎦ ` if s != expected { t.Errorf("Expected mmap'd tensor to be exactly the same.") } assert.True(t, T.IsManuallyManaged()) } tensor-0.9.24/debug.go000066400000000000000000000042731426512615100145460ustar00rootroot00000000000000// +build debug package tensor import ( "fmt" "log" "os" "reflect" "runtime/debug" "strings" "sync/atomic" "unsafe" ) var TABCOUNT uint32 var TRACK = false const DEBUG = true var _logger_ = log.New(os.Stderr, "", 0) var replacement = "\n" func tabcount() int { return int(atomic.LoadUint32(&TABCOUNT)) } func enterLoggingContext() { atomic.AddUint32(&TABCOUNT, 1) tabcount := tabcount() _logger_.SetPrefix(strings.Repeat("\t", tabcount)) replacement = "\n" + strings.Repeat("\t", tabcount) } func leaveLoggingContext() { tabcount := tabcount() tabcount-- if tabcount < 0 { atomic.StoreUint32(&TABCOUNT, 0) tabcount = 0 } else { atomic.StoreUint32(&TABCOUNT, uint32(tabcount)) } _logger_.SetPrefix(strings.Repeat("\t", tabcount)) replacement = "\n" + strings.Repeat("\t", tabcount) } func logf(format string, others ...interface{}) { if DEBUG { // format = strings.Replace(format, "\n", replacement, -1) s := fmt.Sprintf(format, others...) s = strings.Replace(s, "\n", replacement, -1) _logger_.Println(s) // _logger_.Printf(format, others...) } } var stats = new(debug.GCStats) func loggc() { debug.ReadGCStats(stats) log.Printf("NUMGC: %v", stats.NumGC) } func init() { debug.SetPanicOnFault(true) debug.SetTraceback("all") } type rtype struct { size uintptr ptrdata uintptr // number of bytes in the type that can contain pointers hash uint32 // hash of type; avoids computation in hash tables tflag uint8 // extra type information flags align uint8 // alignment of variable with this type fieldAlign uint8 // alignment of struct field with this type kind uint8 // enumeration for C alg uintptr // algorithm table gcdata uintptr // garbage collection data str int32 // string form ptrToThis int32 // type for pointer to this type, may be zero } func (t *rtype) Format(s fmt.State, c rune) { fmt.Fprintf(s, "size: %d pointers: %d, hash: 0x%x, flag: %d, align: %d, kind: %d", t.size, t.ptrdata, t.hash, t.tflag, t.align, t.kind) } func logRtype(t *reflect.Type) { iface := *(*[2]uintptr)(unsafe.Pointer(t)) rt := (*rtype)(unsafe.Pointer(iface[1])) log.Printf("TYPE INFO: %v(%p) - %v", *t, t, rt) } tensor-0.9.24/defaultengine.go000066400000000000000000000031771426512615100162740ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/execution" ) // StdEng is the default execution engine that comes with the tensors. To use other execution engines, use the WithEngine construction option. 
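// Tensors constructed without an explicit engine default to it, so the two
// constructions below are equivalent (a sketch; values illustrative):
//	a := New(WithShape(2), WithBacking([]float64{3, 4}))
//	b := New(WithShape(2), WithBacking([]float64{3, 4}), WithEngine(StdEng{}))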
type StdEng struct { execution.E } // makeArray allocates a slice for the array func (e StdEng) makeArray(arr *array, t Dtype, size int) { arr.Raw = malloc(t, size) arr.t = t } func (e StdEng) AllocAccessible() bool { return true } func (e StdEng) Alloc(size int64) (Memory, error) { return nil, noopError{} } func (e StdEng) Free(mem Memory, size int64) error { return nil } func (e StdEng) Memset(mem Memory, val interface{}) error { if ms, ok := mem.(MemSetter); ok { return ms.Memset(val) } return errors.Errorf("Cannot memset %v with StdEng", mem) } func (e StdEng) Memclr(mem Memory) { if z, ok := mem.(Zeroer); ok { z.Zero() } return } func (e StdEng) Memcpy(dst, src Memory) error { switch dt := dst.(type) { case *array: switch st := src.(type) { case *array: copyArray(dt, st) return nil case arrayer: copyArray(dt, st.arrPtr()) return nil } case arrayer: switch st := src.(type) { case *array: copyArray(dt.arrPtr(), st) return nil case arrayer: copyArray(dt.arrPtr(), st.arrPtr()) return nil } } return errors.Errorf("Failed to copy %T %T", dst, src) } func (e StdEng) Accessible(mem Memory) (Memory, error) { return mem, nil } func (e StdEng) WorksWith(order DataOrder) bool { return true } func (e StdEng) checkAccessible(t Tensor) error { if !t.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, t) } return nil } tensor-0.9.24/defaultengine_argmethods.go000066400000000000000000000102031426512615100204750ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" func (e StdEng) Argmax(t Tensor, axis int) (retVal Tensor, err error) { switch tt := t.(type) { case DenseTensor: return e.argmaxDenseTensor(tt, axis) default: return nil, errors.Errorf(typeNYI, "StdEng.Argmax", t) } } func (e StdEng) argmaxDenseTensor(t DenseTensor, axis int) (retVal *Dense, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, opFail, "Argmax") } if axis >= len(t.Shape()) { return nil, errors.Errorf(dimMismatch, len(t.Shape()), axis) } dataA := t.hdr() typ := t.rtype() // SPECIAL CASE: FLAT ARGMAX if axis == AllAxes { var index int if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() { if index = e.E.ArgmaxFlatMasked(typ, dataA, mt.Mask()); index == -1 { return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype()) } } else { if index = e.E.ArgmaxFlat(typ, dataA); index == -1 { return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype()) } } return New(FromScalar(index)), nil } // ARGMAX ALONG AXIS var indices []int axes := make([]int, len(t.Shape())) for i := range t.Shape() { switch { case i < axis: axes[i] = i case i == axis: axes[len(axes)-1] = i case i > axis: axes[i-1] = i } } // be a good citizen - borrow and return, since we're only using this AP to figure out the moves newAP, _, err := t.Info().T(axes...) 
if _, ok := err.(NoOpError); !ok && err != nil { return } else if ok { t.Info().CloneTo(&newAP) } it := IteratorFromDense(t) iteratorLoadAP(it, &newAP) lastSize := it.Shape()[len(it.Shape())-1] newShape := it.Shape().Clone() newShape = newShape[:len(newShape)-1] // cleanup defer func() { newAP.zero() ReturnInts(newShape) }() if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() { mask := mt.Mask() if indices, err = e.E.ArgmaxIterMasked(typ, dataA, mask, it, lastSize); err != nil { return } } else { if indices, err = e.E.ArgmaxIter(typ, dataA, it, lastSize); err != nil { return } } return New(WithShape(newShape...), WithBacking(indices)), nil } func (e StdEng) Argmin(t Tensor, axis int) (retVal Tensor, err error) { switch tt := t.(type) { case DenseTensor: return e.argminDenseTensor(tt, axis) default: return nil, errors.Errorf(typeNYI, "StdEng.Argmin", t) } } func (e StdEng) argminDenseTensor(t DenseTensor, axis int) (retVal *Dense, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, opFail, "Argmin") } if axis >= len(t.Shape()) { return nil, errors.Errorf(dimMismatch, len(t.Shape()), axis) } dataA := t.hdr() typ := t.rtype() // SPECIAL CASE: FLAT ARGMIN if axis == AllAxes { var index int if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() { if index = e.E.ArgminFlatMasked(typ, dataA, mt.Mask()); index == -1 { return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype()) } } else { if index = e.E.ArgminFlat(typ, dataA); index == -1 { return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype()) } } return New(FromScalar(index)), nil } // ARGMIN ALONG AXIS var indices []int axes := make([]int, len(t.Shape())) for i := range t.Shape() { switch { case i < axis: axes[i] = i case i == axis: axes[len(axes)-1] = i case i > axis: axes[i-1] = i } } // be a good citizen - borrow and return, since we're only using this AP to figure out the moves newAP, _, err := t.Info().T(axes...) if _, ok := err.(NoOpError); !ok && err != nil { return } else if ok { newAP = t.Info().Clone() } it := IteratorFromDense(t) iteratorLoadAP(it, &newAP) lastSize := it.Shape()[len(it.Shape())-1] newShape := it.Shape().Clone() newShape = newShape[:len(newShape)-1] // cleanup defer func() { newAP.zero() ReturnInts(newShape) }() if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() { mask := mt.Mask() if indices, err = e.E.ArgminIterMasked(typ, dataA, mask, it, lastSize); err != nil { return } } else { if indices, err = e.E.ArgminIter(typ, dataA, it, lastSize); err != nil { return } } return New(WithShape(newShape...), WithBacking(indices)), nil } tensor-0.9.24/defaultengine_arith.go000066400000000000000000000700201426512615100174520ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) // Add performs a + b elementwise. Both a and b must have the same shape. 
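// A sketch of use through the package-level frontend, which dispatches here
// for StdEng-backed tensors (illustrative values):
//	a := New(WithShape(2), WithBacking([]float64{1, 2}))
//	b := New(WithShape(2), WithBacking([]float64{3, 4}))
//	c, _ := Add(a, b) // c's backing: [4, 6]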
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, numberTypes); err != nil { return nil, errors.Wrapf(err, "Add failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Add") } if useIter { switch { case incr: err = e.E.AddIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.AddIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case !safe: err = e.E.AddIter(typ, dataA, dataB, ait, bit) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit) } return } switch { case incr: err = e.E.AddIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse: err = e.E.AddRecv(typ, dataA, dataB, dataReuse) retVal = reuse case !safe: err = e.E.Add(typ, dataA, dataB) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.Add(typ, retVal.hdr(), dataB) } return } // Sub performs a - b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) Sub(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, numberTypes); err != nil { return nil, errors.Wrapf(err, "Sub failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Sub") } if useIter { switch { case incr: err = e.E.SubIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.SubIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case !safe: err = e.E.SubIter(typ, dataA, dataB, ait, bit) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit) } return } switch { case incr: err = e.E.SubIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse: err = e.E.SubRecv(typ, dataA, dataB, dataReuse) retVal = reuse case !safe: err = e.E.Sub(typ, dataA, dataB) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.Sub(typ, retVal.hdr(), dataB) } return } // Mul performs a × b elementwise. Both a and b must have the same shape. 
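// An illustrative sketch of the incr path (assuming WithIncr accumulates the result
// into the supplied tensor, per the *Incr/*IterIncr kernels used below):
//
//	a := New(WithShape(2), WithBacking([]float64{2, 3}))
//	b := New(WithShape(2), WithBacking([]float64{4, 5}))
//	incr := New(WithShape(2), WithBacking([]float64{100, 100}))
//	Mul(a, b, WithIncr(incr)) // incr now holds [108, 115], i.e. incr[i] += a[i]*b[i]
//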
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) Mul(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, numberTypes); err != nil { return nil, errors.Wrapf(err, "Mul failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Mul") } if useIter { switch { case incr: err = e.E.MulIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.MulIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case !safe: err = e.E.MulIter(typ, dataA, dataB, ait, bit) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit) } return } switch { case incr: err = e.E.MulIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse: err = e.E.MulRecv(typ, dataA, dataB, dataReuse) retVal = reuse case !safe: err = e.E.Mul(typ, dataA, dataB) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.Mul(typ, retVal.hdr(), dataB) } return } // Div performs a ÷ b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) Div(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, numberTypes); err != nil { return nil, errors.Wrapf(err, "Div failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Div") } if useIter { switch { case incr: err = e.E.DivIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.DivIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case !safe: err = e.E.DivIter(typ, dataA, dataB, ait, bit) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit) } return } switch { case incr: err = e.E.DivIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse: err = e.E.DivRecv(typ, dataA, dataB, dataReuse) retVal = reuse case !safe: err = e.E.Div(typ, dataA, dataB) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.Div(typ, retVal.hdr(), dataB) } return } // Pow performs a ^ b elementwise. Both a and b must have the same shape. 
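// An illustrative sketch of the unsafe path (per the !safe branch below, the first
// operand is mutated in place and returned):
//
//	Pow(a, b, UseUnsafe()) // a[i] is overwritten with a[i] ^ b[i]; the returned value is a itself
//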
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) Pow(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, numberTypes); err != nil { return nil, errors.Wrapf(err, "Pow failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Pow") } if useIter { switch { case incr: err = e.E.PowIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.PowIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case !safe: err = e.E.PowIter(typ, dataA, dataB, ait, bit) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit) } return } switch { case incr: err = e.E.PowIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse: err = e.E.PowRecv(typ, dataA, dataB, dataReuse) retVal = reuse case !safe: err = e.E.Pow(typ, dataA, dataB) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.Pow(typ, retVal.hdr(), dataB) } return } // Mod performs a % b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) Mod(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, numberTypes); err != nil { return nil, errors.Wrapf(err, "Mod failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Mod") } if useIter { switch { case incr: err = e.E.ModIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.ModIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case !safe: err = e.E.ModIter(typ, dataA, dataB, ait, bit) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit) } return } switch { case incr: err = e.E.ModIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse: err = e.E.ModRecv(typ, dataA, dataB, dataReuse) retVal = reuse case !safe: err = e.E.Mod(typ, dataA, dataB) retVal = a default: if swap { retVal = b.Clone().(Tensor) } else { retVal = a.Clone().(Tensor) } err = e.E.Mod(typ, retVal.hdr(), dataB) } return } // AddScalar performs t + s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s. 
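// An illustrative sketch (assuming the package-level Add helper, which routes a
// tensor-scalar pair here with leftTensor set to true):
//
//	t := New(WithShape(2), WithBacking([]float64{1, 2}))
//	r, _ := Add(t, 10.0) // r holds [11, 12]
//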
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) AddScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, numberTypes); err != nil { return nil, errors.Wrapf(err, "Add failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Add failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Add") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Add") } scalarHeader = dataA } if useIter { switch { case incr: err = e.E.AddIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.AddIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.AddIter(typ, dataA, dataReuse, ait, iit) retVal = reuse case !safe: err = e.E.AddIter(typ, dataA, dataB, ait, bit) retVal = a default: retVal = a.Clone().(Tensor) if leftTensor { err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.AddIter(typ, dataA, retVal.hdr(), ait, bit) } } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } switch { case incr: err = e.E.AddIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.Add(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.Add(typ, dataA, dataReuse) if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } retVal = reuse case !safe: err = e.E.Add(typ, dataA, dataB) if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } retVal = a default: retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.Add(typ, retVal.hdr(), dataB) } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // SubScalar performs t - s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s. 
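// Because subtraction is not commutative, leftTensor matters here. An illustrative
// sketch (again assuming the package-level Sub helper):
//
//	Sub(t, 1.0) // t - 1 elementwise: leftTensor == true
//	Sub(1.0, t) // 1 - t elementwise: leftTensor == false
//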
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) SubScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, numberTypes); err != nil { return nil, errors.Wrapf(err, "Sub failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Sub failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Sub") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Sub") } scalarHeader = dataA } if useIter { switch { case incr: err = e.E.SubIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.SubIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.SubIter(typ, dataA, dataReuse, ait, iit) retVal = reuse case !safe: err = e.E.SubIter(typ, dataA, dataB, ait, bit) retVal = a default: retVal = a.Clone().(Tensor) if leftTensor { err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.SubIter(typ, dataA, retVal.hdr(), ait, bit) } } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } switch { case incr: err = e.E.SubIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.Sub(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.Sub(typ, dataA, dataReuse) if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } retVal = reuse case !safe: err = e.E.Sub(typ, dataA, dataB) if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } retVal = a default: retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.Sub(typ, retVal.hdr(), dataB) } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // MulScalar performs t × s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s. 
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) MulScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, numberTypes); err != nil { return nil, errors.Wrapf(err, "Mul failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Mul failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mul") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mul") } scalarHeader = dataA } if useIter { switch { case incr: err = e.E.MulIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.MulIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.MulIter(typ, dataA, dataReuse, ait, iit) retVal = reuse case !safe: err = e.E.MulIter(typ, dataA, dataB, ait, bit) retVal = a default: retVal = a.Clone().(Tensor) if leftTensor { err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.MulIter(typ, dataA, retVal.hdr(), ait, bit) } } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } switch { case incr: err = e.E.MulIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.Mul(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.Mul(typ, dataA, dataReuse) if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } retVal = reuse case !safe: err = e.E.Mul(typ, dataA, dataB) if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } retVal = a default: retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.Mul(typ, retVal.hdr(), dataB) } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // DivScalar performs t ÷ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s. 
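// An illustrative sketch of WithReuse with a scalar operand (the preallocated tensor
// receives the result; Of is assumed here as the usual Dtype construction option):
//
//	r := New(WithShape(2), Of(Float64))
//	Div(t, 2.0, WithReuse(r)) // r holds t[i] / 2; r must match t's shape and Dtype
//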
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) DivScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, numberTypes); err != nil { return nil, errors.Wrapf(err, "Div failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Div failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Div") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Div") } scalarHeader = dataA } if useIter { switch { case incr: err = e.E.DivIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.DivIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.DivIter(typ, dataA, dataReuse, ait, iit) retVal = reuse case !safe: err = e.E.DivIter(typ, dataA, dataB, ait, bit) retVal = a default: retVal = a.Clone().(Tensor) if leftTensor { err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.DivIter(typ, dataA, retVal.hdr(), ait, bit) } } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } switch { case incr: err = e.E.DivIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.Div(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.Div(typ, dataA, dataReuse) if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } retVal = reuse case !safe: err = e.E.Div(typ, dataA, dataB) if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } retVal = a default: retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.Div(typ, retVal.hdr(), dataB) } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // PowScalar performs t ^ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s. 
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) PowScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, numberTypes); err != nil { return nil, errors.Wrapf(err, "Pow failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Pow failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Pow") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Pow") } scalarHeader = dataA } if useIter { switch { case incr: err = e.E.PowIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.PowIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.PowIter(typ, dataA, dataReuse, ait, iit) retVal = reuse case !safe: err = e.E.PowIter(typ, dataA, dataB, ait, bit) retVal = a default: retVal = a.Clone().(Tensor) if leftTensor { err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.PowIter(typ, dataA, retVal.hdr(), ait, bit) } } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } switch { case incr: err = e.E.PowIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.Pow(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.Pow(typ, dataA, dataReuse) if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } retVal = reuse case !safe: err = e.E.Pow(typ, dataA, dataB) if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } retVal = a default: retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.Pow(typ, retVal.hdr(), dataB) } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // ModScalar performs t % s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s. 
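// A note on the recurring pattern in these *Scalar methods, derived from the code
// itself: the scalar operand s is wrapped into a borrowed *storage.Header by
// prepDataVS/prepDataSV, and every exit path undoes that with
// "if newAlloc { freeScalar(scalarHeader.Raw) }" followed by
// "returnHeader(scalarHeader)", handing the header back to its pool.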
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e StdEng) ModScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, numberTypes); err != nil { return nil, errors.Wrapf(err, "Mod failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Mod failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mod") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Mod") } scalarHeader = dataA } if useIter { switch { case incr: err = e.E.ModIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.ModIter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.ModIter(typ, dataA, dataReuse, ait, iit) retVal = reuse case !safe: err = e.E.ModIter(typ, dataA, dataB, ait, bit) retVal = a default: retVal = a.Clone().(Tensor) if leftTensor { err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.ModIter(typ, dataA, retVal.hdr(), ait, bit) } } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } switch { case incr: err = e.E.ModIncr(typ, dataA, dataB, dataReuse) retVal = reuse case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.Mod(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.Mod(typ, dataA, dataReuse) if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } retVal = reuse case !safe: err = e.E.Mod(typ, dataA, dataB) if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } retVal = a default: retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.Mod(typ, retVal.hdr(), dataB) } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } tensor-0.9.24/defaultengine_cmp.go000066400000000000000000001047011426512615100171260ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) // Gt performs a > b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
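// An illustrative sketch (assuming the package-level Gt helper): by default the
// comparison yields a Bool tensor; with AsSameType() it yields 0/1 values in the
// operands' Dtype instead:
//
//	a := New(WithShape(2), WithBacking([]float64{1, 5}))
//	b := New(WithShape(2), WithBacking([]float64{3, 2}))
//	m, _ := Gt(a, b)               // Bool tensor: [false, true]
//	n, _ := Gt(a, b, AsSameType()) // float64 tensor: [0, 1]
//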
func (e StdEng) Gt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, ordTypes); err != nil { return nil, errors.Wrapf(err, "Gt failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Gt") } // check to see if anything needs to be created switch { case same && safe && reuse == nil: if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.GtSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.GtSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.GtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } return } // standard switch { case !safe && same && reuse == nil: err = e.E.GtSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.GtSame(typ, dataReuse, dataB) retVal = reuse default: err = e.E.Gt(typ, dataA, dataB, dataReuse) retVal = reuse } return } // Gte performs a ≥ b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) Gte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, ordTypes); err != nil { return nil, errors.Wrapf(err, "Gte failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Gte") } // check to see if anything needs to be created switch { case same && safe && reuse == nil: if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.GteSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.GteSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.GteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } return } // standard switch { case !safe && same && reuse == nil: err = e.E.GteSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.GteSame(typ, dataReuse, dataB) retVal = reuse default: err = e.E.Gte(typ, dataA, dataB, dataReuse) retVal = reuse } return } // Lt performs a < b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) Lt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, ordTypes); err != nil { return nil, errors.Wrapf(err, "Lt failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Lt") } // check to see if anything needs to be created switch { case same && safe && reuse == nil: if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.LtSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.LtSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.LtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } return } // standard switch { case !safe && same && reuse == nil: err = e.E.LtSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.LtSame(typ, dataReuse, dataB) retVal = reuse default: err = e.E.Lt(typ, dataA, dataB, dataReuse) retVal = reuse } return } // Lte performs a ≤ b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) Lte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, ordTypes); err != nil { return nil, errors.Wrapf(err, "Lte failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Lte") } // check to see if anything needs to be created switch { case same && safe && reuse == nil: if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.LteSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.LteSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.LteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } return } // standard switch { case !safe && same && reuse == nil: err = e.E.LteSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.LteSame(typ, dataReuse, dataB) retVal = reuse default: err = e.E.Lte(typ, dataA, dataB, dataReuse) retVal = reuse } return } // ElEq performs a == b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) ElEq(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, eqTypes); err != nil { return nil, errors.Wrapf(err, "Eq failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Eq") } // check to see if anything needs to be created switch { case same && safe && reuse == nil: if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.EqSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.EqSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.EqIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } return } // standard switch { case !safe && same && reuse == nil: err = e.E.EqSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.EqSame(typ, dataReuse, dataB) retVal = reuse default: err = e.E.Eq(typ, dataA, dataB, dataReuse) retVal = reuse } return } // ElNe performs a ≠ b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) ElNe(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, eqTypes); err != nil { return nil, errors.Wrapf(err, "Ne failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Ne") } // check to see if anything needs to be created switch { case same && safe && reuse == nil: if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.NeSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.NeSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.NeIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } return } // standard switch { case !safe && same && reuse == nil: err = e.E.NeSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.NeSame(typ, dataReuse, dataB) retVal = reuse default: err = e.E.Ne(typ, dataA, dataB, dataReuse) retVal = reuse } return } // GtScalar performs t > s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
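// An illustrative sketch (assuming the package-level Gt helper, which routes a
// tensor-scalar pair here):
//
//	mask, _ := Gt(t, 0.0) // Bool tensor marking the strictly positive elements of t
//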
func (e StdEng) GtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, "Gt failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Gt failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gt") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gt") } scalarHeader = dataA } // check to see if anything needs to be created switch { case same && safe && reuse == nil: reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.GtSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.GtSameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.GtSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.GtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.GtSame(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.LtSame(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && same && reuse == nil: err = e.E.GtSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.GtSame(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.GtSame(typ, dataA, dataReuse) retVal = reuse default: err = e.E.Gt(typ, dataA, dataB, dataReuse) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // GteScalar performs t ≥ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) GteScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, "Gte failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Gte failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gte") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Gte") } scalarHeader = dataA } // check to see if anything needs to be created switch { case same && safe && reuse == nil: reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.GteSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.GteSameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.GteSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.GteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.GteSame(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.LteSame(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && same && reuse == nil: err = e.E.GteSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.GteSame(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.GteSame(typ, dataA, dataReuse) retVal = reuse default: err = e.E.Gte(typ, dataA, dataB, dataReuse) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // LtScalar performs t < s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) LtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, "Lt failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Lt failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lt") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lt") } scalarHeader = dataA } // check to see if anything needs to be created switch { case same && safe && reuse == nil: reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.LtSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.LtSameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.LtSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.LtIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.LtSame(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.GtSame(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && same && reuse == nil: err = e.E.LtSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.LtSame(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.LtSame(typ, dataA, dataReuse) retVal = reuse default: err = e.E.Lt(typ, dataA, dataB, dataReuse) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // LteScalar performs t ≤ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (e StdEng) LteScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, "Lte failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Lte failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lte") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Lte") } scalarHeader = dataA } // check to see if anything needs to be created switch { case same && safe && reuse == nil: reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.LteSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.LteSameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.LteSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.LteIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.LteSame(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.GteSame(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && same && reuse == nil: err = e.E.LteSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.LteSame(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.LteSame(typ, dataA, dataReuse) retVal = reuse default: err = e.E.Lte(typ, dataA, dataB, dataReuse) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } func (e StdEng) EqScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, eqTypes); err != nil { return nil, errors.Wrapf(err, "Eq failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Eq failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, 
opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Eq") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Eq") } scalarHeader = dataA } // check to see if anything needs to be created switch { case same && safe && reuse == nil: reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.EqSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.EqSameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.EqSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.EqIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.EqSame(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.EqSame(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && same && reuse == nil: err = e.E.EqSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.EqSame(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.EqSame(typ, dataA, dataReuse) retVal = reuse default: err = e.E.Eq(typ, dataA, dataB, dataReuse) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } func (e StdEng) NeScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, eqTypes); err != nil { return nil, errors.Wrapf(err, "Ne failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "Ne failed") } var reuse DenseTensor var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Ne") } scalarHeader = dataB } else { 
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Ne") } scalarHeader = dataA } // check to see if anything needs to be created switch { case same && safe && reuse == nil: reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.NeSameIter(typ, dataA, dataB, ait, bit) retVal = a case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.NeSameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.NeSameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool err = e.E.NeIter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.NeSame(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.NeSame(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && same && reuse == nil: err = e.E.NeSame(typ, dataA, dataB) retVal = a case same && safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.NeSame(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.NeSame(typ, dataA, dataReuse) retVal = reuse default: err = e.E.Ne(typ, dataA, dataB, dataReuse) retVal = reuse } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } tensor-0.9.24/defaultengine_linalg.go000066400000000000000000000451351426512615100176220ustar00rootroot00000000000000package tensor import ( "reflect" "github.com/pkg/errors" "gonum.org/v1/gonum/blas" "gonum.org/v1/gonum/mat" ) // Trace returns the trace of a matrix (i.e. the sum of the diagonal elements). 
If the Tensor provided is not a matrix, it will return an error func (e StdEng) Trace(t Tensor) (retVal interface{}, err error) { if t.Dims() != 2 { err = errors.Errorf(dimMismatch, 2, t.Dims()) return } if err = typeclassCheck(t.Dtype(), numberTypes); err != nil { return nil, errors.Wrap(err, "Trace") } rstride := t.Strides()[0] cstride := t.Strides()[1] r := t.Shape()[0] c := t.Shape()[1] m := MinInt(r, c) stride := rstride + cstride switch data := t.Data().(type) { case []int: var trace int for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []int8: var trace int8 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []int16: var trace int16 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []int32: var trace int32 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []int64: var trace int64 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []uint: var trace uint for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []uint8: var trace uint8 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []uint16: var trace uint16 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []uint32: var trace uint32 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []uint64: var trace uint64 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []float32: var trace float32 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []float64: var trace float64 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []complex64: var trace complex64 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace case []complex128: var trace complex128 for i := 0; i < m; i++ { trace += data[i*stride] } retVal = trace } return } func (e StdEng) Dot(x, y Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if _, ok := x.(DenseTensor); !ok { err = errors.Errorf("Engine only supports working on x that is a DenseTensor. Got %T instead", x) return } if _, ok := y.(DenseTensor); !ok { err = errors.Errorf("Engine only supports working on y that is a DenseTensor. Got %T instead", y) return } var a, b DenseTensor if a, err = getFloatDenseTensor(x); err != nil { err = errors.Wrapf(err, opFail, "Dot") return } if b, err = getFloatDenseTensor(y); err != nil { err = errors.Wrapf(err, opFail, "Dot") return } fo := ParseFuncOpts(opts...) 
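// ParseFuncOpts folds the variadic options into one struct; only the reuse
// and incr destinations matter for Dot. A short sketch of the difference,
// using the option constructors exercised later in this function:
//
//	c, _ := e.Dot(a, b)               // allocate a fresh output
//	_, _ = e.Dot(a, b, WithReuse(c))  // overwrite c with a·b
//	_, _ = e.Dot(a, b, WithIncr(c))   // accumulate: c += a·b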
var reuse, incr DenseTensor if reuse, err = getFloatDenseTensor(fo.reuse); err != nil { err = errors.Wrapf(err, opFail, "Dot - reuse") return } if incr, err = getFloatDenseTensor(fo.incr); err != nil { err = errors.Wrapf(err, opFail, "Dot - incr") return } switch { case a.IsScalar() && b.IsScalar(): var res interface{} switch a.Dtype().Kind() { case reflect.Float64: res = a.GetF64(0) * b.GetF64(0) case reflect.Float32: res = a.GetF32(0) * b.GetF32(0) } switch { case incr != nil: if !incr.IsScalar() { err = errors.Errorf(shapeMismatch, ScalarShape(), incr.Shape()) return } if err = e.E.MulIncr(a.Dtype().Type, a.hdr(), b.hdr(), incr.hdr()); err != nil { err = errors.Wrapf(err, opFail, "Dot scalar incr") return } retVal = incr case reuse != nil: reuse.Set(0, res) reuse.reshape() retVal = reuse default: retVal = New(FromScalar(res)) } return case a.IsScalar(): switch { case incr != nil: return Mul(a.ScalarValue(), b, WithIncr(incr)) case reuse != nil: return Mul(a.ScalarValue(), b, WithReuse(reuse)) } // default moved out return Mul(a.ScalarValue(), b) case b.IsScalar(): switch { case incr != nil: return Mul(a, b.ScalarValue(), WithIncr(incr)) case reuse != nil: return Mul(a, b.ScalarValue(), WithReuse(reuse)) } return Mul(a, b.ScalarValue()) } switch { case a.IsVector(): switch { case b.IsVector(): // check size if a.len() != b.len() { err = errors.Errorf(shapeMismatch, a.Shape(), b.Shape()) return } var ret interface{} if ret, err = e.Inner(a, b); err != nil { return nil, errors.Wrapf(err, opFail, "Dot") } return New(FromScalar(ret)), nil case b.IsMatrix(): b.T() defer b.UT() switch { case reuse != nil && incr != nil: return b.MatVecMul(a, WithReuse(reuse), WithIncr(incr)) case reuse != nil: return b.MatVecMul(a, WithReuse(reuse)) case incr != nil: return b.MatVecMul(a, WithIncr(incr)) default: } return b.MatVecMul(a) default: } case a.IsMatrix(): switch { case b.IsVector(): switch { case reuse != nil && incr != nil: return a.MatVecMul(b, WithReuse(reuse), WithIncr(incr)) case reuse != nil: return a.MatVecMul(b, WithReuse(reuse)) case incr != nil: return a.MatVecMul(b, WithIncr(incr)) default: } return a.MatVecMul(b) case b.IsMatrix(): switch { case reuse != nil && incr != nil: return a.MatMul(b, WithReuse(reuse), WithIncr(incr)) case reuse != nil: return a.MatMul(b, WithReuse(reuse)) case incr != nil: return a.MatMul(b, WithIncr(incr)) default: } return a.MatMul(b) default: } default: } as := a.Shape() bs := b.Shape() axesA := BorrowInts(1) axesB := BorrowInts(1) defer ReturnInts(axesA) defer ReturnInts(axesB) var lastA, secondLastB int lastA = len(as) - 1 axesA[0] = lastA if len(bs) >= 2 { secondLastB = len(bs) - 2 } else { secondLastB = 0 } axesB[0] = secondLastB if as[lastA] != bs[secondLastB] { err = errors.Errorf(shapeMismatch, as, bs) return } var rd *Dense if rd, err = a.TensorMul(b, axesA, axesB); err != nil { panic(err) } if reuse != nil { copyDense(reuse, rd) ap := rd.Info().Clone() reuse.setAP(&ap) defer ReturnTensor(rd) // swap out the underlying data and metadata // reuse.data, rd.data = rd.data, reuse.data // reuse.AP, rd.AP = rd.AP, reuse.AP // defer ReturnTensor(rd) retVal = reuse } else { retVal = rd } return } // TODO: make it take DenseTensor func (e StdEng) SVD(a Tensor, uv, full bool) (s, u, v Tensor, err error) { var t *Dense var ok bool if err = e.checkAccessible(a); err != nil { return nil, nil, nil, errors.Wrapf(err, "opFail %v", "SVD") } if t, ok = a.(*Dense); !ok { return nil, nil, nil, errors.Errorf("StdEng only performs SVDs for DenseTensors. 
Got %T instead", a) } if err = typeclassCheck(a.Dtype(), floatTypes); err != nil { return nil, nil, nil, errors.Errorf("StdEng can only perform SVDs for float64 and float32 type. Got tensor of %v instead", t.Dtype()) } if !t.IsMatrix() { return nil, nil, nil, errors.Errorf(dimMismatch, 2, t.Dims()) } var m *mat.Dense var svd mat.SVD if m, err = ToMat64(t, UseUnsafe()); err != nil { return } switch { case full && uv: ok = svd.Factorize(m, mat.SVDFull) case !full && uv: ok = svd.Factorize(m, mat.SVDThin) case full && !uv: // illogical state - if you specify "full", you WANT the UV matrices // error err = errors.Errorf("SVD requires computation of `u` and `v` matrices if `full` was specified.") return default: // by default, we return only the singular values ok = svd.Factorize(m, mat.SVDNone) } if !ok { // error err = errors.Errorf("Unable to compute SVD") return } // extract values var um, vm mat.Dense s = recycledDense(Float64, Shape{MinInt(t.Shape()[0], t.Shape()[1])}, WithEngine(e)) svd.Values(s.Data().([]float64)) if uv { svd.UTo(&um) svd.VTo(&vm) // vm.VFromSVD(&svd) u = FromMat64(&um, UseUnsafe(), As(t.t)) v = FromMat64(&vm, UseUnsafe(), As(t.t)) } return } // Inner is a thin layer over BLAS's D/Sdot. // It returns a scalar value, wrapped in an interface{}, which is not quite nice. func (e StdEng) Inner(a, b Tensor) (retVal interface{}, err error) { var ad, bd DenseTensor if ad, bd, err = e.checkTwoFloatComplexTensors(a, b); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Inner") } switch A := ad.Data().(type) { case []float32: B := bd.Float32s() retVal = whichblas.Sdot(len(A), A, 1, B, 1) case []float64: B := bd.Float64s() retVal = whichblas.Ddot(len(A), A, 1, B, 1) case []complex64: B := bd.Complex64s() retVal = whichblas.Cdotu(len(A), A, 1, B, 1) case []complex128: B := bd.Complex128s() retVal = whichblas.Zdotu(len(A), A, 1, B, 1) } return } // MatVecMul is a thin layer over BLAS' DGEMV // Because DGEMV computes: // y = αA * x + βy // we set beta to 0, so we don't have to manually zero out the reused/retval tensor data func (e StdEng) MatVecMul(a, b, prealloc Tensor) (err error) { // check all are DenseTensors var ad, bd, pd DenseTensor if ad, bd, pd, err = e.checkThreeFloatComplexTensors(a, b, prealloc); err != nil { return errors.Wrapf(err, opFail, "StdEng.MatVecMul") } m := ad.oshape()[0] n := ad.oshape()[1] tA := blas.NoTrans do := a.DataOrder() z := ad.oldAP().IsZero() var lda int switch { case do.IsRowMajor() && z: lda = n case do.IsRowMajor() && !z: tA = blas.Trans lda = n case do.IsColMajor() && z: tA = blas.Trans lda = m m, n = n, m case do.IsColMajor() && !z: lda = m m, n = n, m } incX, incY := 1, 1 // step size // ASPIRATIONAL TODO: different incX and incY // TECHNICAL DEBT. TECHDEBT. 
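// Note on the tA/lda switch above: the underlying BLAS here is assumed to be
// row-major, so a col-major operand is presented to Gemv as the transpose of
// a row-major matrix (hence blas.Trans plus the m/n swap), while an
// already-transposed row-major view keeps n as its leading dimension.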
TECH DEBT // Example use case: // log.Printf("a %v %v", ad.Strides(), ad.ostrides()) // log.Printf("b %v", b.Strides()) // incX := a.Strides()[0] // incY = b.Strides()[0] switch A := ad.Data().(type) { case []float64: x := bd.Float64s() y := pd.Float64s() alpha, beta := float64(1), float64(0) whichblas.Dgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY) case []float32: x := bd.Float32s() y := pd.Float32s() alpha, beta := float32(1), float32(0) whichblas.Sgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY) case []complex64: x := bd.Complex64s() y := pd.Complex64s() var alpha, beta complex64 = complex(1, 0), complex(0, 0) whichblas.Cgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY) case []complex128: x := bd.Complex128s() y := pd.Complex128s() var alpha, beta complex128 = complex(1, 0), complex(0, 0) whichblas.Zgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY) default: return errors.Errorf(typeNYI, "matVecMul", bd.Data()) } return nil } // MatMul is a thin layer over DGEMM. // DGEMM computes: // C = αA * B + βC // To prevent needless zeroing out of the slice, we just set β to 0 func (e StdEng) MatMul(a, b, prealloc Tensor) (err error) { // check all are DenseTensors var ad, bd, pd DenseTensor if ad, bd, pd, err = e.checkThreeFloatComplexTensors(a, b, prealloc); err != nil { return errors.Wrapf(err, opFail, "StdEng.MatMul") } ado := a.DataOrder() bdo := b.DataOrder() cdo := prealloc.DataOrder() // get result shapes. k is the shared dimension // a is (m, k) // b is (k, n) // c is (m, n) var m, n, k int m = ad.Shape()[0] k = ad.Shape()[1] n = bd.Shape()[1] // wrt the strides, we use the original strides, because that's what BLAS needs, instead of calling .Strides() // lda in colmajor = number of rows; // lda in row major = number of cols var lda, ldb, ldc int switch { case ado.IsColMajor(): lda = m case ado.IsRowMajor(): lda = k } switch { case bdo.IsColMajor(): ldb = bd.Shape()[0] case bdo.IsRowMajor(): ldb = n } switch { case cdo.IsColMajor(): ldc = prealloc.Shape()[0] case cdo.IsRowMajor(): ldc = prealloc.Shape()[1] } // check for trans tA, tB := blas.NoTrans, blas.NoTrans if !ad.oldAP().IsZero() { tA = blas.Trans if ado.IsRowMajor() { lda = m } else { lda = k } } if !bd.oldAP().IsZero() { tB = blas.Trans if bdo.IsRowMajor() { ldb = bd.Shape()[0] } else { ldb = bd.Shape()[1] } } switch A := ad.Data().(type) { case []float64: B := bd.Float64s() C := pd.Float64s() alpha, beta := float64(1), float64(0) if ado.IsColMajor() && bdo.IsColMajor() { whichblas.Dgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc) } else { whichblas.Dgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc) } case []float32: B := bd.Float32s() C := pd.Float32s() alpha, beta := float32(1), float32(0) if ado.IsColMajor() && bdo.IsColMajor() { whichblas.Sgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc) } else { whichblas.Sgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc) } case []complex64: B := bd.Complex64s() C := pd.Complex64s() var alpha, beta complex64 = complex(1, 0), complex(0, 0) if ado.IsColMajor() && bdo.IsColMajor() { whichblas.Cgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc) } else { whichblas.Cgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc) } case []complex128: B := bd.Complex128s() C := pd.Complex128s() var alpha, beta complex128 = complex(1, 0), complex(0, 0) if ado.IsColMajor() && bdo.IsColMajor() { whichblas.Zgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc) } else { whichblas.Zgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc) 
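// The col-major branch above computes the product as C^T = B^T * A^T:
// viewing col-major data through a row-major Gemm is an implicit transpose,
// so swapping the operands and the m/n extents yields a col-major C without
// any explicit transposition pass.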
} default: return errors.Errorf(typeNYI, "matMul", ad.Data()) } return } // Outer is a thin wrapper over S/Dger func (e StdEng) Outer(a, b, prealloc Tensor) (err error) { // check all are DenseTensors var ad, bd, pd DenseTensor if ad, bd, pd, err = e.checkThreeFloatComplexTensors(a, b, prealloc); err != nil { return errors.Wrapf(err, opFail, "StdEng.Outer") } m := ad.Size() n := bd.Size() pdo := pd.DataOrder() // the stride of a Vector is always going to be [1], // incX := t.Strides()[0] // incY := other.Strides()[0] incX, incY := 1, 1 // lda := pd.Strides()[0] var lda int switch { case pdo.IsColMajor(): aShape := a.Shape().Clone() bShape := b.Shape().Clone() if err = a.Reshape(aShape[0], 1); err != nil { return err } if err = b.Reshape(1, bShape[0]); err != nil { return err } if err = e.MatMul(a, b, prealloc); err != nil { return err } if err = b.Reshape(bShape...); err != nil { return } if err = a.Reshape(aShape...); err != nil { return } return nil case pdo.IsRowMajor(): lda = pd.Shape()[1] } switch x := ad.Data().(type) { case []float64: y := bd.Float64s() A := pd.Float64s() alpha := float64(1) whichblas.Dger(m, n, alpha, x, incX, y, incY, A, lda) case []float32: y := bd.Float32s() A := pd.Float32s() alpha := float32(1) whichblas.Sger(m, n, alpha, x, incX, y, incY, A, lda) case []complex64: y := bd.Complex64s() A := pd.Complex64s() var alpha complex64 = complex(1, 0) whichblas.Cgeru(m, n, alpha, x, incX, y, incY, A, lda) case []complex128: y := bd.Complex128s() A := pd.Complex128s() var alpha complex128 = complex(1, 0) whichblas.Zgeru(m, n, alpha, x, incX, y, incY, A, lda) default: return errors.Errorf(typeNYI, "outer", b.Data()) } return nil } /* UNEXPORTED UTILITY FUNCTIONS */ func (e StdEng) checkTwoFloatTensors(a, b Tensor) (ad, bd DenseTensor, err error) { if err = e.checkAccessible(a); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible") } if err = e.checkAccessible(b); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible") } if a.Dtype() != b.Dtype() { return nil, nil, errors.New("Expected a and b to have the same Dtype") } if ad, err = getFloatDenseTensor(a); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor") } if bd, err = getFloatDenseTensor(b); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor") } return } func (e StdEng) checkThreeFloatTensors(a, b, ret Tensor) (ad, bd, retVal DenseTensor, err error) { if err = e.checkAccessible(a); err != nil { return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible") } if err = e.checkAccessible(b); err != nil { return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible") } if err = e.checkAccessible(ret); err != nil { return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: ret is not accessible") } if a.Dtype() != b.Dtype() || b.Dtype() != ret.Dtype() { return nil, nil, nil, errors.New("Expected a and b and retVal all to have the same Dtype") } if ad, err = getFloatDenseTensor(a); err != nil { return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor") } if bd, err = getFloatDenseTensor(b); err != nil { return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor") } if retVal, err = getFloatDenseTensor(ret); err != nil { return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects retVal to be be a DenseTensor") } return } func (e StdEng) checkTwoFloatComplexTensors(a, b 
Tensor) (ad, bd DenseTensor, err error) { if err = e.checkAccessible(a); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible") } if err = e.checkAccessible(b); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible") } if a.Dtype() != b.Dtype() { return nil, nil, errors.New("Expected a and b to have the same Dtype") } if ad, err = getFloatComplexDenseTensor(a); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor") } if bd, err = getFloatComplexDenseTensor(b); err != nil { return nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor") } return } func (e StdEng) checkThreeFloatComplexTensors(a, b, ret Tensor) (ad, bd, retVal DenseTensor, err error) { if err = e.checkAccessible(a); err != nil { return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible") } if err = e.checkAccessible(b); err != nil { return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible") } if err = e.checkAccessible(ret); err != nil { return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: ret is not accessible") } if a.Dtype() != b.Dtype() || b.Dtype() != ret.Dtype() { return nil, nil, nil, errors.New("Expected a and b and retVal all to have the same Dtype") } if ad, err = getFloatComplexDenseTensor(a); err != nil { return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor") } if bd, err = getFloatComplexDenseTensor(b); err != nil { return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor") } if retVal, err = getFloatComplexDenseTensor(ret); err != nil { return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects retVal to be be a DenseTensor") } return } tensor-0.9.24/defaultengine_mapreduce.go000066400000000000000000000205521426512615100203150ustar00rootroot00000000000000package tensor import ( "reflect" "sort" "github.com/pkg/errors" "gorgonia.org/tensor/internal/execution" "gorgonia.org/tensor/internal/storage" ) func (e StdEng) Map(fn interface{}, a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, nil); err != nil { err = errors.Wrap(err, "Failed Map()") return } var reuse DenseTensor var safe, _, incr bool if reuse, safe, _, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return } switch { case safe && reuse == nil: // create reuse if v, ok := a.(View); ok { if v.IsMaterializable() { reuse = v.Materialize().(DenseTensor) } else { reuse = v.Clone().(DenseTensor) } } else { reuse = New(Of(a.Dtype()), WithShape(a.Shape().Clone()...)) } case reuse != nil: if !reuse.IsNativelyAccessible() { return nil, errors.Errorf(inaccessibleData, reuse) } if a.Size() != reuse.Size() { return nil, errors.Errorf(shapeMismatch, a.Shape(), reuse.Shape()) } } // PREP DATA typ := a.Dtype().Type var dataA, dataReuse, used *storage.Header var ait, rit, uit Iterator var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Map") } // HANDLE USE CASES switch { case !safe: used = dataA uit = ait default: used = dataReuse uit = rit } // DO if useIter { err = e.E.MapIter(typ, fn, used, incr, uit) } else { err = e.E.Map(typ, fn, used, incr) } if err != nil { err = errors.Wrapf(err, "Unable to apply function %v to tensor of %v", fn, typ) return } // SET RETVAL switch { case reuse != nil: if err = reuseCheckShape(reuse, a.Shape()); err != nil { err = 
errors.Wrapf(err, "Reuse shape check failed") return } retVal = reuse case !safe: retVal = a default: retVal = reuse } return } func (e StdEng) Reduce(fn interface{}, a Tensor, axis int, defaultValue interface{}, opts ...FuncOpt) (retVal Tensor, err error) { if !a.IsNativelyAccessible() { return nil, errors.Errorf(inaccessibleData, a) } var at, reuse DenseTensor var dataA, dataReuse *storage.Header if at, reuse, dataA, dataReuse, err = e.prepReduce(a, axis, opts...); err != nil { err = errors.Wrap(err, "Prep Reduce failed") return } lastAxis := a.Dims() - 1 typ := a.Dtype().Type // actual call out to the internal engine switch { case (axis == 0 && at.DataOrder().IsRowMajor()) || ((axis == lastAxis || axis == len(a.Shape())-1) && at.DataOrder().IsColMajor()): var size, split int if at.DataOrder().IsColMajor() { return nil, errors.Errorf("NYI: colmajor") } size = a.Shape()[0] split = a.DataSize() / size storage.CopySliced(typ, dataReuse, 0, split, dataA, 0, split) err = e.E.ReduceFirst(typ, dataA, dataReuse, split, size, fn) case (axis == lastAxis && at.DataOrder().IsRowMajor()) || (axis == 0 && at.DataOrder().IsColMajor()): var dimSize int if at.DataOrder().IsColMajor() { return nil, errors.Errorf("NYI: colmajor") } dimSize = a.Shape()[axis] err = e.E.ReduceLast(typ, dataA, dataReuse, dimSize, defaultValue, fn) default: dim0 := a.Shape()[0] dimSize := a.Shape()[axis] outerStride := a.Strides()[0] stride := a.Strides()[axis] expected := reuse.Strides()[0] err = e.E.ReduceDefault(typ, dataA, dataReuse, dim0, dimSize, outerStride, stride, expected, fn) } retVal = reuse return } func (e StdEng) OptimizedReduce(a Tensor, axis int, firstFn, lastFn, defaultFn, defaultValue interface{}, opts ...FuncOpt) (retVal Tensor, err error) { if !a.IsNativelyAccessible() { return nil, errors.Errorf(inaccessibleData, a) } var at, reuse DenseTensor var dataA, dataReuse *storage.Header if at, reuse, dataA, dataReuse, err = e.prepReduce(a, axis, opts...); err != nil { err = errors.Wrap(err, "Prep Reduce failed") return } lastAxis := a.Dims() - 1 typ := a.Dtype().Type // actual call out to the internal engine switch { case (axis == 0 && at.DataOrder().IsRowMajor()) || ((axis == lastAxis || axis == len(a.Shape())-1) && at.DataOrder().IsColMajor()): var size, split int if at.DataOrder().IsColMajor() { return nil, errors.Errorf("NYI: colmajor") } size = a.Shape()[0] split = a.DataSize() / size storage.CopySliced(typ, dataReuse, 0, split, dataA, 0, split) err = e.E.ReduceFirst(typ, dataA, dataReuse, split, size, firstFn) case (axis == lastAxis && at.DataOrder().IsRowMajor()) || (axis == 0 && at.DataOrder().IsColMajor()): var dimSize int if at.DataOrder().IsColMajor() { return nil, errors.Errorf("NYI: colmajor") } dimSize = a.Shape()[axis] err = e.E.ReduceLast(typ, dataA, dataReuse, dimSize, defaultValue, lastFn) default: dim0 := a.Shape()[0] dimSize := a.Shape()[axis] outerStride := a.Strides()[0] stride := a.Strides()[axis] expected := reuse.Strides()[0] err = e.E.ReduceDefault(typ, dataA, dataReuse, dim0, dimSize, outerStride, stride, expected, defaultFn) } retVal = reuse return } func (e StdEng) Sum(a Tensor, along ...int) (retVal Tensor, err error) { a2 := a if v, ok := a.(View); ok && v.IsMaterializable() { a2 = v.Materialize() } return e.reduce("Sum", execution.MonotonicSum, execution.SumMethods, a2, along...) 
} func (e StdEng) Min(a Tensor, along ...int) (retVal Tensor, err error) { a2 := a if v, ok := a.(View); ok && v.IsMaterializable() { a2 = v.Materialize() } return e.reduce("Min", execution.MonotonicMin, execution.MinMethods, a2, along...) } func (e StdEng) Max(a Tensor, along ...int) (retVal Tensor, err error) { a2 := a if v, ok := a.(View); ok && v.IsMaterializable() { a2 = v.Materialize() } return e.reduce("Max", execution.MonotonicMax, execution.MaxMethods, a2, along...) } func (e StdEng) reduce( op string, monotonicMethod func(t reflect.Type, a *storage.Header) (interface{}, error), methods func(t reflect.Type) (interface{}, interface{}, interface{}, error), a Tensor, along ...int) (retVal Tensor, err error) { switch at := a.(type) { case *Dense: hdr := at.hdr() typ := at.t.Type monotonic, incr1 := IsMonotonicInts(along) // if both are true, then it means all axes are accounted for, then it'll return a scalar value if (monotonic && incr1 && len(along) == a.Dims()) || len(along) == 0 { var ret interface{} if ret, err = monotonicMethod(typ, hdr); err != nil { return } return New(FromScalar(ret)), nil } var firstFn, lastFn, defaultFn interface{} if firstFn, lastFn, defaultFn, err = methods(typ); err != nil { return } defaultVal := reflect.Zero(typ).Interface() retVal = a dimsReduced := 0 sort.Slice(along, func(i, j int) bool { return along[i] < along[j] }) for _, axis := range along { axis -= dimsReduced dimsReduced++ if axis >= retVal.Dims() { err = errors.Errorf(dimMismatch, retVal.Dims(), axis) return } if retVal, err = e.OptimizedReduce(retVal, axis, firstFn, lastFn, defaultFn, defaultVal); err != nil { return } } return default: return nil, errors.Errorf("Cannot perform %s on %T", op, a) } } func (StdEng) prepReduce(a Tensor, axis int, opts ...FuncOpt) (at, reuse DenseTensor, dataA, dataReuse *storage.Header, err error) { if axis >= a.Dims() { err = errors.Errorf(dimMismatch, axis, a.Dims()) return } if err = unaryCheck(a, nil); err != nil { err = errors.Wrap(err, "prepReduce failed") return } // FUNC PREP var safe bool if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { err = errors.Wrap(err, "Unable to prep unary tensor") return } var newShape Shape for i, s := range a.Shape() { if i == axis { continue } newShape = append(newShape, s) } switch { case !safe: err = errors.New("Reduce only supports safe operations.") return case reuse != nil && !reuse.IsNativelyAccessible(): err = errors.Errorf(inaccessibleData, reuse) return case reuse != nil: if reuse.Shape().TotalSize() != newShape.TotalSize() { err = errors.Errorf(shapeMismatch, reuse.Shape(), newShape) return } reuse.Reshape(newShape...) case safe && reuse == nil: reuse = New(Of(a.Dtype()), WithShape(newShape...)) } // DATA PREP var useIter bool if dataA, dataReuse, _, _, useIter, err = prepDataUnary(a, reuse); err != nil { err = errors.Wrapf(err, "StdEng.Reduce data prep") return } var ok bool if at, ok = a.(DenseTensor); !ok || useIter { err = errors.Errorf("Reduce does not (yet) support iterable tensors") return } return } tensor-0.9.24/defaultengine_matop_misc.go000066400000000000000000000251371426512615100205070ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) var ( _ Diager = StdEng{} ) type fastcopier interface { fastCopyDenseRepeat(t DenseTensor, d *Dense, outers, size, stride, newStride int, repeats []int) error } // Repeat ... 
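// Repeat replicates elements of t along axis, numpy.repeat-style: a single
// count is broadcast to every element along that axis, or one count may be
// given per element. A hedged sketch:
//
//	a := New(WithShape(2), WithBacking([]float64{1, 2}))
//	r, _ := e.Repeat(a, 0, 3)  // shape (6): [1 1 1 2 2 2]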
func (e StdEng) Repeat(t Tensor, axis int, repeats ...int) (Tensor, error) { switch tt := t.(type) { case DenseTensor: newShape, newRepeats, newAxis, size, err := e.denseRepeatCheck(t, axis, repeats) if err != nil { return nil, err } rr := recycledDense(t.Dtype(), newShape, WithEngine(StdEng{})) return e.denseRepeat(tt, rr, newShape, newAxis, size, newRepeats) default: return nil, errors.Errorf("NYI") } } // RepeatReuse is like Repeat, but with a provided reuse Tensor. The reuseTensor must be of the same type as the input t. func (e StdEng) RepeatReuse(t Tensor, reuse Tensor, axis int, repeats ...int) (Tensor, error) { switch tt := t.(type) { case DenseTensor: newShape, newRepeats, newAxis, size, err := e.denseRepeatCheck(t, axis, repeats) if err != nil { return nil, err } rr, ok := reuse.(DenseTensor) if !ok { return nil, errors.Errorf("t is a DenseTensor but reuse is of %T", reuse) } if !reuse.Shape().Eq(newShape) { return nil, errors.Errorf("Reuse shape is %v. Expected shape is %v", reuse.Shape(), newShape) } return e.denseRepeat(tt, rr, newShape, newAxis, size, newRepeats) default: return nil, errors.Errorf("NYI") } } func (StdEng) denseRepeatCheck(t Tensor, axis int, repeats []int) (newShape Shape, newRepeats []int, newAxis, size int, err error) { if newShape, newRepeats, size, err = t.Shape().Repeat(axis, repeats...); err != nil { return nil, nil, -1, -1, errors.Wrap(err, "Unable to get repeated shape") } newAxis = axis if axis == AllAxes { newAxis = 0 } return } func (StdEng) denseRepeat(t, reuse DenseTensor, newShape Shape, axis, size int, repeats []int) (retVal DenseTensor, err error) { d, err := assertDense(reuse) if err != nil { return nil, errors.Wrapf(err, "Repeat reuse is not a *Dense") } var outers int if t.IsScalar() { outers = 1 } else { outers = ProdInts(t.Shape()[0:axis]) } var stride, newStride int if newShape.IsVector() || t.IsVector() { stride = 1 // special case because CalcStrides() will return []int{1} as the strides for a vector } else { stride = t.ostrides()[axis] } if newShape.IsVector() { newStride = 1 } else { newStride = d.ostrides()[axis] } var destStart, srcStart int // fastCopy is not bypassing the copyDenseSliced method to populate the output tensor var fastCopy bool var fce fastcopier // we need an engine for fastCopying... e := t.Engine() // e can never be nil. Error would have occurred elsewhere var ok bool if fce, ok = e.(fastcopier); ok { fastCopy = true } // In this case, let's not implement the fast copy to keep the code readable if ms, ok := t.(MaskedTensor); ok && ms.IsMasked() { fastCopy = false } // if d is not a fastcopier, then we also cannot use fast copy if _, ok := d.Engine().(fastcopier); !ok { fastCopy = false } if fastCopy { if err := fce.fastCopyDenseRepeat(t, d, outers, size, stride, newStride, repeats); err != nil { return nil, err } return d, nil } for i := 0; i < outers; i++ { for j := 0; j < size; j++ { var tmp int tmp = repeats[j] for k := 0; k < tmp; k++ { if srcStart >= t.len() || destStart+stride > d.len() { break } copyDenseSliced(d, destStart, d.len(), t, srcStart, t.len()) destStart += newStride } srcStart += stride } } return d, nil } func (e StdEng) fastCopyDenseRepeat(src DenseTensor, dest *Dense, outers, size, stride, newStride int, repeats []int) error { sarr := src.arr() darr := dest.arr() var destStart, srcStart int for i := 0; i < outers; i++ { // faster shortcut for common case. // // Consider a case where: // a := ⎡ 1 ⎤ // ⎢ 2 ⎥ // ⎢ 3 ⎥ // ⎣ 4 ⎦ // a has a shape of (4, 1). it is a *Dense. 
// // Now assume we want to repeat it on axis 1, 3 times. We want to repeat it into `b`, // which is already allocated and zeroed, as shown below // // b := ⎡ 0 0 0 ⎤ // ⎢ 0 0 0 ⎥ // ⎢ 0 0 0 ⎥ // ⎣ 0 0 0 ⎦ // // Now, both `a` and `b` have a stride of 1. // // The desired result is: // b := ⎡ 1 1 1 ⎤ // ⎢ 2 2 2 ⎥ // ⎢ 3 3 3 ⎥ // ⎣ 4 4 4 ⎦ /// // Observe that this is simply broadcasting (copying) a[0] (a scalar value) to the row b[0], and so on and so forth. // This can be done without knowing the full type - we simply copy the bytes over. if stride == 1 && newStride == 1 { for sz := 0; sz < size; sz++ { tmp := repeats[sz] // first we get the bounds of the src and the dest // the srcStart and destStart are the indices assuming a flat array of []T // we need to get the byte slice equivalent. bSrcStart := srcStart * int(sarr.t.Size()) bSrcEnd := (srcStart + stride) * int(sarr.t.Size()) bDestStart := destStart * int(darr.t.Size()) bDestEnd := (destStart + tmp) * int(darr.t.Size()) // then we get the data as a slice of raw bytes sBS := sarr.Header.Raw dBS := darr.Header.Raw // recall that len(src) < len(dest) // it's easier to understand if we define the ranges. // Less prone to errors. sRange := sBS[bSrcStart:bSrcEnd] dRange := dBS[bDestStart:bDestEnd] // finally we copy things. for i := 0; i < len(dRange); i += len(sRange) { copy(dRange[i:], sRange) } srcStart += stride destStart += tmp } // we can straightaway broadcast continue } for j := 0; j < size; j++ { var tmp int tmp = repeats[j] var tSlice array tSlice = sarr.slice(srcStart, src.len()) for k := 0; k < tmp; k++ { if srcStart >= src.len() || destStart+stride > dest.len() { break } dSlice := darr.slice(destStart, destStart+newStride) // THIS IS AN OPTIMIZATION. REVISIT WHEN NEEDED. storage.Copy(dSlice.t.Type, &dSlice.Header, &tSlice.Header) destStart += newStride } srcStart += stride } } return nil } // Concat tensors func (e StdEng) Concat(t Tensor, axis int, others ...Tensor) (retVal Tensor, err error) { switch tt := t.(type) { case DenseTensor: var denses []DenseTensor if denses, err = tensorsToDenseTensors(others); err != nil { return nil, errors.Wrap(err, "Concat failed") } return e.denseConcat(tt, axis, denses) default: return nil, errors.Errorf("NYI") } } func (e StdEng) denseConcat(a DenseTensor, axis int, Ts []DenseTensor) (DenseTensor, error) { ss := make([]Shape, len(Ts)) var err error var isMasked bool for i, T := range Ts { ss[i] = T.Shape() if mt, ok := T.(MaskedTensor); ok { isMasked = isMasked || mt.IsMasked() } } var newShape Shape if newShape, err = a.Shape().Concat(axis, ss...); err != nil { return nil, errors.Wrap(err, "Unable to find new shape that results from concatenation") } retVal := recycledDense(a.Dtype(), newShape, WithEngine(e)) if isMasked { retVal.makeMask() } all := make([]DenseTensor, len(Ts)+1) all[0] = a copy(all[1:], Ts) // TODO: OPIMIZATION // When (axis == 0 && a is row major and all others is row major) || (axis == last axis of A && all tensors are colmajor) // just flat copy // // isOuter is true when the axis is the outermost axis // isInner is true when the axis is the inner most axis isOuter := axis == 0 isInner := axis == (a.Shape().Dims() - 1) // special case var start, end int for _, T := range all { end += T.Shape()[axis] slices := make([]Slice, axis+1) slices[axis] = makeRS(start, end) var v *Dense if v, err = sliceDense(retVal, slices...); err != nil { return nil, errors.Wrap(err, "Unable to slice DenseTensor while performing denseConcat") } // keep dims after slicing switch { case 
v.IsVector() && T.IsMatrix() && axis == 0: v.reshape(v.shape[0], 1) case T.IsRowVec() && axis == 0: T.reshape(T.Shape()[1]) case v.Shape().IsScalarEquiv() && T.Shape().IsScalarEquiv(): copyArray(v.arrPtr(), T.arrPtr()) if mt, ok := T.(MaskedTensor); ok { copy(v.mask, mt.Mask()) } start = end continue default: diff := retVal.Shape().Dims() - v.Shape().Dims() if diff > 0 && isOuter { newShape := make(Shape, v.Shape().Dims()+diff) for i := 0; i < diff; i++ { newShape[i] = 1 } copy(newShape[diff:], v.Shape()) v.reshape(newShape...) } else if diff > 0 && isInner { newShape := v.Shape().Clone() newStrides := v.strides for i := 0; i < diff; i++ { newShape = append(newShape, 1) newStrides = append(newStrides, 1) } v.shape = newShape v.strides = newStrides } else if T.Shape()[axis] == 1 { if err := v.unsqueeze(axis); err != nil { return nil, errors.Wrapf(err, "Unable to keep dims after slicing a shape %v on axis %d where the size is 1", T.Shape(), axis) } } } var vmask, Tmask []bool vmask = v.mask v.mask = nil if mt, ok := T.(MaskedTensor); ok && mt.IsMasked() { Tmask = mt.Mask() mt.SetMask(nil) } if err = assignArray(v, T); err != nil { return nil, errors.Wrap(err, "Unable to assignArray in denseConcat") } // if it's a masked tensor, we copy the mask as well if Tmask != nil { if vmask != nil { if cap(vmask) < len(Tmask) { vmask2 := make([]bool, len(Tmask)) copy(vmask2, vmask) vmask = vmask2 } copy(vmask, Tmask) v.SetMask(vmask) } // mt.SetMask(Tmask) } start = end } return retVal, nil } // Diag ... func (e StdEng) Diag(t Tensor) (retVal Tensor, err error) { a, ok := t.(DenseTensor) if !ok { return nil, errors.Errorf("StdEng only works with DenseTensor for Diagonal()") } if a.Dims() != 2 { err = errors.Errorf(dimMismatch, 2, a.Dims()) return } if err = typeclassCheck(a.Dtype(), numberTypes); err != nil { return nil, errors.Wrap(err, "Diagonal") } rstride := a.Strides()[0] cstride := a.Strides()[1] r := a.Shape()[0] c := a.Shape()[1] m := MinInt(r, c) stride := rstride + cstride b := a.Clone().(DenseTensor) b.Zero() switch a.rtype().Size() { case 1: bdata := b.hdr().Uint8s() adata := a.hdr().Uint8s() for i := 0; i < m; i++ { bdata[i] = adata[i*stride] } case 2: bdata := b.hdr().Uint16s() adata := a.hdr().Uint16s() for i := 0; i < m; i++ { bdata[i] = adata[i*stride] } case 4: bdata := b.hdr().Uint32s() adata := a.hdr().Uint32s() for i := 0; i < m; i++ { bdata[i] = adata[i*stride] } case 8: bdata := b.hdr().Uint64s() adata := a.hdr().Uint64s() for i := 0; i < m; i++ { bdata[i] = adata[i*stride] } default: return nil, errors.Errorf(typeNYI, "Arbitrary sized diag", t) } return b, nil } tensor-0.9.24/defaultengine_matop_stack.go000066400000000000000000000240361426512615100206560ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) // This file contains code for the execution engine to stack tensors func (e StdEng) StackDense(t DenseTensor, axis int, others ...DenseTensor) (retVal DenseTensor, err error) { opdims := t.Dims() if axis >= opdims+1 { err = errors.Errorf(dimMismatch, opdims+1, axis) return } newShape := Shape(BorrowInts(opdims + 1)) newShape[axis] = len(others) + 1 shape := t.Shape() var cur int for i, s := range shape { if i == axis { cur++ } newShape[cur] = s cur++ } info := t.Info() var newStrides []int if info.o.IsColMajor() { newStrides = newShape.CalcStridesColMajor() } else { newStrides = newShape.CalcStrides() } ap := MakeAP(newShape, newStrides, info.o, info.Δ) allNoMat := !t.RequiresIterator() for _, ot := range others { if allNoMat && ot.RequiresIterator() { 
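// A single operand that requires an iterator (a strided or materializable
// view) forces the general denseViewStack path for the whole stack; the
// flat-copy fast path assumes every input is contiguous.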
allNoMat = false } } retVal = recycledDense(t.Dtype(), ap.Shape(), WithEngine(e)) retVal.setAP(&ap) // the "viewStack" method is the more generalized method // and will work for all Tensors, regardless of whether it's a view // But the simpleStack is faster, and is an optimization if allNoMat { retVal = e.denseSimpleStack(t, retVal, axis, others) } else { retVal, err = e.denseViewStack(t, retVal, axis, others) } return } func (e StdEng) denseSimpleStack(t, retVal DenseTensor, axis int, others []DenseTensor) DenseTensor { switch axis { case 0: copyDense(retVal, t) next := t.len() for _, ot := range others { copyDenseSliced(retVal, next, retVal.len(), ot, 0, ot.len()) next += ot.len() } default: axisStride := retVal.Info().Strides()[axis] batches := retVal.len() / axisStride destStart := 0 start := 0 end := start + axisStride for i := 0; i < batches; i++ { copyDenseSliced(retVal, destStart, retVal.len(), t, start, end) for _, ot := range others { destStart += axisStride copyDenseSliced(retVal, destStart, retVal.len(), ot, start, end) i++ } destStart += axisStride start += axisStride end += axisStride } } return retVal } func (e StdEng) denseViewStack(t, retVal DenseTensor, axis int, others []DenseTensor) (DenseTensor, error) { axisStride := retVal.Info().Strides()[axis] batches := retVal.len() / axisStride it := IteratorFromDense(t) its := make([]Iterator, 0, len(others)) for _, ot := range others { oter := IteratorFromDense(ot) its = append(its, oter) } err := e.doViewStack(t, retVal, axisStride, batches, it, others, its) return retVal, err } func (e StdEng) doViewStack(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) error { switch int(t.Dtype().Size()) { case 1: return e.doViewStack1(t, retVal, axisStride, batches, it, others, its) case 2: return e.doViewStack2(t, retVal, axisStride, batches, it, others, its) case 4: return e.doViewStack4(t, retVal, axisStride, batches, it, others, its) case 8: return e.doViewStack8(t, retVal, axisStride, batches, it, others, its) default: return e.doViewStackArbitrary(t, retVal, axisStride, batches, it, others, its) } } func (e StdEng) doViewStack1(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) { data := retVal.hdr().Uint8s()[:0] var mask []bool var retIsMasked bool if mt, ok := t.(MaskedTensor); ok { retIsMasked = mt.IsMasked() } for _, ot := range others { if mt, ok := ot.(MaskedTensor); ok { retIsMasked = retIsMasked || mt.IsMasked() } } f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) { var tmask []bool if mt, ok := t.(MaskedTensor); ok { tmask = mt.Mask() isMasked = mt.IsMasked() } for last = 0; last < axisStride; last++ { id, err := it.Next() if handleNoOp(err) != nil { return -1, isMasked, errors.Wrap(err, "doviewStackfailed") } if err != nil { break } data = append(data, t.hdr().Uint8s()[id]) if isMasked { mask = append(mask, tmask[id]) } } return } for i := 0; i < batches; i++ { var last int var isMasked bool if last, isMasked, err = f(t, it); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } for j, ot := range others { if last, isMasked, err = f(ot, its[j]); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) 
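// The stacked result is masked overall but this source tensor wasn't, so
// pad the mask with `last` false entries to keep it aligned
// element-for-element with the data just appended.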
} } } if mt, ok := retVal.(MaskedTensor); ok { mt.SetMask(mask) } return nil } func (e StdEng) doViewStack2(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) { data := retVal.hdr().Uint16s()[:0] var mask []bool var retIsMasked bool if mt, ok := t.(MaskedTensor); ok { retIsMasked = mt.IsMasked() } for _, ot := range others { if mt, ok := ot.(MaskedTensor); ok { retIsMasked = retIsMasked || mt.IsMasked() } } f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) { var tmask []bool if mt, ok := t.(MaskedTensor); ok { tmask = mt.Mask() isMasked = mt.IsMasked() } for last = 0; last < axisStride; last++ { id, err := it.Next() if handleNoOp(err) != nil { return -1, isMasked, errors.Wrap(err, "doviewStackfailed") } if err != nil { break } data = append(data, t.hdr().Uint16s()[id]) if isMasked { mask = append(mask, tmask[id]) } } return } for i := 0; i < batches; i++ { var last int var isMasked bool if last, isMasked, err = f(t, it); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } for j, ot := range others { if last, isMasked, err = f(ot, its[j]); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } } } if mt, ok := retVal.(MaskedTensor); ok { mt.SetMask(mask) } return nil } func (e StdEng) doViewStack4(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) { data := retVal.hdr().Uint32s()[:0] var mask []bool var retIsMasked bool if mt, ok := t.(MaskedTensor); ok { retIsMasked = mt.IsMasked() } for _, ot := range others { if mt, ok := ot.(MaskedTensor); ok { retIsMasked = retIsMasked || mt.IsMasked() } } f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) { var tmask []bool if mt, ok := t.(MaskedTensor); ok { tmask = mt.Mask() isMasked = mt.IsMasked() } for last = 0; last < axisStride; last++ { id, err := it.Next() if handleNoOp(err) != nil { return -1, isMasked, errors.Wrap(err, "doviewStackfailed") } if err != nil { break } data = append(data, t.hdr().Uint32s()[id]) if isMasked { mask = append(mask, tmask[id]) } } return } for i := 0; i < batches; i++ { var last int var isMasked bool if last, isMasked, err = f(t, it); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } for j, ot := range others { if last, isMasked, err = f(ot, its[j]); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) 
} } } if mt, ok := retVal.(MaskedTensor); ok { mt.SetMask(mask) } return nil } func (e StdEng) doViewStack8(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) { data := retVal.hdr().Uint64s()[:0] var mask []bool var retIsMasked bool if mt, ok := t.(MaskedTensor); ok { retIsMasked = mt.IsMasked() } for _, ot := range others { if mt, ok := ot.(MaskedTensor); ok { retIsMasked = retIsMasked || mt.IsMasked() } } f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) { var tmask []bool if mt, ok := t.(MaskedTensor); ok { tmask = mt.Mask() isMasked = mt.IsMasked() } for last = 0; last < axisStride; last++ { id, err := it.Next() if handleNoOp(err) != nil { return -1, isMasked, errors.Wrap(err, "doviewStackfailed") } if err != nil { break } data = append(data, t.hdr().Uint64s()[id]) if isMasked { mask = append(mask, tmask[id]) } } return } for i := 0; i < batches; i++ { var last int var isMasked bool if last, isMasked, err = f(t, it); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } for j, ot := range others { if last, isMasked, err = f(ot, its[j]); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } } } if mt, ok := retVal.(MaskedTensor); ok { mt.SetMask(mask) } return nil } func (e StdEng) doViewStackArbitrary(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) { dt := t.Dtype() data := retVal.hdr().Raw[:0] // truncate to 0 size := int(dt.Size()) var mask []bool var retIsMasked bool if mt, ok := t.(MaskedTensor); ok { retIsMasked = mt.IsMasked() } for _, ot := range others { if mt, ok := ot.(MaskedTensor); ok { retIsMasked = retIsMasked || mt.IsMasked() } } f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) { var tmask []bool if mt, ok := t.(MaskedTensor); ok { tmask = mt.Mask() isMasked = mt.IsMasked() } bs := t.hdr().Raw for last = 0; last < axisStride; last++ { id, err := it.Next() if handleNoOp(err) != nil { return -1, isMasked, errors.Wrap(err, "doviewStackfailed") } if err != nil { break } v := bs[id*size : id*size+size] data = append(data, v...) if isMasked { mask = append(mask, tmask[id]) } } return } for i := 0; i < batches; i++ { var last int var isMasked bool if last, isMasked, err = f(t, it); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) } for j, ot := range others { if last, isMasked, err = f(ot, its[j]); err != nil { return } if retIsMasked && (!isMasked) { mask = append(mask, make([]bool, last)...) 
} } } if mt, ok := retVal.(MaskedTensor); ok { mt.SetMask(mask) } return nil } tensor-0.9.24/defaultengine_matop_transpose.go000066400000000000000000000065321426512615100215700ustar00rootroot00000000000000// +build !inplacetranspose package tensor import ( "github.com/pkg/errors" ) func (e StdEng) Transpose(a Tensor, expStrides []int) error { if !a.IsNativelyAccessible() { return errors.Errorf("Cannot Transpose() on non-natively accessible tensor") } if dt, ok := a.(DenseTensor); ok { e.denseTranspose(dt, expStrides) return nil } return errors.Errorf("Tranpose for tensor of %T not supported", a) } func (e StdEng) denseTranspose(a DenseTensor, expStrides []int) { if a.rtype() == String.Type { e.denseTransposeString(a, expStrides) return } e.transposeMask(a) switch a.rtype().Size() { case 1: e.denseTranspose1(a, expStrides) case 2: e.denseTranspose2(a, expStrides) case 4: e.denseTranspose4(a, expStrides) case 8: e.denseTranspose8(a, expStrides) default: e.denseTransposeArbitrary(a, expStrides) } } func (e StdEng) transposeMask(a DenseTensor) { if !a.(*Dense).IsMasked() { return } orig := a.(*Dense).Mask() tmp := make([]bool, len(orig)) it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { tmp[j] = orig[i] j++ } copy(orig, tmp) } func (e StdEng) denseTranspose1(a DenseTensor, expStrides []int) { var tmpArr array e.makeArray(&tmpArr, a.Dtype(), a.Size()) u8s := tmpArr.Uint8s() orig := a.hdr().Uint8s() it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { u8s[j] = orig[i] j++ } copy(orig, u8s) } func (e StdEng) denseTranspose2(a DenseTensor, expStrides []int) { var tmpArr array e.makeArray(&tmpArr, a.Dtype(), a.Size()) u16s := tmpArr.Uint16s() orig := a.hdr().Uint16s() it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { u16s[j] = orig[i] j++ } copy(orig, u16s) } func (e StdEng) denseTranspose4(a DenseTensor, expStrides []int) { var tmpArr array e.makeArray(&tmpArr, a.Dtype(), a.Size()) u32s := tmpArr.Uint32s() orig := a.hdr().Uint32s() it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { u32s[j] = orig[i] j++ } copy(orig, u32s) } func (e StdEng) denseTranspose8(a DenseTensor, expStrides []int) { var tmpArr array e.makeArray(&tmpArr, a.Dtype(), a.Size()) u64s := tmpArr.Uint64s() orig := a.hdr().Uint64s() it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { u64s[j] = orig[i] j++ } copy(orig, u64s) } func (e StdEng) denseTransposeString(a DenseTensor, expStrides []int) { var tmpArr array e.makeArray(&tmpArr, a.Dtype(), a.Size()) strs := tmpArr.Strings() orig := a.hdr().Strings() it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { strs[j] = orig[i] j++ } copy(orig, strs) } func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) { rtype := a.rtype() typeSize := int(rtype.Size()) var tmpArr array e.makeArray(&tmpArr, a.Dtype(), a.Size()) // arbs := storage.AsByteSlice(tmpArr.hdr(), rtype) arbs := tmpArr.byteSlice() orig := a.hdr().Raw it := newFlatIterator(a.Info()) var j int for i, err := it.Next(); err == nil; i, err = it.Next() { srcStart := i * typeSize srcEnd := srcStart + typeSize dstStart := j * typeSize dstEnd := dstStart + typeSize copy(arbs[dstStart:dstEnd], orig[srcStart:srcEnd]) j++ } copy(orig, arbs) } 
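// transposePermuteSketch is a hedged, self-contained sketch (not one of the
// generated kernels above) of the out-of-place strategy they all share:
// walk the source in the transposed order given by perm, gather into a
// scratch buffer, then copy the buffer back over the original storage in
// one shot, as denseTranspose8 does. perm[j] is assumed to hold the flat
// source index of the j-th element in the new layout.
func transposePermuteSketch(data []float64, perm []int) {
	tmp := make([]float64, len(data))
	for j, i := range perm {
		tmp[j] = data[i] // gather in destination order
	}
	copy(data, tmp) // single write-back pass over the original storage
}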
tensor-0.9.24/defaultengine_matop_transpose_inplace.go000066400000000000000000000142221426512615100232560ustar00rootroot00000000000000// +build inplacetranspose package tensor import ( "github.com/pkg/errors" ) func (e StdEng) Transpose(a Tensor, expStrides []int) error { if !a.IsNativelyAccessible() { return errors.Errorf("Cannot Transpose() on non-natively accessible tensor") } if dt, ok := a.(DenseTensor); ok { e.denseTranspose(dt, expStrides) return nil } return errors.Errorf("Tranpose for tensor of %T not supported", a) } func (e StdEng) denseTranspose(a DenseTensor, expStrides []int) { if a.rtype() == String.Type { e.denseTransposeString(a, expStrides) return } e.transposeMask(a) switch a.rtype().Size() { case 1: e.denseTranspose1(a, expStrides) case 2: e.denseTranspose2(a, expStrides) case 4: e.denseTranspose4(a, expStrides) case 8: e.denseTranspose8(a, expStrides) default: e.denseTransposeArbitrary(a, expStrides) } } func (e StdEng) transposeMask(a DenseTensor) { if !a.(*Dense).IsMasked() { return } shape := a.Shape() if len(shape) != 2 { // TODO(poopoothegorilla): currently only two dimensions are implemented return } n, m := shape[0], shape[1] mask := a.(*Dense).Mask() size := len(mask) track := NewBitMap(size) track.Set(0) track.Set(size - 1) for i := 0; i < size; i++ { srci := i if track.IsSet(srci) { continue } srcv := mask[srci] for { oc := srci % n or := (srci - oc) / n desti := oc*m + or if track.IsSet(desti) { break } track.Set(desti) destv := mask[desti] mask[desti] = srcv srci = desti srcv = destv } } } func (e StdEng) denseTranspose1(a DenseTensor, expStrides []int) { axes := a.transposeAxes() size := a.len() // first we'll create a bit-map to track which elements have been moved to their correct places track := NewBitMap(size) track.Set(0) track.Set(size - 1) // first and last element of a transposedon't change var saved, tmp byte var i int data := a.hdr().Uint8s() if len(data) < 4 { return } for i = 1; ; { dest := a.transposeIndex(i, axes, expStrides) if track.IsSet(i) && track.IsSet(dest) { data[i] = saved saved = 0 for i < size && track.IsSet(i) { i++ } if i >= size { break } continue } track.Set(i) tmp = data[i] data[i] = saved saved = tmp i = dest } } func (e StdEng) denseTranspose2(a DenseTensor, expStrides []int) { axes := a.transposeAxes() size := a.len() // first we'll create a bit-map to track which elements have been moved to their correct places track := NewBitMap(size) track.Set(0) track.Set(size - 1) // first and last element of a transposedon't change var saved, tmp uint16 var i int data := a.hdr().Uint16s() if len(data) < 4 { return } for i = 1; ; { dest := a.transposeIndex(i, axes, expStrides) if track.IsSet(i) && track.IsSet(dest) { data[i] = saved saved = 0 for i < size && track.IsSet(i) { i++ } if i >= size { break } continue } track.Set(i) tmp = data[i] data[i] = saved saved = tmp i = dest } } func (e StdEng) denseTranspose4(a DenseTensor, expStrides []int) { axes := a.transposeAxes() size := a.len() // first we'll create a bit-map to track which elements have been moved to their correct places track := NewBitMap(size) track.Set(0) track.Set(size - 1) // first and last element of a transposedon't change var saved, tmp uint32 var i int data := a.hdr().Uint32s() if len(data) < 4 { return } for i = 1; ; { dest := a.transposeIndex(i, axes, expStrides) if track.IsSet(i) && track.IsSet(dest) { data[i] = saved saved = 0 for i < size && track.IsSet(i) { i++ } if i >= size { break } continue } track.Set(i) tmp = data[i] data[i] = saved saved = tmp i = dest 
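// Cycle-following: writing `saved` into slot i displaces the previous value,
// which must land at `dest` on the next iteration; when the walk returns to
// an already-visited slot the permutation cycle is closed and the scan
// resumes at the next unvisited index. The bitmap guarantees every element
// moves exactly once, giving an in-place transpose with O(1) extra space.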
} } func (e StdEng) denseTranspose8(a DenseTensor, expStrides []int) { axes := a.transposeAxes() size := a.len() // first we'll create a bit-map to track which elements have been moved to their correct places track := NewBitMap(size) track.Set(0) track.Set(size - 1) // first and last element of a transposedon't change var saved, tmp uint64 var i int data := a.hdr().Uint64s() if len(data) < 4 { return } for i = 1; ; { dest := a.transposeIndex(i, axes, expStrides) if track.IsSet(i) && track.IsSet(dest) { data[i] = saved saved = 0 for i < size && track.IsSet(i) { i++ } if i >= size { break } continue } track.Set(i) // log.Printf("i: %d start %d, end %d | tmp %v saved %v", i, start, end, tmp, saved) tmp = data[i] data[i] = saved saved = tmp i = dest } } func (e StdEng) denseTransposeString(a DenseTensor, expStrides []int) { axes := a.transposeAxes() size := a.len() // first we'll create a bit-map to track which elements have been moved to their correct places track := NewBitMap(size) track.Set(0) track.Set(size - 1) // first and last element of a transposedon't change var saved, tmp string var i int data := a.hdr().Strings() if len(data) < 4 { return } for i = 1; ; { dest := a.transposeIndex(i, axes, expStrides) if track.IsSet(i) && track.IsSet(dest) { data[i] = saved saved = "" for i < size && track.IsSet(i) { i++ } if i >= size { break } continue } track.Set(i) tmp = data[i] data[i] = saved saved = tmp i = dest } } func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) { axes := a.transposeAxes() size := a.len() rtype := a.rtype() typeSize := int(rtype.Size()) // first we'll create a bit-map to track which elements have been moved to their correct places track := NewBitMap(size) track.Set(0) track.Set(size - 1) // first and last element of a transposedon't change saved := make([]byte, typeSize, typeSize) tmp := make([]byte, typeSize, typeSize) var i int data := a.arr().Raw if len(data) < 4*typeSize { return } for i = 1; ; { dest := a.transposeIndex(i, axes, expStrides) start := typeSize * i end := start + typeSize if track.IsSet(i) && track.IsSet(dest) { copy(data[start:end], saved) for i := range saved { saved[i] = 0 } for i < size && track.IsSet(i) { i++ } if i >= size { break } continue } track.Set(i) copy(tmp, data[start:end]) copy(data[start:end], saved) copy(saved, tmp) i = dest } } tensor-0.9.24/defaultengine_minmax.go000066400000000000000000000224551426512615100176450ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
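// The generated MinBetween/MaxBetween below are elementwise over the ordered
// types: out[i] = min(a[i], b[i]) (resp. max). A hedged sketch against the
// engine methods defined in this file:
//
//	a := New(WithShape(3), WithBacking([]float64{1, 5, 3}))
//	b := New(WithShape(3), WithBacking([]float64{4, 2, 3}))
//	m, _ := StdEng{}.MinBetween(a, b)  // [1 2 3]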
package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) var ( _ MinBetweener = StdEng{} _ MaxBetweener = StdEng{} ) func (e StdEng) MinBetween(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, ordTypes); err != nil { return nil, errors.Wrapf(err, "MinBetween failed") } var reuse DenseTensor var safe bool if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.MinBetween") } // check to see if anything needs to be created if reuse == nil { if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && reuse == nil: err = e.E.MinBetweenIter(typ, dataA, dataB, ait, bit) retVal = a case safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.MinBetweenIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool panic("Unreachable") } return } // standard switch { case !safe && reuse == nil: err = e.E.MinBetween(typ, dataA, dataB) retVal = a case safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.MinBetween(typ, dataReuse, dataB) retVal = reuse default: panic("Unreachable") } return } func (e StdEng) MaxBetween(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = binaryCheck(a, b, ordTypes); err != nil { return nil, errors.Wrapf(err, "MaxBetween failed") } var reuse DenseTensor var safe bool if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.MaxBetween") } // check to see if anything needs to be created if reuse == nil { if swap { reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && reuse == nil: err = e.E.MaxBetweenIter(typ, dataA, dataB, ait, bit) retVal = a case safe && reuse != nil: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.MaxBetweenIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool panic("Unreachable") } return } // standard switch { case !safe && reuse == nil: err = e.E.MaxBetween(typ, dataA, dataB) retVal = a case safe && reuse != nil: storage.Copy(typ, dataReuse, dataA) err = e.E.MaxBetween(typ, dataReuse, dataB) retVal = reuse default: panic("Unreachable") } return } func (e StdEng) MinBetweenScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, "MinBetween failed") } if err = scalarDtypeCheck(t, s); err != nil { 
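// the scalar operand s must already share the tensor's Dtype; the scalar variants perform no implicit conversion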
return nil, errors.Wrap(err, "MinBetween failed") } var reuse DenseTensor var safe bool if reuse, safe, _, _, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.MinBetween") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.MinBetween") } scalarHeader = dataA } // check to see if anything needs to be created if reuse == nil { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && reuse == nil: err = e.E.MinBetweenIter(typ, dataA, dataB, ait, bit) retVal = a case safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.MinBetweenIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.MinBetweenIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool panic("Unreachable") } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.MinBetween(typ, dataReuse, dataB) retVal = reuse return case safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.MinBetween(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && reuse == nil: err = e.E.MinBetween(typ, dataA, dataB) retVal = a case safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.MinBetween(typ, dataReuse, dataB) retVal = reuse case safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.MinBetween(typ, dataA, dataReuse) retVal = reuse default: panic("Unreachable") } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } func (e StdEng) MaxBetweenScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(t, ordTypes); err != nil { return nil, errors.Wrapf(err, "MaxBetween failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "MaxBetween failed") } var reuse DenseTensor var safe bool if reuse, safe, _, _, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.MaxBetween") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, 
"StdEng.MaxBetween") } scalarHeader = dataA } // check to see if anything needs to be created if reuse == nil { reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter { iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && reuse == nil: err = e.E.MaxBetweenIter(typ, dataA, dataB, ait, bit) retVal = a case safe && reuse != nil && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.MaxBetweenIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case safe && reuse != nil && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.MaxBetweenIter(typ, dataReuse, dataB, iit, bit) retVal = reuse default: // safe && bool panic("Unreachable") } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.MaxBetween(typ, dataReuse, dataB) retVal = reuse return case safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.MaxBetween(typ, dataReuse, dataA) retVal = reuse return } } // standard switch { case !safe && reuse == nil: err = e.E.MaxBetween(typ, dataA, dataB) retVal = a case safe && reuse != nil && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.MaxBetween(typ, dataReuse, dataB) retVal = reuse case safe && reuse != nil && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.MaxBetween(typ, dataA, dataReuse) retVal = reuse default: panic("Unreachable") } if newAlloc { freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) return } tensor-0.9.24/defaultengine_misc.go000066400000000000000000000044001426512615100172750ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e StdEng) Clamp(a Tensor, min, max interface{}, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, nonComplexNumberTypes); err != nil { return nil, errors.Wrap(err, "Clamp failed") } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Neg") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.ClampIter(typ, cloned.hdr(), ait, min, max); err != nil { return nil, errors.Wrapf(err, "Unable to perform Clamp") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.ClampIter(typ, dataReuse, rit, min, max) retVal = reuse case !safe: err = e.E.ClampIter(typ, dataA, ait, min, max) retVal = a default: cloned := a.Clone().(Tensor) err = e.E.ClampIter(typ, cloned.hdr(), ait, min, max) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Clamp(typ, cloned.hdr(), min, max); err != nil { return nil, errors.Wrapf(err, "Unable to perform Clamp") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = 
e.E.Clamp(typ, dataReuse, min, max) retVal = reuse case !safe: err = e.E.Clamp(typ, dataA, min, max) retVal = a default: cloned := a.Clone().(Tensor) err = e.E.Clamp(typ, cloned.hdr(), min, max) retVal = cloned } return } func (e StdEng) FMA(a, x, y Tensor) (Tensor, error) { return e.Mul(a, x, WithIncr(y)) } func (e StdEng) FMAScalar(a Tensor, x interface{}, y Tensor) (Tensor, error) { return e.MulScalar(a, x, true, WithIncr(y)) } tensor-0.9.24/defaultengine_prep.go000066400000000000000000000125561426512615100173210ustar00rootroot00000000000000package tensor import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" // "log" ) func handleFuncOpts(expShape Shape, expType Dtype, o DataOrder, strict bool, opts ...FuncOpt) (reuse DenseTensor, safe, toReuse, incr, same bool, err error) { fo := ParseFuncOpts(opts...) reuseT, incr := fo.IncrReuse() safe = fo.Safe() same = fo.Same() toReuse = reuseT != nil if toReuse { if reuse, err = getDenseTensor(reuseT); err != nil { returnOpOpt(fo) err = errors.Wrapf(err, "Cannot reuse a Tensor that isn't a DenseTensor. Got %T instead", reuseT) return } if reuse != nil && !reuse.IsNativelyAccessible() { returnOpOpt(fo) err = errors.Errorf(inaccessibleData, reuse) return } if (strict || same) && reuse.Dtype() != expType { returnOpOpt(fo) err = errors.Errorf(typeMismatch, expType, reuse.Dtype()) err = errors.Wrapf(err, "Cannot use reuse") return } if reuse.len() != expShape.TotalSize() && !expShape.IsScalar() { returnOpOpt(fo) err = errors.Errorf(shapeMismatch, reuse.Shape(), expShape) err = errors.Wrapf(err, "Cannot use reuse: shape mismatch - reuse.len() %v, expShape.TotalSize() %v", reuse.len(), expShape.TotalSize()) return } if !reuse.Shape().Eq(expShape) { cloned := expShape.Clone() if err = reuse.Reshape(cloned...); err != nil { return } ReturnInts([]int(cloned)) } if !incr && reuse != nil { reuse.setDataOrder(o) // err = reuse.reshape(expShape...) } } returnOpOpt(fo) return } func binaryCheck(a, b Tensor, tc *typeclass) (err error) { // check if the tensors are accessible if !a.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, a) } if !b.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, b) } at := a.Dtype() bt := b.Dtype() if tc != nil { if err = typeclassCheck(at, tc); err != nil { return errors.Wrapf(err, typeclassMismatch, "a") } if err = typeclassCheck(bt, tc); err != nil { return errors.Wrapf(err, typeclassMismatch, "b") } } if at.Kind() != bt.Kind() { return errors.Errorf(typeMismatch, at, bt) } if !a.Shape().Eq(b.Shape()) { return errors.Errorf(shapeMismatch, b.Shape(), a.Shape()) } return nil } func unaryCheck(a Tensor, tc *typeclass) error { if !a.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, a) } at := a.Dtype() if tc != nil { if err := typeclassCheck(at, tc); err != nil { return errors.Wrapf(err, typeclassMismatch, "a") } } return nil } // scalarDtypeCheck checks that a scalar value has the same dtype as the dtype of a given tensor. func scalarDtypeCheck(a Tensor, b interface{}) error { var dt Dtype switch bt := b.(type) { case Dtyper: dt = bt.Dtype() default: t := reflect.TypeOf(b) dt = Dtype{t} } if a.Dtype() != dt { return errors.Errorf("Expected scalar to have the same Dtype as the tensor (%v). Got %T instead ", a.Dtype(), b) } return nil } // prepDataVV prepares the data given the input and reuse tensors. It also returns several indicators // // useIter indicates that the iterator methods should be used. // swap indicates that the operands are swapped.
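//
// A minimal sketch of how callers in this package consume these results
// (illustrative only; Op and OpIter stand in for any generated kernel pair,
// such as e.E.MinBetween and e.E.MinBetweenIter):
//
//	dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err := prepDataVV(a, b, reuse)
//	if err != nil { /* wrap and return */ }
//	if useIter {
//		err = e.E.OpIter(typ, dataA, dataB, ait, bit) // iterator-aware kernel
//	} else {
//		err = e.E.Op(typ, dataA, dataB) // flat kernel over contiguous storage
//	}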
func prepDataVV(a, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, ait, bit, iit Iterator, useIter, swap bool, err error) { // get data dataA = a.hdr() dataB = b.hdr() if reuse != nil { dataReuse = reuse.hdr() } // iter useIter = a.RequiresIterator() || b.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) || !a.DataOrder().HasSameOrder(b.DataOrder()) || (reuse != nil && (!a.DataOrder().HasSameOrder(reuse.DataOrder()) || !b.DataOrder().HasSameOrder(reuse.DataOrder()))) if useIter { ait = a.Iterator() bit = b.Iterator() if reuse != nil { iit = reuse.Iterator() } } // swap if _, ok := a.(*CS); ok { if _, ok := b.(DenseTensor); ok { swap = true dataA, dataB = dataB, dataA ait, bit = bit, ait } } return } func prepDataVS(a Tensor, b interface{}, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, ait, iit Iterator, useIter bool, newAlloc bool, err error) { // get data dataA = a.hdr() dataB, newAlloc = scalarToHeader(b) if reuse != nil { dataReuse = reuse.hdr() } if a.IsScalar() { return } useIter = a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) || (reuse != nil && !reuse.DataOrder().HasSameOrder(a.DataOrder())) if useIter { ait = a.Iterator() if reuse != nil { iit = reuse.Iterator() } } return } func prepDataSV(a interface{}, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, bit, iit Iterator, useIter bool, newAlloc bool, err error) { // get data dataA, newAlloc = scalarToHeader(a) dataB = b.hdr() if reuse != nil { dataReuse = reuse.hdr() } // get iterator if b.IsScalar() { return } useIter = b.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) || (reuse != nil && !reuse.DataOrder().HasSameOrder(b.DataOrder())) if useIter { bit = b.Iterator() if reuse != nil { iit = reuse.Iterator() } } return } func prepDataUnary(a Tensor, reuse Tensor) (dataA, dataReuse *storage.Header, ait, rit Iterator, useIter bool, err error) { // get data dataA = a.hdr() if reuse != nil { dataReuse = reuse.hdr() } // get iterator if a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) { ait = a.Iterator() if reuse != nil { rit = reuse.Iterator() } useIter = true } return } tensor-0.9.24/defaultengine_selbyidx.go000066400000000000000000000166131426512615100201760ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" "reflect" ) // SelectByIndices selects the values given the in `indices` tensor. // // Currently SelectByIndices only supports Dense tensors that do not require the use of iterators. // Please make a pull request to support tensors that require the use of an iterator to traverse data. func (e StdEng) SelectByIndices(a, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if !indices.Shape().IsVectorLike() { return nil, errors.Errorf("Expected indices to be a vector. Got %v instead", indices.Shape()) } if indices.Dtype() != Int { return nil, errors.Errorf("Expected indices to be a vector of ints. Got %v instead", indices.Dtype()) } // if b is a scalar, then use Slice if a.Shape().IsScalarEquiv() { slices := make([]Slice, a.Shape().Dims()) slices[axis] = ss(getInts(indices)[0]) return a.Slice(slices...) 
} expectedShape := a.Shape().Clone() expectedShape[axis] = indices.Shape().TotalSize() var reuse DenseTensor var safe, toReuse, _ bool if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if safe || !toReuse && reuse == nil && safe { // create reuse reuse = New(WithShape(expectedShape...), Of(a.Dtype())) } if !safe { if a.Shape()[axis] != indices.Shape().TotalSize() { expected := a.Shape().Clone() expected[axis] = indices.Shape().TotalSize() return nil, errors.Errorf("Expected a safe reuse to have the same shape as the expected shape of the result: %v. The input a has %v ", expected, a.Shape()) } reuse = a.(DenseTensor) } typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, _, err = prepDataVV(a, indices, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.Add") } if useIter { e.iterSelectByIdx(axis, dataA, dataB, dataReuse, ait, bit, iit) //TODO return } e.selectByIdx(axis, dataB.Ints(), typ, dataA, dataReuse, a.(*Dense).AP, reuse.(*Dense).AP) return reuse, nil } func (e StdEng) iterSelectByIdx(axis int, dataA, dataB, dataReuse *storage.Header, ait, bit, iit Iterator) { panic("iterSelectByIdx is not yet implemented") } func (e StdEng) selectByIdx(axis int, indices []int, typ reflect.Type, dataA, dataRetVal *storage.Header, apA, apRet AP) { isInnermost := axis == apA.shape.Dims()-1 outer := ProdInts(apA.shape[:axis]) axStride := apA.strides[axis] retStride := apRet.strides[axis] var outerRetStride int if axis == 0 { // then it's the outermost outerRetStride = apRet.strides[axis] * 2 } else { outerRetStride = apRet.strides[axis-1] } srcCoord := make([]int, apA.shape.Dims()) dstCoord := make([]int, apRet.shape.Dims()) if isInnermost { prevAxis := axis - 1 if prevAxis < 0 { // this may be the case if input is a vector prevAxis = 0 } prevStride := apA.strides[prevAxis] retPrevStride := apRet.strides[prevAxis] for i, idx := range indices { srcCoord[axis] = idx dstCoord[axis] = i start, _ := Ltoi(apA.shape, apA.strides, srcCoord...) dstStart, _ := Ltoi(apRet.shape, apRet.strides, dstCoord...) for o := 0; o < outer; o++ { end := start + axStride dstEnd := dstStart + retStride storage.CopySliced(typ, dataRetVal, dstStart, dstEnd, dataA, start, end) start += prevStride dstStart += retPrevStride } } return } for i, idx := range indices { srcCoord[axis] = idx dstCoord[axis] = i start, _ := Ltoi(apA.shape, apA.strides, srcCoord...) dstStart, _ := Ltoi(apRet.shape, apRet.strides, dstCoord...) for o := 0; o < outer; o++ { end := start + axStride dstEnd := dstStart + retStride storage.CopySliced(typ, dataRetVal, dstStart, dstEnd, dataA, start, end) start = end + axStride dstStart = dstEnd + (outerRetStride - retStride) } } } // SelectByIndicesB computes the gradient of the result of `SelectByIndices`. // // Currently SelectByIndicesB only supports Dense tensors that do not require the use of iterators. // Please make a pull request to support tensors that require the use of an iterator to traverse data. func (e StdEng) SelectByIndicesB(input, outGrad, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if !indices.Shape().IsVectorLike() { return nil, errors.Errorf("Expected indices to be a vector. Got %v instead", indices.Shape()) } if indices.Dtype() != Int { return nil, errors.Errorf("Expected indices to be a vector of ints.
Got %v instead", outGrad.Dtype()) } // if b is a scalar, then use Slice if input.Shape().IsScalarEquiv() { slices := make([]Slice, input.Shape().Dims()) slices[axis] = ss(outGrad.Data().([]int)[0]) return input.Slice(slices...) } expectedShape := input.Shape().Clone() var reuse DenseTensor var _, toReuse, _ bool if reuse, _, toReuse, _, _, err = handleFuncOpts(input.Shape(), input.Dtype(), input.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !toReuse && reuse == nil { // create reuse reuse = New(WithShape(expectedShape...), Of(input.Dtype())) } typ := input.Dtype().Type var _, dataB, dataReuse *storage.Header var _, bit, iit Iterator var useIter bool if _, dataB, dataReuse, _, bit, iit, useIter, _, err = prepDataVV(input, outGrad, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.SelectByIndicesB") } if useIter { e.iterSelectByIndicesB(axis, dataB, dataReuse, bit, iit) //TODO return } e.selectByIndicesB(axis, getInts(indices), typ, dataB, dataReuse, outGrad.(*Dense).AP, reuse.(*Dense).AP) return reuse, nil } func (e StdEng) iterSelectByIndicesB(axis int, dataB, dataGradA *storage.Header, bit, iit Iterator) { panic("iterSelectByIndicesB not implemented yet") } func (e StdEng) selectByIndicesB(axis int, indices []int, typ reflect.Type, dataB, dataGradA *storage.Header, apB, apRet AP) { isInnermost := axis == apB.shape.Dims()-1 outer := ProdInts(apB.shape[:axis]) axStride := apB.strides[axis] retStride := apRet.strides[axis] var outerRetStride int if axis == 0 { outerRetStride = apRet.strides[axis] * 2 } else { outerRetStride = apRet.strides[axis-1] } dstCoord := make([]int, apB.shape.Dims()) srcCoord := make([]int, apRet.shape.Dims()) if isInnermost { prevAxis := axis - 1 if prevAxis < 0 { // this may be the case if input is a vector prevAxis = 0 } retPrevStride := apB.strides[prevAxis] prevStride := apRet.strides[prevAxis] for i, idx := range indices { dstCoord[axis] = idx srcCoord[axis] = i dstStart, _ := Ltoi(apB.shape, apB.strides, dstCoord...) start, _ := Ltoi(apRet.shape, apRet.strides, srcCoord...) for o := 0; o < outer; o++ { dstEnd := dstStart + axStride end := start + retStride e.E.AddSliced(typ, dataGradA, dstStart, dstEnd, dataB, start, end) dstStart += prevStride start += retPrevStride } } return } for i, idx := range indices { dstCoord[axis] = idx srcCoord[axis] = i dstStart, _ := Ltoi(apRet.shape, apRet.strides, dstCoord...) start, _ := Ltoi(apB.shape, apB.strides, srcCoord...) for o := 0; o < outer; o++ { dstEnd := dstStart + axStride end := start + retStride e.E.AddSliced(typ, dataGradA, dstStart, dstEnd, dataB, start, end) dstStart = dstEnd + axStride start = end + (outerRetStride - retStride) } } } tensor-0.9.24/defaultengine_softmax.go000066400000000000000000000366561426512615100200450ustar00rootroot00000000000000package tensor import ( "fmt" "math" "sync" "github.com/chewxy/math32" "github.com/pkg/errors" ) // if dims = 2 and axis -1 it returns the last dimension. In this case 1 func resolveAxis(axis int, dims int) int { res := axis % dims if (res < 0 && dims > 0) || (res > 0 && dims < 0) { return res + dims } return res } // SoftMax performs the softmax operation on the given tensor. Currently it expects the tensor to be a Dense tensor. // Please make a pull request to support sparse tensors. 
// // The softmax function is defined as : // σ(x) = e^x_i / Σ(e^x_i) func (e StdEng) SoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { axis = resolveAxis(axis, x.Dims()) expectedShape := x.Shape() var reuse DenseTensor var safe, toReuse, _ bool if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, x.Dtype(), x.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if safe || !toReuse && reuse == nil && safe { // create reuse reuse = New(WithShape(expectedShape...), Of(x.Dtype())) } switch x.Dtype() { case Float32: if expectedShape.Dims()-1 == axis { e.softMaxLastDimF32(reuse, x, axis, false) } else { e.softMaxInnerDimF32(reuse, x, axis, false) } case Float64: if expectedShape.Dims()-1 == axis { e.softMaxLastDimF64(reuse, x, axis, false) } else { e.softMaxInnerDimF64(reuse, x, axis, false) } default: return nil, fmt.Errorf("type %v not supported", x.Dtype()) } return reuse, nil } // SoftMaxB computes gradient of the input `x`, given the `output = SoftMax(x)` and its associated gradient. Currently it expects the tensor to be a Dense tensor. // Please make a pull request to support sparse tensors. func (e StdEng) SoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if !output.Shape().Eq(grad.Shape()) { return nil, fmt.Errorf("output and grad shapes don't match") } if !output.Dtype().Eq(grad.Dtype()) { return nil, fmt.Errorf("output and grad types don't match") } axis = resolveAxis(axis, output.Dims()) expectedShape := output.Shape() var reuse DenseTensor var safe, toReuse, _ bool if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, output.Dtype(), output.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if safe || !toReuse && reuse == nil && safe { // create reuse reuse = New(WithShape(expectedShape...), Of(output.Dtype())) } switch output.Dtype() { case Float32: if expectedShape.Dims()-1 == axis { e.softMaxBLastDimF32(reuse, output, grad, axis, false) } else { e.softMaxBInnerDimF32(reuse, output, grad, axis, false) } case Float64: if expectedShape.Dims()-1 == axis { e.softMaxBLastDimF64(reuse, output, grad, axis, false) } else { e.softMaxBInnerDimF64(reuse, output, grad, axis, false) } default: return nil, fmt.Errorf("type %v not supported", output.Dtype()) } return reuse, nil } // LogSoftMax performs softmax but in log space. This provides some amount of numerical stabilization. // Conceptually it is the same as performing a logarithm after applying the softmax function. // Currently it expects the tensor to be a Dense tensor. // Please make a pull request to support sparse tensors. 
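//
// A minimal usage sketch (illustrative only): for a float tensor x,
//
//	lsm, err := StdEng{}.LogSoftMax(x, -1)
//
// should agree elementwise, up to floating-point error, with applying Log to the
// result of SoftMax, while avoiding overflow for large inputs.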
func (e StdEng) LogSoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { axis = resolveAxis(axis, x.Dims()) expectedShape := x.Shape() var reuse DenseTensor var safe, toReuse, _ bool if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, x.Dtype(), x.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if safe || !toReuse && reuse == nil && safe { // create reuse reuse = New(WithShape(expectedShape...), Of(x.Dtype())) } switch x.Dtype() { case Float32: if expectedShape.Dims()-1 == axis { e.softMaxLastDimF32(reuse, x, axis, true) } else { e.softMaxInnerDimF32(reuse, x, axis, true) } case Float64: if expectedShape.Dims()-1 == axis { e.softMaxLastDimF64(reuse, x, axis, true) } else { e.softMaxInnerDimF64(reuse, x, axis, true) } default: return nil, fmt.Errorf("type %v not supported", x.Dtype()) } return reuse, nil } // LogSoftMaxB computes the gradient of the input `x`, given the `output = LogSoftmax(x)` and its associated gradient. // Currently it expects the tensor to be a Dense tensor. // Please make a pull request to support sparse tensors. func (e StdEng) LogSoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) { if !output.Shape().Eq(grad.Shape()) { return nil, fmt.Errorf("output and grad shapes don't match") } if !output.Dtype().Eq(grad.Dtype()) { return nil, fmt.Errorf("output and grad types don't match") } axis = resolveAxis(axis, output.Dims()) expectedShape := output.Shape() var reuse DenseTensor var safe, toReuse, _ bool if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, output.Dtype(), output.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if safe || !toReuse && reuse == nil && safe { // create reuse reuse = New(WithShape(expectedShape...), Of(output.Dtype())) } switch output.Dtype() { case Float32: if expectedShape.Dims()-1 == axis { e.softMaxBLastDimF32(reuse, output, grad, axis, true) } else { e.softMaxBInnerDimF32(reuse, output, grad, axis, true) } case Float64: if expectedShape.Dims()-1 == axis { e.softMaxBLastDimF64(reuse, output, grad, axis, true) } else { e.softMaxBInnerDimF64(reuse, output, grad, axis, true) } default: return nil, fmt.Errorf("type %v not supported", output.Dtype()) } return reuse, nil } func (e StdEng) softMaxLastDimF64(output Tensor, x Tensor, axis int, logSoftMax bool) { outputArr := getFloat64s(output) xArr := getFloat64s(x) xShape := x.Shape() outerSize := 1 dimSize := xShape[axis] for i := 0; i < axis; i++ { outerSize *= xShape[i] } var wg sync.WaitGroup for ii := 0; ii < outerSize; ii++ { wg.Add(1) go func(ii int, wg *sync.WaitGroup) { maxInput := xArr[0] for j := 1; j < dimSize; j++ { i := ii*dimSize + j if xArr[i] > maxInput { maxInput = xArr[i] } } sumExp := float64(0.0) for j := 0; j < dimSize; j++ { i := ii*dimSize + j z := xArr[i] - maxInput exp := math.Exp(z) if logSoftMax { outputArr[i] = z } else { outputArr[i] = exp } sumExp += exp } if !logSoftMax { sumExp = 1 / sumExp } for j := 0; j < dimSize; j++ { i := ii*dimSize + j if logSoftMax { outputArr[i] -= math.Log(sumExp) } else { outputArr[i] *= sumExp } } wg.Done() }(ii, &wg) } wg.Wait() } func (e StdEng) softMaxBLastDimF64(inputGrad, output, grad Tensor, axis int, logSoftMax bool) { dx := getFloat64s(inputGrad) outputArr := getFloat64s(output) gradArr := getFloat64s(grad) outputShape := output.Shape() outerSize := 1 dimSize := outputShape[axis] for i := 0; i < axis; i++ { outerSize *= outputShape[i] } var wg 
sync.WaitGroup for ii := 0; ii < outerSize; ii++ { wg.Add(1) if logSoftMax { go func(gradArr, dx []float64, ii int, wg *sync.WaitGroup) { sum := gradArr[ii*dimSize] for j := 1; j < dimSize; j++ { i := ii*dimSize + j sum += gradArr[i] } for j := 0; j < dimSize; j++ { i := ii*dimSize + j dx[i] = gradArr[i] - (math.Exp(outputArr[i]) * sum) } wg.Done() }(gradArr, dx, ii, &wg) } else { go func(outputArr, gradArr, dx []float64, ii int, wg *sync.WaitGroup) { //mul := make([]float64, dimSize) var sum float64 for j := 0; j < dimSize; j++ { i := ii*dimSize + j //mul[j] = outputArr[i] * gradArr[i] sum += outputArr[i] * gradArr[i] } // sum := mul[0] // for j := 1; j < dimSize; j++ { // sum += mul[j] // } for j := 0; j < dimSize; j++ { i := ii*dimSize + j dx[i] = (gradArr[i] - sum) * outputArr[i] } wg.Done() }(outputArr, gradArr, dx, ii, &wg) } } wg.Wait() } func (e StdEng) softMaxInnerDimF64(output Tensor, x Tensor, axis int, logSoftmax bool) { xShape := x.Shape() innerSize, outerSize := 1, 1 for i := 0; i < axis; i++ { outerSize *= xShape[i] } for i := axis + 1; i < xShape.Dims(); i++ { innerSize *= xShape[i] } dimSize := xShape[axis] dimStride := innerSize outerStride := dimSize * dimStride outputArr := getFloat64s(output) xArr := getFloat64s(x) var wg sync.WaitGroup for ii := 0; ii < innerSize*outerSize; ii++ { wg.Add(1) go func(ii int, wg *sync.WaitGroup) { outerIndex, innerIndex := divmod(ii, innerSize) inputPart := xArr[outerIndex*outerStride+innerIndex:] outputPart := outputArr[outerIndex*outerStride+innerIndex:] maxInput := inputPart[0] for j := 1; j < dimSize; j++ { i := j * dimStride if inputPart[i] > maxInput { maxInput = inputPart[i] } } sumExp := 0.0 for j := 0; j < dimSize; j++ { i := j * dimStride exp := math.Exp(inputPart[i] - maxInput) if !logSoftmax { outputPart[i] = exp } sumExp += exp } if logSoftmax { sumExp = math.Log(sumExp) } else { sumExp = 1 / sumExp } for j := 0; j < dimSize; j++ { i := j * dimStride if logSoftmax { outputPart[i] = inputPart[i] - maxInput - sumExp } else { outputPart[i] *= sumExp } } wg.Done() }(ii, &wg) } wg.Wait() } func (e StdEng) softMaxBInnerDimF64(inputGrad, output, grad Tensor, axis int, logSoftmax bool) { dxShape := inputGrad.Shape() innerSize, outerSize := 1, 1 for i := 0; i < axis; i++ { outerSize *= dxShape[i] } for i := axis + 1; i < dxShape.Dims(); i++ { innerSize *= dxShape[i] } dimSize := dxShape[axis] dimStride := innerSize outerStride := dimSize * dimStride dxArr := getFloat64s(inputGrad) outputArr := getFloat64s(output) gradArr := getFloat64s(grad) var wg sync.WaitGroup for ii := 0; ii < innerSize*outerSize; ii++ { wg.Add(1) go func(ii int, wg *sync.WaitGroup) { outerIndex, innerIndex := divmod(ii, innerSize) gradPart := gradArr[outerIndex*outerStride+innerIndex:] dxPart := dxArr[outerIndex*outerStride+innerIndex:] outputPart := outputArr[outerIndex*outerStride+innerIndex:] sum := 0.0 for j := 0; j < dimSize; j++ { i := j * dimStride if logSoftmax { sum += gradPart[i] } else { sum += gradPart[i] * outputPart[i] } } for j := 0; j < dimSize; j++ { i := j * dimStride if logSoftmax { dxPart[i] = gradPart[i] - math.Exp(outputPart[i])*sum } else { dxPart[i] = outputPart[i] * (gradPart[i] - sum) } } wg.Done() }(ii, &wg) } wg.Wait() } func (e StdEng) softMaxLastDimF32(output Tensor, x Tensor, axis int, logSoftMax bool) { outputArr := getFloat32s(output) xArr := getFloat32s(x) xShape := x.Shape() outerSize := 1 dimSize := xShape[axis] for i := 0; i < axis; i++ { outerSize *= xShape[i] } var wg sync.WaitGroup for ii := 0; ii < outerSize; ii++ { 
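// one goroutine per outer slice: find the slice maximum for numerical stability, exponentiate the shifted values, then normalize (or subtract log(sum) when staying in log space)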
wg.Add(1) go func(ii int, wg *sync.WaitGroup) { maxInput := xArr[0] for j := 1; j < dimSize; j++ { i := ii*dimSize + j if xArr[i] > maxInput { maxInput = xArr[i] } } sumExp := float32(0.0) for j := 0; j < dimSize; j++ { i := ii*dimSize + j z := xArr[i] - maxInput exp := math32.Exp(z) if logSoftMax { outputArr[i] = z } else { outputArr[i] = exp } sumExp += exp } if !logSoftMax { sumExp = 1 / sumExp } for j := 0; j < dimSize; j++ { i := ii*dimSize + j if logSoftMax { outputArr[i] -= math32.Log(sumExp) } else { outputArr[i] *= sumExp } } wg.Done() }(ii, &wg) } wg.Wait() } func (e StdEng) softMaxBLastDimF32(inputGrad, output, grad Tensor, axis int, logSoftMax bool) { dx := getFloat32s(inputGrad) outputArr := getFloat32s(output) gradArr := getFloat32s(grad) outputShape := output.Shape() outerSize := 1 dimSize := outputShape[axis] for i := 0; i < axis; i++ { outerSize *= outputShape[i] } var wg sync.WaitGroup for ii := 0; ii < outerSize; ii++ { wg.Add(1) if logSoftMax { go func(ii int, wg *sync.WaitGroup) { sum := gradArr[ii*dimSize] for j := 1; j < dimSize; j++ { i := ii*dimSize + j sum += gradArr[i] } for j := 0; j < dimSize; j++ { i := ii*dimSize + j dx[i] = gradArr[i] - (math32.Exp(outputArr[i]) * sum) } wg.Done() }(ii, &wg) } else { go func(ii int, wg *sync.WaitGroup) { //mul := make([]float32, dimSize) var sum float32 for j := 0; j < dimSize; j++ { i := ii*dimSize + j //mul[j] = outputArr[i] * gradArr[i] sum += outputArr[i] * gradArr[i] } // sum := mul[0] // for j := 1; j < dimSize; j++ { // sum += mul[j] // } for j := 0; j < dimSize; j++ { i := ii*dimSize + j dx[i] = (gradArr[i] - sum) * outputArr[i] } wg.Done() }(ii, &wg) } } wg.Wait() } func (e StdEng) softMaxInnerDimF32(output Tensor, x Tensor, axis int, logSoftmax bool) { xShape := x.Shape() innerSize, outerSize := 1, 1 for i := 0; i < axis; i++ { outerSize *= xShape[i] } for i := axis + 1; i < xShape.Dims(); i++ { innerSize *= xShape[i] } dimSize := xShape[axis] dimStride := innerSize outerStride := dimSize * dimStride outputArr := getFloat32s(output) xArr := getFloat32s(x) var wg sync.WaitGroup for ii := 0; ii < innerSize*outerSize; ii++ { wg.Add(1) go func(ii int, wg *sync.WaitGroup) { outerIndex, innerIndex := divmod(ii, innerSize) inputPart := xArr[outerIndex*outerStride+innerIndex:] outputPart := outputArr[outerIndex*outerStride+innerIndex:] maxInput := inputPart[0] for j := 1; j < dimSize; j++ { i := j * dimStride if inputPart[i] > maxInput { maxInput = inputPart[i] } } sumExp := float32(0.0) for j := 0; j < dimSize; j++ { i := j * dimStride exp := math32.Exp(inputPart[i] - maxInput) if !logSoftmax { outputPart[i] = exp } sumExp += exp } if logSoftmax { sumExp = math32.Log(sumExp) } else { sumExp = 1 / sumExp } for j := 0; j < dimSize; j++ { i := j * dimStride if logSoftmax { outputPart[i] = inputPart[i] - maxInput - sumExp } else { outputPart[i] *= sumExp } } wg.Done() }(ii, &wg) } wg.Wait() } func (e StdEng) softMaxBInnerDimF32(inputGrad, output, grad Tensor, axis int, logSoftmax bool) { dxShape := inputGrad.Shape() innerSize, outerSize := 1, 1 for i := 0; i < axis; i++ { outerSize *= dxShape[i] } for i := axis + 1; i < dxShape.Dims(); i++ { innerSize *= dxShape[i] } dimSize := dxShape[axis] dimStride := innerSize outerStride := dimSize * dimStride dxArr := getFloat32s(inputGrad) outputArr := getFloat32s(output) gradArr := getFloat32s(grad) var wg sync.WaitGroup for ii := 0; ii < innerSize*outerSize; ii++ { wg.Add(1) go func(ii int, wg *sync.WaitGroup) { outerIndex, innerIndex := divmod(ii, innerSize) gradPart := 
gradArr[outerIndex*outerStride+innerIndex:] dxPart := dxArr[outerIndex*outerStride+innerIndex:] outputPart := outputArr[outerIndex*outerStride+innerIndex:] sum := float32(0.0) for j := 0; j < dimSize; j++ { i := j * dimStride if logSoftmax { sum += gradPart[i] } else { sum += gradPart[i] * outputPart[i] } } for j := 0; j < dimSize; j++ { i := j * dimStride if logSoftmax { dxPart[i] = gradPart[i] - math32.Exp(outputPart[i])*sum } else { dxPart[i] = outputPart[i] * (gradPart[i] - sum) } } wg.Done() }(ii, &wg) } wg.Wait() } tensor-0.9.24/defaultengine_unary.go000066400000000000000000000622731426512615100175140ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e StdEng) Neg(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, numberTypes); err != nil { err = errors.Wrapf(err, "Neg failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Neg") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.NegIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Neg") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.NegIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.NegIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.NegIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Neg(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Neg") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Neg(typ, dataReuse) retVal = reuse case !safe: err = e.E.Neg(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Neg(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Inv(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, numberTypes); err != nil { err = errors.Wrapf(err, "Inv failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Inv") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.InvIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Inv") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.InvIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.InvIter(typ, dataA, ait) retVal 
= a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.InvIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Inv(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Inv") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Inv(typ, dataReuse) retVal = reuse case !safe: err = e.E.Inv(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Inv(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Square(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, numberTypes); err != nil { err = errors.Wrapf(err, "Square failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Square") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.SquareIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Square") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.SquareIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.SquareIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.SquareIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Square(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Square") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Square(typ, dataReuse) retVal = reuse case !safe: err = e.E.Square(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Square(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Cube(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, numberTypes); err != nil { err = errors.Wrapf(err, "Cube failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Cube") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.CubeIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Cube") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.CubeIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.CubeIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.CubeIter(typ, cloned.hdr(), ait) retVal = cloned } 
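// all iterator cases are handled above; returning here skips the contiguous fast path below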
return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Cube(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Cube") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Cube(typ, dataReuse) retVal = reuse case !safe: err = e.E.Cube(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Cube(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Exp(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatcmplxTypes); err != nil { err = errors.Wrapf(err, "Exp failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Exp") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.ExpIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Exp") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.ExpIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.ExpIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.ExpIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Exp(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Exp") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Exp(typ, dataReuse) retVal = reuse case !safe: err = e.E.Exp(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Exp(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Tanh(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatcmplxTypes); err != nil { err = errors.Wrapf(err, "Tanh failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Tanh") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.TanhIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Tanh") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.TanhIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.TanhIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.TanhIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Tanh(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to 
perform Tanh") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Tanh(typ, dataReuse) retVal = reuse case !safe: err = e.E.Tanh(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Tanh(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Log(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatcmplxTypes); err != nil { err = errors.Wrapf(err, "Log failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Log") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.LogIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Log") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.LogIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.LogIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.LogIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Log(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Log") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Log(typ, dataReuse) retVal = reuse case !safe: err = e.E.Log(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Log(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Log2(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatTypes); err != nil { err = errors.Wrapf(err, "Log2 failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Log2") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Log2Iter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Log2") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.Log2Iter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.Log2Iter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Log2Iter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Log2(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Log2") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Log2(typ, 
dataReuse) retVal = reuse case !safe: err = e.E.Log2(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Log2(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Log10(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatcmplxTypes); err != nil { err = errors.Wrapf(err, "Log10 failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Log10") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Log10Iter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Log10") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.Log10Iter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.Log10Iter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Log10Iter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Log10(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Log10") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Log10(typ, dataReuse) retVal = reuse case !safe: err = e.E.Log10(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Log10(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Sqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatcmplxTypes); err != nil { err = errors.Wrapf(err, "Sqrt failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Sqrt") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.SqrtIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Sqrt") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.SqrtIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.SqrtIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.SqrtIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Sqrt(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Sqrt") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Sqrt(typ, dataReuse) retVal = reuse case !safe: err = e.E.Sqrt(typ, dataA) retVal = a default: // safe by default cloned := 
a.Clone().(Tensor) err = e.E.Sqrt(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Cbrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatTypes); err != nil { err = errors.Wrapf(err, "Cbrt failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Cbrt") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.CbrtIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Cbrt") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.CbrtIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.CbrtIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.CbrtIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Cbrt(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Cbrt") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Cbrt(typ, dataReuse) retVal = reuse case !safe: err = e.E.Cbrt(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Cbrt(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) InvSqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, floatTypes); err != nil { err = errors.Wrapf(err, "InvSqrt failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.InvSqrt") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.InvSqrtIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform InvSqrt") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.InvSqrtIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.InvSqrtIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.InvSqrtIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.InvSqrt(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform InvSqrt") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.InvSqrt(typ, dataReuse) retVal = reuse case !safe: err = e.E.InvSqrt(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.InvSqrt(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Abs(a 
Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, signedTypes); err != nil { err = errors.Wrapf(err, "Abs failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Abs") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.AbsIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Abs") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.AbsIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.AbsIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.AbsIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Abs(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Abs") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Abs(typ, dataReuse) retVal = reuse case !safe: err = e.E.Abs(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Abs(typ, cloned.hdr()) retVal = cloned } return } func (e StdEng) Sign(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if err = unaryCheck(a, signedTypes); err != nil { err = errors.Wrapf(err, "Sign failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.Sign") } if useIter { switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.SignIter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform Sign") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.SignIter(typ, dataReuse, rit) retVal = reuse case !safe: err = e.E.SignIter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.SignIter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.Sign(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform Sign") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.Sign(typ, dataReuse) retVal = reuse case !safe: err = e.E.Sign(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.Sign(typ, cloned.hdr()) retVal = cloned } return } tensor-0.9.24/defaultenginefloat32.go000066400000000000000000000144571426512615100174720ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" 
"gorgonia.org/tensor/internal/execution" "gorgonia.org/tensor/internal/storage" "gorgonia.org/vecf32" ) func handleFuncOptsF32(expShape Shape, o DataOrder, opts ...FuncOpt) (reuse DenseTensor, safe, toReuse, incr bool, err error) { fo := ParseFuncOpts(opts...) reuseT, incr := fo.IncrReuse() safe = fo.Safe() toReuse = reuseT != nil if toReuse { var ok bool if reuse, ok = reuseT.(DenseTensor); !ok { returnOpOpt(fo) err = errors.Errorf("Cannot reuse a different type of Tensor in a *Dense-Scalar operation. Reuse is of %T", reuseT) return } if reuse.len() != expShape.TotalSize() && !expShape.IsScalar() { returnOpOpt(fo) err = errors.Errorf(shapeMismatch, reuse.Shape(), expShape) err = errors.Wrapf(err, "Cannot use reuse: shape mismatch") return } if !incr && reuse != nil { reuse.setDataOrder(o) // err = reuse.reshape(expShape...) } } returnOpOpt(fo) return } func prepDataVSF32(a Tensor, b interface{}, reuse Tensor) (dataA *storage.Header, dataB float32, dataReuse *storage.Header, ait, iit Iterator, useIter bool, err error) { // get data dataA = a.hdr() switch bt := b.(type) { case float32: dataB = bt case *float32: dataB = *bt default: err = errors.Errorf("b is not a float32: %T", b) return } if reuse != nil { dataReuse = reuse.hdr() } if a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) { ait = a.Iterator() if reuse != nil { iit = reuse.Iterator() } useIter = true } return } func (e Float32Engine) checkThree(a, b Tensor, reuse Tensor) error { if !a.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, a) } if !b.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, b) } if reuse != nil && !reuse.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, reuse) } if a.Dtype() != Float32 { return errors.Errorf("Expected a to be of Float32. Got %v instead", a.Dtype()) } if a.Dtype() != b.Dtype() || (reuse != nil && b.Dtype() != reuse.Dtype()) { return errors.Errorf("Expected a, b and reuse to have the same Dtype. Got %v, %v and %v instead", a.Dtype(), b.Dtype(), reuse.Dtype()) } return nil } func (e Float32Engine) checkTwo(a Tensor, reuse Tensor) error { if !a.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, a) } if reuse != nil && !reuse.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, reuse) } if a.Dtype() != Float32 { return errors.Errorf("Expected a to be of Float32. Got %v instead", a.Dtype()) } if reuse != nil && reuse.Dtype() != a.Dtype() { return errors.Errorf("Expected reuse to be the same as a. Got %v instead", reuse.Dtype()) } return nil } // Float32Engine is an execution engine that is optimized to only work with float32s. It assumes all data will are float32s. // // Use this engine only as form of optimization. You should probably be using the basic default engine for most cases. 
type Float32Engine struct { StdEng } // makeArray allocates a slice for the array func (e Float32Engine) makeArray(arr *array, t Dtype, size int) { if t != Float32 { panic("Float32Engine only creates float32s") } if size < 0 { panic("Cannot have negative sizes when making array") } arr.Header.Raw = make([]byte, size*4) arr.t = t } func (e Float32Engine) FMA(a, x, y Tensor) (retVal Tensor, err error) { reuse := y if err = e.checkThree(a, x, reuse); err != nil { return nil, errors.Wrap(err, "Failed checks") } var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, _, err = prepDataVV(a, x, reuse); err != nil { return nil, errors.Wrap(err, "Float32Engine.FMA") } if useIter { err = execution.MulIterIncrF32(dataA.Float32s(), dataB.Float32s(), dataReuse.Float32s(), ait, bit, iit) retVal = reuse return } vecf32.IncrMul(dataA.Float32s(), dataB.Float32s(), dataReuse.Float32s()) retVal = reuse return } func (e Float32Engine) FMAScalar(a Tensor, x interface{}, y Tensor) (retVal Tensor, err error) { reuse := y if err = e.checkTwo(a, reuse); err != nil { return nil, errors.Wrap(err, "Failed checks") } var ait, iit Iterator var dataTensor, dataReuse *storage.Header var scalar float32 var useIter bool if dataTensor, scalar, dataReuse, ait, iit, useIter, err = prepDataVSF32(a, x, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "Float32Engine.FMAScalar") } if useIter { err = execution.MulIterIncrVSF32(dataTensor.Float32s(), scalar, dataReuse.Float32s(), ait, iit) retVal = reuse return // return here so that the contiguous path below does not run a second time } execution.MulIncrVSF32(dataTensor.Float32s(), scalar, dataReuse.Float32s()) retVal = reuse return } // Add performs a + b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e Float32Engine) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if a.RequiresIterator() || b.RequiresIterator() { // inputs that require an iterator (views, transposes, masked data) fall back to the generic engine return e.StdEng.Add(a, b, opts...)
} var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, err = handleFuncOptsF32(a.Shape(), a.DataOrder(), opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if err = e.checkThree(a, b, reuse); err != nil { return nil, errors.Wrap(err, "Failed checks") } var hdrA, hdrB, hdrReuse *storage.Header var dataA, dataB, dataReuse []float32 if hdrA, hdrB, hdrReuse, _, _, _, _, _, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "Float32Engine.Add") } dataA = hdrA.Float32s() dataB = hdrB.Float32s() if hdrReuse != nil { dataReuse = hdrReuse.Float32s() } switch { case incr: vecf32.IncrAdd(dataA, dataB, dataReuse) retVal = reuse case toReuse: copy(dataReuse, dataA) vecf32.Add(dataReuse, dataB) retVal = reuse case !safe: vecf32.Add(dataA, dataB) retVal = a default: ret := a.Clone().(headerer) vecf32.Add(ret.hdr().Float32s(), dataB) retVal = ret.(Tensor) } return } func (e Float32Engine) Inner(a, b Tensor) (retVal float32, err error) { var A, B []float32 var AD, BD *Dense var ok bool if AD, ok = a.(*Dense); !ok { return 0, errors.Errorf("a is not a *Dense") } if BD, ok = b.(*Dense); !ok { return 0, errors.Errorf("b is not a *Dense") } A = AD.Float32s() B = BD.Float32s() retVal = whichblas.Sdot(len(A), A, 1, B, 1) return } tensor-0.9.24/defaultenginefloat32_test.go000066400000000000000000000016351426512615100205230ustar00rootroot00000000000000package tensor import ( "testing" "testing/quick" ) func TestFloat32Engine_makeArray(t *testing.T) { // the uint16 is just to make sure that tests are correctly run. // we don't want the quicktest to randomly generate a size that is so large // that Go takes a long time just to allocate. We'll test the other sizes (like negative numbers) // after the quick test. f := func(sz uint16) bool { size := int(sz) e := Float32Engine{StdEng{}} dt := Float32 arr := array{} e.makeArray(&arr, dt, size) if len(arr.Raw) != size*4 { t.Errorf("Expected raw to be size*4. Got %v instead", len(arr.Raw)) return false } v, ok := arr.Data().([]float32) if !ok { t.Errorf("Expected v to be []float32. Got %T instead", arr.Data()) return false } if len(v) != size { return false } return true } if err := quick.Check(f, nil); err != nil { t.Errorf("Quick test failed %v", err) } } tensor-0.9.24/defaultenginefloat64.go000066400000000000000000000143451426512615100174730ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" "gorgonia.org/tensor/internal/execution" "gorgonia.org/tensor/internal/storage" "gorgonia.org/vecf64" ) func handleFuncOptsF64(expShape Shape, o DataOrder, opts ...FuncOpt) (reuse DenseTensor, safe, toReuse, incr bool, err error) { fo := ParseFuncOpts(opts...) reuseT, incr := fo.IncrReuse() safe = fo.Safe() toReuse = reuseT != nil if toReuse { var ok bool if reuse, ok = reuseT.(DenseTensor); !ok { returnOpOpt(fo) err = errors.Errorf("Cannot reuse a different type of Tensor in a *Dense-Scalar operation. Reuse is of %T", reuseT) return } if reuse.len() != expShape.TotalSize() && !expShape.IsScalar() { returnOpOpt(fo) err = errors.Errorf(shapeMismatch, reuse.Shape(), expShape) err = errors.Wrapf(err, "Cannot use reuse: shape mismatch") return } if !incr && reuse != nil { reuse.setDataOrder(o) // err = reuse.reshape(expShape...) 
} } returnOpOpt(fo) return } func prepDataVSF64(a Tensor, b interface{}, reuse Tensor) (dataA *storage.Header, dataB float64, dataReuse *storage.Header, ait, iit Iterator, useIter bool, err error) { // get data dataA = a.hdr() switch bt := b.(type) { case float64: dataB = bt case *float64: dataB = *bt default: err = errors.Errorf("b is not a float64: %T", b) return } if reuse != nil { dataReuse = reuse.hdr() } if a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) { ait = a.Iterator() if reuse != nil { iit = reuse.Iterator() } useIter = true } return } func (e Float64Engine) checkThree(a, b Tensor, reuse Tensor) error { if !a.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, a) } if !b.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, b) } if reuse != nil && !reuse.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, reuse) } if a.Dtype() != Float64 { return errors.Errorf("Expected a to be of Float64. Got %v instead", a.Dtype()) } if a.Dtype() != b.Dtype() || (reuse != nil && b.Dtype() != reuse.Dtype()) { return errors.Errorf("Expected a, b and reuse to have the same Dtype. Got %v, %v and %v instead", a.Dtype(), b.Dtype(), reuse.Dtype()) } return nil } func (e Float64Engine) checkTwo(a Tensor, reuse Tensor) error { if !a.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, a) } if reuse != nil && !reuse.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, reuse) } if a.Dtype() != Float64 { return errors.Errorf("Expected a to be of Float64. Got %v instead", a.Dtype()) } if reuse != nil && reuse.Dtype() != a.Dtype() { return errors.Errorf("Expected reuse to be the same as a. Got %v instead", reuse.Dtype()) } return nil } // Float64Engine is an execution engine that is optimized to only work with float64s. It assumes all data are float64s. // // Use this engine only as a form of optimization. You should probably be using the basic default engine for most cases.
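// // A minimal FMA usage sketch, computing y += a * x elementwise (an illustrative assumption added by the editor, not part of the original docs): // e := Float64Engine{StdEng{}} // a := New(WithShape(2), WithBacking([]float64{1, 2}), WithEngine(e)) // x := New(WithShape(2), WithBacking([]float64{3, 4}), WithEngine(e)) // y := New(WithShape(2), WithBacking([]float64{5, 6}), WithEngine(e)) // res, err := e.FMA(a, x, y) // res aliases y, which now holds {8, 14}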
type Float64Engine struct { StdEng } // makeArray allocates a slice for the array func (e Float64Engine) makeArray(arr *array, t Dtype, size int) { if t != Float64 { panic("Float64Engine only creates float64s") } if size < 0 { panic("Cannot have negative sizes when making array") } arr.Header.Raw = make([]byte, size*8) arr.t = t } func (e Float64Engine) FMA(a, x, y Tensor) (retVal Tensor, err error) { reuse := y if err = e.checkThree(a, x, reuse); err != nil { return nil, errors.Wrap(err, "Failed checks") } var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, _, err = prepDataVV(a, x, reuse); err != nil { return nil, errors.Wrap(err, "Float64Engine.FMA") } if useIter { err = execution.MulIterIncrF64(dataA.Float64s(), dataB.Float64s(), dataReuse.Float64s(), ait, bit, iit) retVal = reuse return } vecf64.IncrMul(dataA.Float64s(), dataB.Float64s(), dataReuse.Float64s()) retVal = reuse return } func (e Float64Engine) FMAScalar(a Tensor, x interface{}, y Tensor) (retVal Tensor, err error) { reuse := y if err = e.checkTwo(a, reuse); err != nil { return nil, errors.Wrap(err, "Failed checks") } var ait, iit Iterator var dataTensor, dataReuse *storage.Header var scalar float64 var useIter bool if dataTensor, scalar, dataReuse, ait, iit, useIter, err = prepDataVSF64(a, x, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "Float64Engine.FMAScalar") } if useIter { err = execution.MulIterIncrVSF64(dataTensor.Float64s(), scalar, dataReuse.Float64s(), ait, iit) retVal = reuse return // return here so that the contiguous path below does not run a second time } execution.MulIncrVSF64(dataTensor.Float64s(), scalar, dataReuse.Float64s()) retVal = reuse return } // Add performs a + b elementwise. Both a and b must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (e Float64Engine) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) { if a.RequiresIterator() || b.RequiresIterator() { // inputs that require an iterator (views, transposes, masked data) fall back to the generic engine return e.StdEng.Add(a, b, opts...) } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, err = handleFuncOptsF64(a.Shape(), a.DataOrder(), opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } if err = e.checkThree(a, b, reuse); err != nil { return nil, errors.Wrap(err, "Failed checks") } var hdrA, hdrB, hdrReuse *storage.Header var dataA, dataB, dataReuse []float64 if hdrA, hdrB, hdrReuse, _, _, _, _, _, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "Float64Engine.Add") } dataA = hdrA.Float64s() dataB = hdrB.Float64s() if hdrReuse != nil { dataReuse = hdrReuse.Float64s() } switch { case incr: vecf64.IncrAdd(dataA, dataB, dataReuse) retVal = reuse case toReuse: copy(dataReuse, dataA) vecf64.Add(dataReuse, dataB) retVal = reuse case !safe: vecf64.Add(dataA, dataB) retVal = a default: ret := a.Clone().(headerer) vecf64.Add(ret.hdr().Float64s(), dataB) retVal = ret.(Tensor) } return } func (e Float64Engine) Inner(a, b Tensor) (retVal float64, err error) { var A, B []float64 var AD, BD *Dense var ok bool if AD, ok = a.(*Dense); !ok { return 0, errors.Errorf("a is not a *Dense") } if BD, ok = b.(*Dense); !ok { return 0, errors.Errorf("b is not a *Dense") } A = AD.Float64s() B = BD.Float64s() retVal = whichblas.Ddot(len(A), A, 1, B, 1) return } tensor-0.9.24/defaultenginefloat64_test.go000066400000000000000000000016351426512615100205300ustar00rootroot00000000000000package tensor import ( "testing" "testing/quick" ) func TestFloat64Engine_makeArray(t *testing.T) { // the uint16 is just to make sure that tests are correctly run.
// we don't want the quicktest to randomly generate a size that is so large // that Go takes a long time just to allocate. We'll test the other sizes (like negative numbers) // after the quick test. f := func(sz uint16) bool { size := int(sz) e := Float64Engine{StdEng{}} dt := Float64 arr := array{} e.makeArray(&arr, dt, size) if len(arr.Raw) != size*8 { t.Errorf("Expected raw to be size*8. Got %v instead", len(arr.Raw)) return false } v, ok := arr.Data().([]float64) if !ok { t.Errorf("Expected v to be []float64. Got %T instead", arr.Data()) return false } if len(v) != size { return false } return true } if err := quick.Check(f, nil); err != nil { t.Errorf("Quick test failed %v", err) } } tensor-0.9.24/dense.go000066400000000000000000000332321426512615100145530ustar00rootroot00000000000000package tensor import ( "fmt" "reflect" "unsafe" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) const ( maskCompEvery int = 8 ) // Dense represents a dense tensor - this is the most common form of tensors. It can be used to represent vectors, matrices, etc. type Dense struct { AP array flag MemoryFlag e Engine // execution engine for the *Dense oe standardEngine // optimized engine // backup AP. When a transpose is done, the old *AP is backed up here, for easy untransposes old AP transposeWith []int // if viewOf != 0, then this *Dense is a view. viewOf uintptr mask []bool // mask slice can be used to identify missing or invalid values. len(mask)<=len(v) maskIsSoft bool } // NewDense creates a new *Dense. It tries its best to get a *Dense from the tensor pool. func NewDense(dt Dtype, shape Shape, opts ...ConsOpt) *Dense { return recycledDense(dt, shape, opts...) } func recycledDense(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) { retVal = recycledDenseNoFix(dt, shape, opts...) retVal.fix() if err := retVal.sanity(); err != nil { panic(err) } return } func recycledDenseNoFix(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) { // size := shape.TotalSize() //if shape.IsScalar() { // size = 1 //} retVal = borrowDense() retVal.array.t = dt retVal.AP.zeroWithDims(shape.Dims()) for _, opt := range opts { opt(retVal) } retVal.setShape(shape...) return } func (t *Dense) fromSlice(x interface{}) { t.array.Header.Raw = nil // GC anything else t.array.fromSlice(x) } func (t *Dense) addMask(mask []bool) { l := len(mask) if l > 0 && l != t.len() { panic("Mask is not same length as data") } t.mask = mask } func (t *Dense) makeArray(size int) { switch te := t.e.(type) { case NonStdEngine: t.flag = MakeMemoryFlag(t.flag, ManuallyManaged) case arrayMaker: te.makeArray(&t.array, t.t, size) return default: } memsize := calcMemSize(t.t, size) mem, err := t.e.Alloc(memsize) if err != nil { panic(err) } t.array.Raw = storage.FromMemory(mem.Uintptr(), uintptr(memsize)) return } // Info returns the access pattern which explains how the data in the underlying array is accessed. This is mostly used for debugging. func (t *Dense) Info() *AP { return &t.AP } // Dtype returns the data type of the *Dense tensor. func (t *Dense) Dtype() Dtype { return t.t } // Data returns the underlying array.
If the *Dense represents a scalar value, the scalar value is returned instead func (t *Dense) Data() interface{} { if t.IsScalar() { return t.Get(0) } // build a type of []T shdr := reflect.SliceHeader{ Data: t.array.Uintptr(), Len: t.array.Len(), Cap: t.array.Cap(), } sliceT := reflect.SliceOf(t.t.Type) ptr := unsafe.Pointer(&shdr) val := reflect.Indirect(reflect.NewAt(sliceT, ptr)) return val.Interface() } // DataSize returns the size of the underlying array. Typically t.DataSize() == t.Shape().TotalSize() func (t *Dense) DataSize() int { if t.IsScalar() { return 0 // DOUBLE CHECK } return t.array.Len() } // Engine returns the execution engine associated with this Tensor func (t *Dense) Engine() Engine { return t.e } // Reshape reshapes a *Dense. If the tensor needs to be materialized (either because it's a view or a transpose), it will be materialized before the reshape happens func (t *Dense) Reshape(dims ...int) error { if t.Shape().TotalSize() != Shape(dims).TotalSize() { return errors.Errorf("Cannot reshape %v into %v", t.Shape(), dims) } if t.viewOf != 0 && t.o.IsNotContiguous() { return errors.Errorf(methodNYI, "Reshape", "non-contiguous views") } if !t.old.IsZero() { t.Transpose() } return t.reshape(dims...) } func (t *Dense) reshape(dims ...int) error { t.setShape(dims...) return t.sanity() } func (t *Dense) unsqueeze(axis int) error { if axis > t.shape.Dims()+1 { return errors.Errorf("Cannot unsqueeze on axis %d when the tensor has shape %v", axis, t.shape) } t.shape = append(t.shape, 1) copy(t.shape[axis+1:], t.shape[axis:]) t.shape[axis] = 1 t.strides = append(t.strides, 1) copy(t.strides[axis+1:], t.strides[axis:]) return nil } // ScalarValue returns the scalar value of a *Tensor, // IF and ONLY IF it's a Tensor representation of a scalar value. // This is required because operations like a (vec · vec) would return a scalar value. // I didn't want to return interface{} for all the API methods, so the next best solution is to // wrap the scalar value in a *Tensor func (t *Dense) ScalarValue() interface{} { if !t.IsScalar() { panic(fmt.Sprintf("ScalarValue only works when the Tensor is a representation of a scalar value. The value of the tensor is %v", t)) } return t.Get(0) } // IsView indicates if the Tensor is a view of another (typically from slicing) func (t *Dense) IsView() bool { return t.viewOf != 0 } // IsMaterializable indicates if the Tensor is materializable - if it has either gone through some transforms or slicing func (t *Dense) IsMaterializable() bool { return t.viewOf != 0 || !t.old.IsZero() } // IsManuallyManaged returns true if the memory associated with this *Dense is manually managed (by the user) func (t *Dense) IsManuallyManaged() bool { return t.flag.manuallyManaged() } // IsNativelyAccessible checks if the pointers are accessible by Go func (t *Dense) IsNativelyAccessible() bool { return t.flag.nativelyAccessible() } // Clone clones a *Dense.
It creates a copy of the data, and a new underlying array is allocated func (t *Dense) Clone() interface{} { if t.e != nil { retVal := new(Dense) t.AP.CloneTo(&retVal.AP) retVal.t = t.t retVal.e = t.e retVal.oe = t.oe retVal.flag = t.flag retVal.makeArray(t.Len()) if !t.old.IsZero() { retVal.old = t.old.Clone() t.old.CloneTo(&retVal.old) } copyDense(retVal, t) retVal.lock() return retVal } panic("Unreachable: No engine") } // IsMasked indicates whether tensor is masked func (t *Dense) IsMasked() bool { return len(t.mask) == t.len() } // MaskFromDense adds a mask slice to tensor by ORing dense arguments' masks func (t *Dense) MaskFromDense(tts ...*Dense) { hasMask := BorrowBools(len(tts)) defer ReturnBools(hasMask) numMasked := 0 var masked = false for i, tt := range tts { if tt != nil { hasMask[i] = tt.IsMasked() masked = masked || hasMask[i] if hasMask[i] { numMasked++ } } } if numMasked < 1 { return } // Only make mask if none exists already. This way one of the tts can be t itself if len(t.mask) < t.DataSize() { t.makeMask() } for i, tt := range tts { if tt != nil { n := len(tt.mask) if hasMask[i] { for j := range t.mask { t.mask[j] = t.mask[j] || tt.mask[j%n] } } } } } // Private methods func (t *Dense) cap() int { return t.array.Cap() } func (t *Dense) len() int { return t.array.Len() } // exactly the same as DataSize func (t *Dense) arr() array { return t.array } func (t *Dense) arrPtr() *array { return &t.array } func (t *Dense) setShape(s ...int) { t.unlock() t.SetShape(s...) t.lock() return } func (t *Dense) setAP(ap *AP) { t.AP = *ap } func (t *Dense) fix() { if t.e == nil { t.e = StdEng{} } if oe, ok := t.e.(standardEngine); ok { t.oe = oe } switch { case t.IsScalar() && t.array.Header.Raw == nil: t.makeArray(1) case t.Shape() == nil && t.array.Header.Raw != nil: size := t.Len() if size == 1 { t.SetShape() // scalar } else { t.SetShape(size) // vector } case t.array.Header.Raw == nil && t.t != Dtype{}: size := t.Shape().TotalSize() t.makeArray(size) } if len(t.mask) != t.len() { t.mask = t.mask[:0] } t.lock() // don't put this in a defer - if t.array.Ptr == nil and t.Shape() == nil, then leave it unlocked } // makeMask adds a mask slice to tensor if required func (t *Dense) makeMask() { var size int size = t.shape.TotalSize() if len(t.mask) >= size { t.mask = t.mask[:size] } if cap(t.mask) < size { t.mask = make([]bool, size) } t.mask = t.mask[:size] memsetBools(t.mask, false) } // sanity checks that a tensor is correct. func (t *Dense) sanity() error { if !t.AP.IsZero() && t.Shape() == nil && t.array.Header.Raw == nil { return errors.New(emptyTensor) } size := t.Len() expected := t.Size() if t.viewOf == 0 && size != expected && !t.IsScalar() { return errors.Wrap(errors.Errorf(shapeMismatch, t.Shape(), size), "sanity check failed") } // TODO: sanity check for views return nil } // isTransposed returns true if the *Dense holds a transposed array.
func (t *Dense) isTransposed() bool { return !t.old.IsZero() } // oshape returns the original shape func (t *Dense) oshape() Shape { if !t.old.IsZero() { return t.old.Shape() } return t.Shape() } // ostrides returns the original strides func (t *Dense) ostrides() []int { if !t.old.IsZero() { return t.old.Strides() } return t.Strides() } // ShallowClone clones the *Dense without making a copy of the underlying array func (t *Dense) ShallowClone() *Dense { retVal := borrowDense() retVal.e = t.e retVal.oe = t.oe t.AP.CloneTo(&retVal.AP) retVal.flag = t.flag retVal.array = t.array retVal.old = t.old retVal.transposeWith = t.transposeWith retVal.viewOf = t.viewOf retVal.mask = t.mask retVal.maskIsSoft = t.maskIsSoft return retVal } func (t *Dense) oldAP() *AP { return &t.old } func (t *Dense) setOldAP(ap *AP) { t.old = *ap } func (t *Dense) transposeAxes() []int { return t.transposeWith } //go:nocheckptr func (t *Dense) parentTensor() *Dense { if t.viewOf != 0 { return (*Dense)(unsafe.Pointer(t.viewOf)) } return nil } func (t *Dense) setParentTensor(d *Dense) { if d == nil { t.viewOf = 0 return } t.viewOf = uintptr(unsafe.Pointer(d)) } /* ------ Mask operations */ // ResetMask fills the mask with either false, or the provided boolean value func (t *Dense) ResetMask(val ...bool) error { if !t.IsMasked() { t.makeMask() } var fillValue = false if len(val) > 0 { fillValue = val[0] } memsetBools(t.mask, fillValue) return nil } // HardenMask forces the mask to hard. If mask is hard, then true mask values cannot be unset func (t *Dense) HardenMask() bool { t.maskIsSoft = false return t.maskIsSoft } // SoftenMask forces the mask to soft func (t *Dense) SoftenMask() bool { t.maskIsSoft = true return t.maskIsSoft } // MaskFromSlice makes mask from supplied slice. The bounds check comes first so that a slice longer than the mask returns early instead of indexing past the mask func (t *Dense) MaskFromSlice(x interface{}) { t.makeMask() n := len(t.mask) switch m := x.(type) { case []bool: copy(t.mask, m) return case []int: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []int8: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []int16: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []int32: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []int64: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []uint: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []byte: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []uint16: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []uint32: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []uint64: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []float32: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []float64: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []complex64: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []complex128: for i, v := range m { if i >= n { return } if v != 0 { t.mask[i] = true } } case []string: for i, v := range m { if i >= n { return } if v != "" { t.mask[i] = true } } default: return } } // Memset sets all the values in the *Dense tensor.
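// // A short usage sketch (an illustrative assumption added by the editor; Of is the Dtype construction option used elsewhere in this package): // T := New(WithShape(2, 3), Of(Float64)) // err := T.Memset(3.14) // every element of T becomes 3.14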
func (t *Dense) Memset(x interface{}) error { if !t.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, t) } if t.IsMaterializable() { it := newFlatIterator(&t.AP) return t.array.memsetIter(x, it) } return t.array.Memset(x) } // Eq checks that any two things are equal. If the shapes are the same, but the strides are not the same, it will still be considered the same func (t *Dense) Eq(other interface{}) bool { if ot, ok := other.(*Dense); ok { if ot == t { return true } if !t.Shape().Eq(ot.Shape()) { return false } return t.array.Eq(&ot.array) } return false } func (t *Dense) Zero() { if t.IsMaterializable() { it := newFlatIterator(&t.AP) if err := t.zeroIter(it); err != nil { panic(err) } } if t.IsMasked() { t.ResetMask() } t.array.Zero() } func (t *Dense) Mask() []bool { return t.mask } func (t *Dense) SetMask(mask []bool) { // if len(mask) != t.len() { // panic("Cannot set mask") // } t.mask = mask } func (t *Dense) slice(start, end int) { t.array = t.array.slice(start, end) } // RequiresIterator indicates if an iterator is required to read the data in *Dense in the correct fashion func (t *Dense) RequiresIterator() bool { if t.len() == 1 { return false } // non-contiguous slice, transpose, or masked. If it's a slice and contiguous, then an iterator is not required if !t.o.IsContiguous() || !t.old.IsZero() || t.IsMasked() { return true } return false } func (t *Dense) Iterator() Iterator { return IteratorFromDense(t) } func (t *Dense) standardEngine() standardEngine { return t.oe } tensor-0.9.24/dense_apply_test.go000066400000000000000000000120161426512615100170140ustar00rootroot00000000000000package tensor import ( "math/rand" "testing" "testing/quick" "time" "unsafe" ) func getMutateVal(dt Dtype) interface{} { switch dt { case Int: return int(1) case Int8: return int8(1) case Int16: return int16(1) case Int32: return int32(1) case Int64: return int64(1) case Uint: return uint(1) case Uint8: return uint8(1) case Uint16: return uint16(1) case Uint32: return uint32(1) case Uint64: return uint64(1) case Float32: return float32(1) case Float64: return float64(1) case Complex64: var c complex64 = 1 return c case Complex128: var c complex128 = 1 return c case Bool: return true case String: return "Hello World" case Uintptr: return uintptr(0xdeadbeef) case UnsafePointer: return unsafe.Pointer(uintptr(0xdeadbeef)) } return nil } func getMutateFn(dt Dtype) interface{} { switch dt { case Int: return mutateI case Int8: return mutateI8 case Int16: return mutateI16 case Int32: return mutateI32 case Int64: return mutateI64 case Uint: return mutateU case Uint8: return mutateU8 case Uint16: return mutateU16 case Uint32: return mutateU32 case Uint64: return mutateU64 case Float32: return mutateF32 case Float64: return mutateF64 case Complex64: return mutateC64 case Complex128: return mutateC128 case Bool: return mutateB case String: return mutateStr case Uintptr: return mutateUintptr case UnsafePointer: return mutateUnsafePointer } return nil } func TestDense_Apply(t *testing.T) { var r *rand.Rand mut := func(q *Dense) bool { var mutVal interface{} if mutVal = getMutateVal(q.Dtype()); mutVal == nil { return true // we'll temporarily skip those we cannot mutate/get a mutation value } var fn interface{} if fn = getMutateFn(q.Dtype()); fn == nil { return true // we'll skip those that we cannot mutate } we, eqFail := willerr(q, nil, nil) _, ok := q.Engine().(Mapper) we = we || !ok a := q.Clone().(*Dense) correct := q.Clone().(*Dense) correct.Memset(mutVal) ret, err := a.Apply(fn) if err, 
retEarly := qcErrCheck(t, "Apply", a, nil, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), eqFail, correct.Data(), ret.Data()) { return false } // wrong fn type/illogical values if _, err = a.Apply(getMutateFn); err == nil { t.Error("Expected an error") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(mut, &quick.Config{Rand: r}); err != nil { t.Errorf("Applying mutation function failed %v", err) } } func TestDense_Apply_unsafe(t *testing.T) { var r *rand.Rand mut := func(q *Dense) bool { var mutVal interface{} if mutVal = getMutateVal(q.Dtype()); mutVal == nil { return true // we'll temporarily skip those we cannot mutate/get a mutation value } var fn interface{} if fn = getMutateFn(q.Dtype()); fn == nil { return true // we'll skip those that we cannot mutate } we, eqFail := willerr(q, nil, nil) _, ok := q.Engine().(Mapper) we = we || !ok a := q.Clone().(*Dense) correct := q.Clone().(*Dense) correct.Memset(mutVal) ret, err := a.Apply(fn, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Apply", a, nil, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), eqFail, correct.Data(), ret.Data()) { return false } if ret != a { t.Error("Expected ret == a (UseUnsafe option was used)") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(mut, &quick.Config{Rand: r}); err != nil { t.Errorf("Applying mutation function failed %v", err) } } func TestDense_Apply_reuse(t *testing.T) { var r *rand.Rand mut := func(q *Dense) bool { var mutVal interface{} if mutVal = getMutateVal(q.Dtype()); mutVal == nil { return true // we'll temporarily skip those we cannot mutate/get a mutation value } var fn interface{} if fn = getMutateFn(q.Dtype()); fn == nil { return true // we'll skip those that we cannot mutate } we, eqFail := willerr(q, nil, nil) _, ok := q.Engine().(Mapper) we = we || !ok a := q.Clone().(*Dense) reuse := q.Clone().(*Dense) reuse.Zero() correct := q.Clone().(*Dense) correct.Memset(mutVal) ret, err := a.Apply(fn, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Apply", a, nil, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), eqFail, correct.Data(), ret.Data()) { return false } if ret != reuse { t.Error("Expected ret == reuse (WithReuse option was used)") return false } return true } r = rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(mut, &quick.Config{Rand: r}); err != nil { t.Errorf("Applying mutation function failed %v", err) } } tensor-0.9.24/dense_argmethods.go000066400000000000000000000022251426512615100167660ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" /* Argmax */ // Argmax finds the index of the max value along the axis provided func (t *Dense) Argmax(axis int) (retVal *Dense, err error) { e := t.e switch am := e.(type) { case denseArgmaxer: return am.argmaxDenseTensor(t, axis) case Argmaxer: var ret Tensor var ok bool if ret, err = am.Argmax(t, axis); err != nil { return nil, errors.Wrapf(err, opFail, "Argmax") } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(extractionFail, "*Dense", ret) } return } return nil, errors.New("Engine does not support Argmax") } /* Argmin */ // Argmin finds the index of the min value along the axis provided func (t *Dense) Argmin(axis int) (retVal *Dense, err error) { e := t.e switch am := e.(type) { case denseArgminer: return am.argminDenseTensor(t, axis)
case Argminer: var ret Tensor var ok bool if ret, err = am.Argmin(t, axis); err != nil { return nil, errors.Wrapf(err, opFail, "Argmax") } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(extractionFail, "*Dense", ret) } return } return nil, errors.New("Engine does not suport Argmax") } tensor-0.9.24/dense_argmethods_test.go000066400000000000000000001324271426512615100200350ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "math" "testing" "github.com/chewxy/math32" "github.com/stretchr/testify/assert" ) /* Test data */ var basicDenseI = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseI8 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int8{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseI16 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int16{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseI32 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int32{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 
3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseI64 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int64{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseU = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseU8 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint8{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseU16 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint16{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseU32 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint32{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 
0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseU64 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint64{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseF32 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]float32{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var basicDenseF64 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]float64{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3})) var argmaxCorrect = []struct { shape Shape data []int }{ {Shape{3, 4, 5, 2}, []int{ 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, }}, {Shape{2, 4, 5, 2}, []int{ 1, 0, 1, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 2, 2, 0, 1, 1, 2, 2, 1, 0, 2, 
0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 1, 0, 1, 2, 1, 0, 1, 1, 2, 0, 1, 0, 0, 0, 0, 2, 1, 0, 1, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 2, 0, }}, {Shape{2, 3, 5, 2}, []int{ 3, 2, 2, 1, 1, 2, 1, 0, 0, 1, 3, 2, 1, 0, 1, 0, 2, 2, 3, 0, 1, 0, 1, 3, 0, 2, 3, 3, 2, 1, 2, 2, 0, 0, 1, 3, 2, 0, 1, 2, 0, 3, 0, 1, 0, 1, 3, 2, 2, 1, 2, 1, 3, 1, 2, 0, 2, 2, 0, 0, }}, {Shape{2, 3, 4, 2}, []int{ 4, 3, 2, 1, 1, 2, 0, 1, 1, 1, 1, 3, 1, 0, 0, 2, 2, 1, 0, 4, 2, 2, 3, 1, 1, 1, 0, 2, 0, 0, 2, 2, 1, 4, 0, 1, 4, 1, 1, 0, 4, 3, 1, 1, 2, 3, 1, 1, }}, {Shape{2, 3, 4, 5}, []int{ 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, }}, } var argminCorrect = []struct { shape Shape data []int }{ {Shape{3, 4, 5, 2}, []int{ 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, }}, {Shape{2, 4, 5, 2}, []int{ 2, 1, 0, 0, 1, 2, 1, 2, 1, 2, 1, 0, 0, 2, 1, 0, 1, 2, 0, 1, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 2, 1, 0, 1, 0, 2, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 2, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 2, 2, 0, 0, 1, 0, 2, 2, 0, 0, 0, 1, 2, 2, 2, 2, 1, 1, }}, {Shape{2, 3, 5, 2}, []int{ 0, 1, 0, 2, 2, 1, 3, 2, 3, 2, 1, 0, 3, 3, 0, 1, 0, 3, 0, 2, 0, 1, 0, 1, 3, 0, 2, 1, 0, 0, 3, 1, 3, 1, 2, 2, 1, 2, 0, 1, 3, 0, 1, 0, 1, 0, 2, 1, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 1, }}, {Shape{2, 3, 4, 2}, []int{ 1, 0, 0, 0, 2, 3, 4, 0, 3, 0, 3, 0, 4, 4, 3, 1, 0, 2, 3, 0, 3, 0, 0, 2, 4, 4, 3, 4, 2, 3, 0, 0, 4, 0, 1, 3, 3, 2, 0, 4, 2, 1, 4, 2, 4, 0, 2, 0, }}, {Shape{2, 3, 4, 5}, []int{ 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, }}, } func TestDense_Argmax_I(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseI.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseI.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. 
", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_I(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseI.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseI.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_I8(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseI8.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseI8.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_I8(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseI8.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseI8.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_I16(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseI16.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. 
Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseI16.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_I16(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseI16.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseI16.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_I32(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseI32.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseI32.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_I32(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseI32.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. 
", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseI32.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_I64(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseI64.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseI64.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_I64(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseI64.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseI64.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_U(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseU.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. 
", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseU.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_U(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseU.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseU.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_U8(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseU8.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseU8.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_U8(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseU8.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. 
", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseU8.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_U16(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseU16.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseU16.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_U16(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseU16.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseU16.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_U32(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseU32.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. 
", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseU32.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_U32(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseU32.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseU32.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_U64(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseU64.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // with different engine T = basicDenseU64.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_U64(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseU64.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. 
", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // with different engine T = basicDenseU64.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_F32(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseF32.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // test with NaN T = New(WithShape(4), WithBacking([]float32{1, 2, math32.NaN(), 4})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(2, argmax.ScalarValue(), "NaN test") // test with Mask and Nan T = New(WithShape(4), WithBacking([]float32{1, 9, math32.NaN(), 4}, []bool{false, true, true, false})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "Masked NaN test") // test with +Inf T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(1), 4})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with +Inf: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(2, argmax.ScalarValue(), "+Inf test") // test with Mask and +Inf T = New(WithShape(4), WithBacking([]float32{1, 9, math32.Inf(1), 4}, []bool{false, true, true, false})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "Masked NaN test") // test with -Inf T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(-1), 4})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with -Inf: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "+Inf test") // test with Mask and -Inf T = New(WithShape(4), WithBacking([]float32{1, 9, math32.Inf(-1), 4}, []bool{false, true, true, false})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "Masked -Inf test") // with different engine T = basicDenseF32.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. 
", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_F32(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseF32.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // test with NaN T = New(WithShape(4), WithBacking([]float32{1, 2, math32.NaN(), 4})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(2, argmin.ScalarValue(), "NaN test") // test with Mask and Nan T = New(WithShape(4), WithBacking([]float32{1, -9, math32.NaN(), 4}, []bool{false, true, true, false})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "Masked NaN test") // test with +Inf T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(1), 4})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with +Inf: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "+Inf test") // test with Mask and +Inf T = New(WithShape(4), WithBacking([]float32{1, -9, math32.Inf(1), 4}, []bool{false, true, true, false})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "Masked NaN test") // test with -Inf T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(-1), 4})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with -Inf: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(2, argmin.ScalarValue(), "+Inf test") // test with Mask and -Inf T = New(WithShape(4), WithBacking([]float32{1, -9, math32.Inf(-1), 4}, []bool{false, true, true, false})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "Masked -Inf test") // with different engine T = basicDenseF32.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } func TestDense_Argmax_F64(t *testing.T) { assert := assert.New(t) var T, argmax *Dense var err error T = basicDenseF64.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. 
", i) } // test all axes if argmax, err = T.Argmax(AllAxes); err != nil { t.Error(err) return } assert.True(argmax.IsScalar()) assert.Equal(7, argmax.ScalarValue()) // test with NaN T = New(WithShape(4), WithBacking([]float64{1, 2, math.NaN(), 4})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(2, argmax.ScalarValue(), "NaN test") // test with Mask and Nan T = New(WithShape(4), WithBacking([]float64{1, 9, math.NaN(), 4}, []bool{false, true, true, false})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "Masked NaN test") // test with +Inf T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(1), 4})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with +Inf: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(2, argmax.ScalarValue(), "+Inf test") // test with Mask and +Inf T = New(WithShape(4), WithBacking([]float64{1, 9, math.Inf(1), 4}, []bool{false, true, true, false})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "Masked NaN test") // test with -Inf T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(-1), 4})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with -Inf: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "+Inf test") // test with Mask and -Inf T = New(WithShape(4), WithBacking([]float64{1, 9, math.Inf(-1), 4}, []bool{false, true, true, false})) if argmax, err = T.Argmax(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmax.IsScalar()) assert.Equal(3, argmax.ScalarValue(), "Masked -Inf test") // with different engine T = basicDenseF64.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmax, err = T.Argmax(i); err != nil { t.Error(err) continue } assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape) assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i) } // idiotsville _, err = T.Argmax(10000) assert.NotNil(err) } func TestDense_Argmin_F64(t *testing.T) { assert := assert.New(t) var T, argmin *Dense var err error T = basicDenseF64.Clone().(*Dense) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. 
", i) } // test all axes if argmin, err = T.Argmin(AllAxes); err != nil { t.Error(err) return } assert.True(argmin.IsScalar()) assert.Equal(11, argmin.ScalarValue()) // test with NaN T = New(WithShape(4), WithBacking([]float64{1, 2, math.NaN(), 4})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(2, argmin.ScalarValue(), "NaN test") // test with Mask and Nan T = New(WithShape(4), WithBacking([]float64{1, -9, math.NaN(), 4}, []bool{false, true, true, false})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "Masked NaN test") // test with +Inf T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(1), 4})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with +Inf: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "+Inf test") // test with Mask and +Inf T = New(WithShape(4), WithBacking([]float64{1, -9, math.Inf(1), 4}, []bool{false, true, true, false})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "Masked NaN test") // test with -Inf T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(-1), 4})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with -Inf: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(2, argmin.ScalarValue(), "+Inf test") // test with Mask and -Inf T = New(WithShape(4), WithBacking([]float64{1, -9, math.Inf(-1), 4}, []bool{false, true, true, false})) if argmin, err = T.Argmin(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True(argmin.IsScalar()) assert.Equal(0, argmin.ScalarValue(), "Masked -Inf test") // with different engine T = basicDenseF64.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i := 0; i < T.Dims(); i++ { if argmin, err = T.Argmin(i); err != nil { t.Error(err) continue } assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape) assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i) } // idiotsville _, err = T.Argmin(10000) assert.NotNil(err) } tensor-0.9.24/dense_arith.go000066400000000000000000000254011426512615100157410ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import "github.com/pkg/errors" // Add performs t + other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T) func (t *Dense) Add(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.Add(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Add()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Add") } return } if adder, ok := t.e.(Adder); ok { if ret, err = adder.Add(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Add()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Add") } return } return nil, errors.Errorf("Engine does not support Add()") } // Sub performs t - other elementwise. Both t and other must have the same shape. 
// Sub performs t - other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Sub(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.Sub(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Sub()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Sub")
		}
		return
	}
	if suber, ok := t.e.(Suber); ok {
		if ret, err = suber.Sub(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Sub()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Sub")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support Sub()")
}

// Mul performs t × other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Mul(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.Mul(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Mul()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Mul")
		}
		return
	}
	if muler, ok := t.e.(Muler); ok {
		if ret, err = muler.Mul(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Mul()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Mul")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support Mul()")
}

// Div performs t ÷ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Div(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.Div(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Div()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Div")
		}
		return
	}
	if diver, ok := t.e.(Diver); ok {
		if ret, err = diver.Div(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Div()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Div")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support Div()")
}

// Pow performs t ^ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Pow(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.Pow(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Pow()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Pow")
		}
		return
	}
	if power, ok := t.e.(Power); ok {
		if ret, err = power.Pow(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Pow()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Pow")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support Pow()")
}
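// Editor's note (not part of the generated file): WithIncr(incr) is the
// accumulating variant of these FuncOpts. Judging by the dense_arith_test.go
// tests further down, the result of the operation is added into incr rather
// than overwriting it, and incr is also the returned tensor. A sketch, with
// a and b as in the earlier note:
//
//	incr := New(Of(Float64), WithShape(2, 2))
//	incr.Memset(100.0)
//	out, _ := a.Mul(b, WithIncr(incr)) // out == incr; holds 100 + a*b elementwise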
// Mod performs t % other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Mod(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.Mod(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Mod()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Mod")
		}
		return
	}
	if moder, ok := t.e.(Moder); ok {
		if ret, err = moder.Mod(t, other, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do Mod()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "Mod")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support Mod()")
}

// AddScalar performs t + other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) AddScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.AddScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do AddScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "AddScalar")
		}
		return
	}
	if adder, ok := t.e.(Adder); ok {
		if ret, err = adder.AddScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do AddScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "AddScalar")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support AddScalar()")
}

// SubScalar performs t - other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) SubScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.SubScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do SubScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "SubScalar")
		}
		return
	}
	if suber, ok := t.e.(Suber); ok {
		if ret, err = suber.SubScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do SubScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "SubScalar")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support SubScalar()")
}
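// Editor's note (not part of the generated file): for the *Scalar methods the
// leftTensor flag picks which side of the operator the tensor sits on, which
// only matters for the non-commutative operations. A sketch, with a as in the
// earlier note:
//
//	d, _ := a.SubScalar(float64(1), true)  // d = a - 1 (tensor on the left)
//	e, _ := a.SubScalar(float64(1), false) // e = 1 - a (tensor on the right)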
// MulScalar performs t × other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) MulScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.MulScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do MulScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "MulScalar")
		}
		return
	}
	if muler, ok := t.e.(Muler); ok {
		if ret, err = muler.MulScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do MulScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "MulScalar")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support MulScalar()")
}

// DivScalar performs t ÷ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) DivScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.DivScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do DivScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "DivScalar")
		}
		return
	}
	if diver, ok := t.e.(Diver); ok {
		if ret, err = diver.DivScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do DivScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "DivScalar")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support DivScalar()")
}

// PowScalar performs t ^ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) PowScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.PowScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do PowScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "PowScalar")
		}
		return
	}
	if power, ok := t.e.(Power); ok {
		if ret, err = power.PowScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do PowScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "PowScalar")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support PowScalar()")
}
// ModScalar performs t % other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) ModScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
	var ret Tensor
	if t.oe != nil {
		if ret, err = t.oe.ModScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do ModScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "ModScalar")
		}
		return
	}
	if moder, ok := t.e.(Moder); ok {
		if ret, err = moder.ModScalar(t, other, leftTensor, opts...); err != nil {
			return nil, errors.Wrapf(err, "Unable to do ModScalar()")
		}
		if retVal, err = assertDense(ret); err != nil {
			return nil, errors.Wrapf(err, opFail, "ModScalar")
		}
		return
	}
	return nil, errors.Errorf("Engine does not support ModScalar()")
}

tensor-0.9.24/dense_arith_test.go000066400000000000000000001523351426512615100170060ustar00rootroot00000000000000
// Code generated by genlib2. DO NOT EDIT.

package tensor

import (
	"testing"
	"testing/quick"
)

func TestDense_Add(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Adder)
		we = we || !ok

		ret, err := a.Add(b)
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Add failed: %v", err)
	}
}

func TestDense_Sub(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Suber)
		we = we || !ok

		ret, err := a.Sub(b)
		if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Add(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Sub failed: %v", err)
	}
}

func TestDense_Mul(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Muler)
		we = we || !ok

		ret, err := a.Mul(b)
		if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Mul failed: %v", err)
	}
}
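// Editor's note (not part of the generated file): the tests in this file are
// property-based (testing/quick) rather than example-based. Each checks an
// algebraic law over randomly generated tensors: a+0 == a and a×1 == a for
// the identity tests, (a-b)+b == a and (a÷b)×b == a for the inverse tests.
// The qcErrCheck/qcEqCheck helpers and the willerr type tables are defined
// elsewhere in the package. The shape of such a property, as a sketch:
//
//	iden := func(a *Dense) bool {
//		b := New(Of(a.t), WithShape(a.Shape().Clone()...)) // zero-valued
//		ret, err := a.Add(b)                               // so ret should equal a
//		return err == nil && qcEqCheck(t, a.Dtype(), false, a.Data(), ret.Data())
//	}
//	quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks})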
func TestDense_Div(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Diver)
		we = we || !ok

		ret, err := a.Div(b)
		if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Mul(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Div failed: %v", err)
	}
}

func TestDense_Pow(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
		_, ok := a.Engine().(Power)
		we = we || !ok

		ret, err := a.Pow(b)
		if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Pow failed: %v", err)
	}
}

func TestDense_Add_unsafe(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Adder)
		we = we || !ok

		ret, err := a.Add(b, UseUnsafe())
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if ret != a {
			t.Errorf("Expected ret to be the same as a")
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Add failed: %v", err)
	}
}

func TestDense_Sub_unsafe(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Suber)
		we = we || !ok

		ret, err := a.Sub(b, UseUnsafe())
		if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Add(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if ret != a {
			t.Errorf("Expected ret to be the same as a")
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Sub failed: %v", err)
	}
}

func TestDense_Mul_unsafe(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Muler)
		we = we || !ok

		ret, err := a.Mul(b, UseUnsafe())
		if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if ret != a {
			t.Errorf("Expected ret to be the same as a")
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Mul failed: %v", err)
	}
}
func TestDense_Div_unsafe(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Diver)
		we = we || !ok

		ret, err := a.Div(b, UseUnsafe())
		if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Mul(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if ret != a {
			t.Errorf("Expected ret to be the same as a")
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Div failed: %v", err)
	}
}

func TestDense_Pow_unsafe(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
		_, ok := a.Engine().(Power)
		we = we || !ok

		ret, err := a.Pow(b, UseUnsafe())
		if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if ret != a {
			t.Errorf("Expected ret to be the same as a")
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Pow failed: %v", err)
	}
}

func TestDense_Add_reuse(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Adder)
		we = we || !ok

		ret, err := a.Add(b, WithReuse(reuse))
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if reuse != ret {
			t.Errorf("Expected reuse to be the same as retVal")
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Add failed: %v", err)
	}

	mut := func(a, b *Dense, reuseA bool) bool {
		// req because we're only testing on one kind of tensor/engine combo
		a.e = StdEng{}
		a.oe = StdEng{}
		a.flag = 0
		b.e = StdEng{}
		b.oe = StdEng{}
		b.flag = 0

		if a.Dtype() != b.Dtype() {
			return true
		}
		if !a.Shape().Eq(b.Shape()) {
			return true
		}

		correct, err := a.Add(b)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Adder)
		we = we || !ok

		var ret, reuse *Dense
		if reuseA {
			ret, err = a.Add(b, WithReuse(a))
			reuse = a
		} else {
			ret, err = a.Add(b, WithReuse(b))
			reuse = b
		}
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if reuse != ret {
			t.Errorf("Expected reuse to be the same as retVal")
			return false
		}
		return true
	}
	if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Reuse Mutation test for Add failed: %v", err)
	}
}
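// Editor's note (not part of the generated file): the mut variants above and
// below additionally pass one of the operands as its own reuse buffer
// (WithReuse(a) or WithReuse(b)) and compare the result against one computed
// into a freshly allocated tensor — i.e. an output that aliases an input must
// not change the answer.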
the same as retVal") return false } return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub failed: %v", err) } mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype() { return true } if !a.Shape().Eq(b.Shape()) { return true } correct, err := a.Sub(b) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Suber) we = we || !ok var ret, reuse *Dense if reuseA { ret, err = a.Sub(b, WithReuse(a)) reuse = a } else { ret, err = a.Sub(b, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for Sub failed: %v", err) } } func TestDense_Mul_reuse(t *testing.T) { iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok ret, err := a.Mul(b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul failed: %v", err) } mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype() { return true } if !a.Shape().Eq(b.Shape()) { return true } correct, err := a.Mul(b) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok var ret, reuse *Dense if reuseA { ret, err = a.Mul(b, WithReuse(a)) reuse = a } else { ret, err = a.Mul(b, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for Mul failed: %v", err) } } func TestDense_Div_reuse(t *testing.T) { inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) b.Memset(identityVal(1, a.t)) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Diver) we = we || !ok ret, err := a.Div(b, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.Mul(b, UseUnsafe()) if !qcEqCheck(t, 
func TestDense_Div_reuse(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Diver)
		we = we || !ok

		ret, err := a.Div(b, WithReuse(reuse))
		if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Mul(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if reuse != ret {
			t.Errorf("Expected reuse to be the same as retVal")
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Div failed: %v", err)
	}

	mut := func(a, b *Dense, reuseA bool) bool {
		// req because we're only testing on one kind of tensor/engine combo
		a.e = StdEng{}
		a.oe = StdEng{}
		a.flag = 0
		b.e = StdEng{}
		b.oe = StdEng{}
		b.flag = 0

		if a.Dtype() != b.Dtype() {
			return true
		}
		if !a.Shape().Eq(b.Shape()) {
			return true
		}

		correct, err := a.Div(b)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Diver)
		we = we || !ok

		var ret, reuse *Dense
		if reuseA {
			ret, err = a.Div(b, WithReuse(a))
			reuse = a
		} else {
			ret, err = a.Div(b, WithReuse(b))
			reuse = b
		}
		if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if reuse != ret {
			t.Errorf("Expected reuse to be the same as retVal")
			return false
		}
		return true
	}
	if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Reuse Mutation test for Div failed: %v", err)
	}
}

func TestDense_Pow_reuse(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
		_, ok := a.Engine().(Power)
		we = we || !ok

		ret, err := a.Pow(b, WithReuse(reuse))
		if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		if reuse != ret {
			t.Errorf("Expected reuse to be the same as retVal")
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Pow failed: %v", err)
	}
}

func TestDense_Add_incr(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		incr.Memset(identityVal(100, a.t))
		correct.Add(incr, UseUnsafe())
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Adder)
		we = we || !ok

		ret, err := a.Add(b, WithIncr(incr))
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Add failed: %v", err)
	}
}
func TestDense_Sub_incr(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		incr.Memset(identityVal(100, a.t))
		correct.Add(incr, UseUnsafe())
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Suber)
		we = we || !ok

		ret, err := a.Sub(b, WithIncr(incr))
		if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Add(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Sub failed: %v", err)
	}
}

func TestDense_Mul_incr(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		incr.Memset(identityVal(100, a.t))
		correct.Add(incr, UseUnsafe())
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Muler)
		we = we || !ok

		ret, err := a.Mul(b, WithIncr(incr))
		if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Mul failed: %v", err)
	}
}

func TestDense_Div_incr(t *testing.T) {
	inv := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		incr.Memset(identityVal(100, a.t))
		correct.Add(incr, UseUnsafe())
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := a.Engine().(Diver)
		we = we || !ok

		ret, err := a.Div(b, WithIncr(incr))
		if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.Mul(b, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Div failed: %v", err)
	}
}

func TestDense_Pow_incr(t *testing.T) {
	iden := func(a *Dense) bool {
		b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
		b.Memset(identityVal(1, a.t))
		incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
		correct := a.Clone().(*Dense)
		incr.Memset(identityVal(100, a.t))
		correct.Add(incr, UseUnsafe())
		we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
		_, ok := a.Engine().(Power)
		we = we || !ok

		ret, err := a.Pow(b, WithIncr(incr))
		if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Pow failed: %v", err)
	}
}
func TestDense_AddScalar(t *testing.T) {
	iden1 := func(q *Dense) bool {
		a := q.Clone().(*Dense)
		b := identityVal(0, q.t)
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := q.Engine().(Adder)
		we = we || !ok

		ret, err := a.AddScalar(b, true)
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
	}

	iden2 := func(q *Dense) bool {
		a := q.Clone().(*Dense)
		b := identityVal(0, q.t)
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, nil)
		_, ok := q.Engine().(Adder)
		we = we || !ok

		ret, err := a.AddScalar(b, false)
		if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
	}

	type Foo int
	wt1 := func(a *Dense) bool {
		b := Foo(0)
		ret, err := Add(a, b)
		if err == nil {
			return false
		}
		_ = ret
		return true
	}
	if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("WrongType test for Add (tensor as left, scalar as right) failed: %v", err)
	}

	wt2 := func(a *Dense) bool {
		b := Foo(0)
		ret, err := Add(b, a)
		if err == nil {
			return false
		}
		_ = ret
		return true
	}
	if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("WrongType test for Add (tensor as right, scalar as left) failed: %v", err)
	}
}

func TestDense_SubScalar(t *testing.T) {
	inv1 := func(q *Dense) bool {
		a := q.Clone().(*Dense)
		b := identityVal(0, q.t)
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, unsignedTypes)
		_, ok := q.Engine().(Suber)
		we = we || !ok

		ret, err := a.SubScalar(b, true)
		if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.AddScalar(b, true, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
	}

	inv2 := func(q *Dense) bool {
		a := q.Clone().(*Dense)
		b := identityVal(0, q.t)
		correct := a.Clone().(*Dense)
		we, willFailEq := willerr(a, numberTypes, unsignedTypes)
		_, ok := q.Engine().(Suber)
		we = we || !ok

		ret, err := a.SubScalar(b, false)
		if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
			if err != nil {
				return false
			}
			return true
		}
		ret, err = ret.SubScalar(b, false, UseUnsafe())
		if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
			return false
		}
		return true
	}
	if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
	}

	type Foo int
	wt1 := func(a *Dense) bool {
		b := Foo(0)
		ret, err := Sub(a, b)
		if err == nil {
			return false
		}
		_ = ret
		return true
	}
	if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("WrongType test for Sub (tensor as left, scalar as right) failed: %v", err)
	}

	wt2 := func(a *Dense) bool {
		b := Foo(0)
		ret, err := Sub(b, a)
		if err == nil {
			return false
		}
		_ = ret
		return true
	}
	if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
		t.Errorf("WrongType test for Sub (tensor as right, scalar as left) failed: %v", err)
	}
}
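// Editor's note (not part of the generated file): the wt ("wrong type")
// properties in these scalar tests declare a local `type Foo int` and check
// that the package-level arithmetic functions refuse scalar operands of
// unsupported types — Add(a, Foo(0)) must return a non-nil error rather than
// a silent result, which is why the property fails when err == nil.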
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, false) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Mul(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Mul (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Mul(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Mul (tensor as right, scalar as left) failed: %v", err) } } func TestDense_DivScalar(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := a.DivScalar(b, true) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.MulScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Div(a, b) if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Div (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Div(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Div (tensor as right, scalar as left) failed: %v", err) } } func TestDense_PowScalar(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := a.PowScalar(b, true) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } type Foo int wt1 := func(a *Dense) bool { b := Foo(0) ret, err := Pow(a, b) if err == nil { return false } _ = ret 
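// wt1 here (and wt2 below) is the wrong-type guard: Foo is a locally
// declared int type that no engine registers a kernel for, so the only
// acceptable outcome is a non-nil error. The same guard in miniature, using
// an anonymous struct as the obviously invalid scalar (hand-written sketch,
// not generated code):
//
//	wrongType := func(a *Dense) bool {
//		_, err := Pow(a, struct{}{}) // must fail: not a known scalar type
//		return err != nil
//	}
//	// wire into quick.Check exactly as wt1/wt2 are below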
return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Pow (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool { b := Foo(0) ret, err := Pow(b, a) if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for Pow (tensor as right, scalar as left) failed: %v", err) } } func TestDense_AddScalar_unsafe(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := a.AddScalar(b, true, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := a.AddScalar(b, false, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } } func TestDense_SubScalar_unsafe(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := a.SubScalar(b, true, UseUnsafe()) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.AddScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } inv2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := a.SubScalar(b, false, UseUnsafe()) if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.SubScalar(b, false, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: 
%v", err) } } func TestDense_MulScalar_unsafe(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, true, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, false, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } } func TestDense_DivScalar_unsafe(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := a.DivScalar(b, true, UseUnsafe()) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.MulScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } } func TestDense_PowScalar_unsafe(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := a.PowScalar(b, true, UseUnsafe()) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if ret != a { t.Errorf("Expected ret to be the same as a") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } } func TestDense_AddScalar_reuse(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := a.AddScalar(b, true, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if 
!qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := a.AddScalar(b, false, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype() { return true } if !a.Shape().Eq(b.Shape()) { return true } correct, err := a.Add(b) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Adder) we = we || !ok var ret, reuse *Dense if reuseA { ret, err = a.Add(b, WithReuse(a)) reuse = a } else { ret, err = a.Add(b, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for Add failed: %v", err) } } func TestDense_SubScalar_reuse(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := a.SubScalar(b, true, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.AddScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } inv2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := a.SubScalar(b, false, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.SubScalar(b, false, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, 
correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err) } mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype() { return true } if !a.Shape().Eq(b.Shape()) { return true } correct, err := a.Sub(b) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := a.Engine().(Suber) we = we || !ok var ret, reuse *Dense if reuseA { ret, err = a.Sub(b, WithReuse(a)) reuse = a } else { ret, err = a.Sub(b, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for Sub failed: %v", err) } } func TestDense_MulScalar_reuse(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, true, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, false, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype() { return true } if !a.Shape().Eq(b.Shape()) { return true } correct, err := a.Mul(b) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Muler) we = we || !ok var ret, reuse *Dense if reuseA { ret, err = a.Mul(b, WithReuse(a)) reuse = a } else { ret, err = a.Mul(b, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { 
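// Judging by its uses throughout this file, qcErrCheck returns the error to
// surface plus a retEarly flag: when an error was expected (we) and duly
// occurred, retEarly is true with a nil error and the property passes early;
// an unexpected error instead reaches this branch non-nil and fails the
// property.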
return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for Mul failed: %v", err) } } func TestDense_DivScalar_reuse(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := a.DivScalar(b, true, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.MulScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype() { return true } if !a.Shape().Eq(b.Shape()) { return true } correct, err := a.Div(b) we, willFailEq := willerr(a, numberTypes, nil) _, ok := a.Engine().(Diver) we = we || !ok var ret, reuse *Dense if reuseA { ret, err = a.Div(b, WithReuse(a)) reuse = a } else { ret, err = a.Div(b, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for Div failed: %v", err) } } func TestDense_PowScalar_reuse(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := a.PowScalar(b, true, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } } func TestDense_AddScalar_incr(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := a.AddScalar(b, true, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); 
retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Adder) we = we || !ok ret, err := a.AddScalar(b, false, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err) } } func TestDense_SubScalar_incr(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(0, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok ret, err := a.SubScalar(b, true, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.AddScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err) } } func TestDense_MulScalar_incr(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, true, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err) } iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Muler) we = we || !ok ret, err := a.MulScalar(b, false, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err) } } 
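// The _incr variants above exercise the WithIncr FuncOpt: rather than
// overwriting, the op's result is accumulated into the supplied tensor, so
// ret = incr + (a op b); that is why each test precomputes correct = a + 100
// after incr.Memset(100). A hand-written sketch of that contract on concrete
// values (not generated code; assumes the default StdEng semantics):
func TestDense_withIncr_sketch(t *testing.T) {
	a := New(WithShape(2), WithBacking([]float64{2, 3}))
	incr := New(WithShape(2), WithBacking([]float64{100, 100}))
	ret, err := a.MulScalar(10.0, true, WithIncr(incr)) // ret[i] = incr[i] + a[i]*10
	if err != nil {
		t.Fatal(err)
	}
	got := ret.Data().([]float64)
	want := []float64{120, 130}
	for i := range want {
		if got[i] != want[i] {
			t.Errorf("got %v, want %v", got, want)
			break
		}
	}
}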
func TestDense_DivScalar_incr(t *testing.T) { inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, numberTypes, nil) _, ok := q.Engine().(Diver) we = we || !ok ret, err := a.DivScalar(b, true, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly { if err != nil { return false } return true } ret, err = ret.MulScalar(b, true, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err) } } func TestDense_PowScalar_incr(t *testing.T) { iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal(1, q.t) incr := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) we, willFailEq := willerr(a, floatcmplxTypes, complexTypes) _, ok := q.Engine().(Power) we = we || !ok ret, err := a.PowScalar(b, true, WithIncr(incr)) if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly { if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err) } } tensor-0.9.24/dense_assign.go000066400000000000000000000040741426512615100161210ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) func overlaps(a, b DenseTensor) bool { if a.cap() == 0 || b.cap() == 0 { return false } aarr := a.arr() barr := b.arr() if aarr.Uintptr() == barr.Uintptr() { return true } aptr := aarr.Uintptr() bptr := barr.Uintptr() capA := aptr + uintptr(cap(aarr.Header.Raw)) capB := bptr + uintptr(cap(barr.Header.Raw)) switch { case aptr < bptr: if bptr < capA { return true } case aptr > bptr: if aptr < capB { return true } } return false } func assignArray(dest, src DenseTensor) (err error) { // var copiedSrc bool if src.IsScalar() { panic("HELP") } dd := dest.Dims() sd := src.Dims() dstrides := dest.Strides() sstrides := src.Strides() var ds, ss int ds = dstrides[0] if src.IsVector() { ss = sstrides[0] } else { ss = sstrides[sd-1] } // when dd == 1, and the strides point in the same direction // we copy to a temporary if there is an overlap of data if ((dd == 1 && sd >= 1 && ds*ss < 0) || dd > 1) && overlaps(dest, src) { // create temp // copiedSrc = true } // broadcast src to dest for raw iteration tmpShape := Shape(BorrowInts(sd)) tmpStrides := BorrowInts(len(src.Strides())) copy(tmpShape, src.Shape()) copy(tmpStrides, src.Strides()) defer ReturnInts(tmpShape) defer ReturnInts(tmpStrides) if sd > dd { tmpDim := sd for tmpDim > dd && tmpShape[0] == 1 { tmpDim-- // this is better than tmpShape = tmpShape[1:] // because we are going to return these ints later copy(tmpShape, tmpShape[1:]) copy(tmpStrides, tmpStrides[1:]) } } var newStrides []int if newStrides, err = BroadcastStrides(dest.Shape(), tmpShape, dstrides, tmpStrides); err != nil { err = errors.Wrapf(err, "BroadcastStrides failed") return } dap := dest.Info() sap := MakeAP(tmpShape, newStrides, src.Info().o, src.Info().Δ) diter := 
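// At this point src's access pattern has been reconciled with dest's:
// leading size-1 dimensions of src were dropped from tmpShape, and
// BroadcastStrides produced strides (possibly zero) that let src be walked
// in lockstep with dest. The copy below simply advances two flat iterators
// together; in the 1-D scalar-broadcast case it degenerates to
//
//	for i := 0; i < n; i++ {
//		dst[i*dstStride] = src[i*0] // a 0 stride re-reads the same element
//	}
//
// (illustrative pseudocode only, not this package's API).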
newFlatIterator(dap) siter := newFlatIterator(&sap) _, err = copyDenseIter(dest, src, diter, siter) sap.zeroOnly() // cleanup, but not entirely because tmpShape and tmpStrides are separately cleaned up. Don't double free return } tensor-0.9.24/dense_cmp.go000066400000000000000000000306271426512615100154170ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import "github.com/pkg/errors" // Gt performs t > other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) Gt(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.Gt(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Gt()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Gt") } return } if gter, ok := t.e.(Gter); ok { if ret, err = gter.Gt(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Gt()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Gt") } return } return nil, errors.Errorf("Engine does not support Gt()") } // Gte performs t ≥ other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) Gte(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.Gte(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Gte()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Gte") } return } if gteer, ok := t.e.(Gteer); ok { if ret, err = gteer.Gte(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Gte()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Gte") } return } return nil, errors.Errorf("Engine does not support Gte()") } // Lt performs t < other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) Lt(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.Lt(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Lt()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Lt") } return } if lter, ok := t.e.(Lter); ok { if ret, err = lter.Lt(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Lt()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Lt") } return } return nil, errors.Errorf("Engine does not support Lt()") } // Lte performs t ≤ other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
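// A typical call, assuming two same-shaped float64 tensors (hand-written
// sketch, not generated code):
//
//	a := New(WithShape(2), WithBacking([]float64{1, 5}))
//	b := New(WithShape(2), WithBacking([]float64{3, 3}))
//	mask, _ := a.Lte(b)               // bool result:    [true, false]
//	same, _ := a.Lte(b, AsSameType()) // float64 result: [1, 0]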
func (t *Dense) Lte(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.Lte(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Lte()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Lte") } return } if lteer, ok := t.e.(Lteer); ok { if ret, err = lteer.Lte(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Lte()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Lte") } return } return nil, errors.Errorf("Engine does not support Lte()") } // ElEq performs t == other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) ElEq(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.ElEq(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Eq()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Eq") } return } if eleqer, ok := t.e.(ElEqer); ok { if ret, err = eleqer.ElEq(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Eq()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Eq") } return } return nil, errors.Errorf("Engine does not support Eq()") } // ElNe performs t ≠ other elementwise. Both t and other must have the same shape. // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). //UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) ElNe(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.ElNe(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Ne()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Ne") } return } if eleqer, ok := t.e.(ElEqer); ok { if ret, err = eleqer.ElNe(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do Ne()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Ne") } return } return nil, errors.Errorf("Engine does not support Ne()") } // GtScalar performs t > other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
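// Concretely, leftTensor picks the operand order (hand-written sketch, not
// generated code):
//
//	a := New(WithShape(2), WithBacking([]float64{1, 5}))
//	tGtS, _ := a.GtScalar(3.0, true)  // t > 3: [false, true]
//	sGtT, _ := a.GtScalar(3.0, false) // 3 > t: [true, false]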
func (t *Dense) GtScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.GtScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do GtScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "GtScalar") } return } if gter, ok := t.e.(Gter); ok { if ret, err = gter.GtScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do GtScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "GtScalar") } return } return nil, errors.Errorf("Engine does not support GtScalar()") } // GteScalar performs t ≥ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) GteScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.GteScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do GteScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "GteScalar") } return } if gteer, ok := t.e.(Gteer); ok { if ret, err = gteer.GteScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do GteScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "GteScalar") } return } return nil, errors.Errorf("Engine does not support GteScalar()") } // LtScalar performs t < other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) LtScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.LtScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do LtScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "LtScalar") } return } if lter, ok := t.e.(Lter); ok { if ret, err = lter.LtScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do LtScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "LtScalar") } return } return nil, errors.Errorf("Engine does not support LtScalar()") } // LteScalar performs t ≤ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. 
func (t *Dense) LteScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.LteScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do LteScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "LteScalar") } return } if lteer, ok := t.e.(Lteer); ok { if ret, err = lteer.LteScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do LteScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "LteScalar") } return } return nil, errors.Errorf("Engine does not support LteScalar()") } // EqScalar performs t == other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) ElEqScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.EqScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do EqScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "EqScalar") } return } if eleqer, ok := t.e.(ElEqer); ok { if ret, err = eleqer.EqScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do EqScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "EqScalar") } return } return nil, errors.Errorf("Engine does not support EqScalar()") } // NeScalar performs t ≠ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other // Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse(). // UseUnsafe() will ensure that the same type is returned. // Tensors used in WithReuse has to have the same Dtype as the return value's Dtype. func (t *Dense) ElNeScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) { var ret Tensor if t.oe != nil { if ret, err = t.oe.NeScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do NeScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "NeScalar") } return } if eleqer, ok := t.e.(ElEqer); ok { if ret, err = eleqer.NeScalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do NeScalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "NeScalar") } return } return nil, errors.Errorf("Engine does not support NeScalar()") } tensor-0.9.24/dense_cmp_test.go000066400000000000000000001044151426512615100164530ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
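// The property tests in this file check order-theoretic laws on randomly
// generated tensors: transitivity for Gt/Gte/Lt/Lte (elementwise,
// a>b and b>c imply a>c) and symmetry for ElEq/ElNe (a==b iff b==a). The
// transitivity loop that recurs below reads, in isolation (hypothetical
// helper, for orientation only):
//
//	func transitive(ab, bc, ac []bool) bool {
//		for i := range ab {
//			if ab[i] && bc[i] && !ac[i] {
//				return false
//			}
//		}
//		return true
//	}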
package tensor import ( "reflect" "testing" "testing/quick" ) func TestDense_Gt(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Gt(b) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Gt(c) if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gt(c) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestDense_Gte(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Gte(b) if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Gte(c) if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gte(c) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestDense_Lt(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Lt(b) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Lt(c) if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lt(c) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestDense_Lte(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := 
quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Lte(b) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Lte(c) if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lte(c) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestDense_ElEq(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.ElEq(b) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.ElEq(c) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.ElEq(c) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := a.ElEq(b) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := b.ElEq(a) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } } func TestDense_ElNe(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := a.ElNe(b) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := b.ElNe(a) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElNe failed: %v", err) } } func TestDense_Gt_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := 
q.Engine().(Gter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Gt(b, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Gt(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gt(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestDense_Gte_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Gte(b, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Gte(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gte(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestDense_Lt_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Lt(b, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Lt(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err 
:= a.Lt(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestDense_Lte_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.Lte(b, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.Lte(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lte(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestDense_ElEq_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) axb, err := a.ElEq(b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := b.ElEq(c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly { if err != nil { return false } return true } axc, err := a.ElEq(c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := 
typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := a.ElEq(b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := b.ElEq(a, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } } func TestDense_ElNe_assame(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) b.Memset(bv.Interface()) axb, err := a.ElNe(b, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := b.ElNe(a, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElNe failed: %v", err) } } func TestDense_GtScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.GtScalar(b, true) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.GtScalar(b, false) if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gt(c) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestDense_GteScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.GteScalar(b, true) if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.GteScalar(b, false) if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gte(c) if err, retEarly := qcErrCheck(t, "Gte - a∙c", 
a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestDense_LtScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.LtScalar(b, true) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.LtScalar(b, false) if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lt(c) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestDense_LteScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, ordTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.LteScalar(b, true) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.LteScalar(b, false) if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lte(c) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestDense_ElEqScalar(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.ElEqScalar(b, true) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.ElEqScalar(b, false) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.ElEq(c) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() for i, vab := range ab { if vab && bc[i] { if !ac[i] { return false } } } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); 
err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := a.ElEqScalar(b, true) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := a.ElEqScalar(b, false) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElEq failed: %v", err) } } func TestDense_ElNeScalar(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, eqTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := a.ElNeScalar(b, true) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := a.ElNeScalar(b, false) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElNe failed: %v", err) } } func TestDense_GtScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.GtScalar(b, true, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.GtScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gt(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gt failed: %v", err) } } func TestDense_GteScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Gteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.GteScalar(b, true, AsSameType()) if err, retEarly := 
qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.GteScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Gte(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Gte failed: %v", err) } } func TestDense_LtScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lter) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.LtScalar(b, true, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.LtScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lt(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lt failed: %v", err) } } func TestDense_LteScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(Lteer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.LteScalar(b, true, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.LteScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.Lte(c, AsSameType()) if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, 
&quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for Lte failed: %v", err) } } func TestDense_ElEqScalar_assame(t *testing.T) { transFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) axb, err := a.ElEqScalar(b, true, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxc, err := c.ElEqScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly { if err != nil { return false } return true } axc, err := a.ElEq(c, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly { if err != nil { return false } return true } if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) { t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for ElEq failed: %v", err) } symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := a.ElEqScalar(b, true, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := a.ElEqScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElEq failed: %v", err) } } func TestDense_ElNeScalar_assame(t *testing.T) { symFn := func(q *Dense) bool { we, _ := willerr(q, nonComplexNumberTypes, nil) _, ok := q.Engine().(ElEqer) we = we || !ok if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil { return true // we exit early if the generated type is not something we can handle } r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() axb, err := a.ElNeScalar(b, true, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly { if err != nil { return false } return true } bxa, err := a.ElNeScalar(b, false, AsSameType()) if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly { if err != nil { return false } return true } return reflect.DeepEqual(axb.Data(), bxa.Data()) } if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Symmetry test for ElNe failed: %v", err) } } 
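The comparison tests above all share one property-based shape: testing/quick generates a random *Dense, operands are derived from it, the operation is run both ways round, and an algebraic law is asserted (symmetry for ElEq/ElNe, transitivity for the ordering ops). A minimal, self-contained sketch of that pattern, using plain float64s in place of the package's *Dense; the NaN guard and the vacuous-truth returns are this sketch's own assumptions:

```
package main

import (
	"fmt"
	"math"
	"testing/quick"
)

func main() {
	// Transitivity of >: whenever a > b and b > c hold, a > c must hold too.
	trans := func(a, b, c float64) bool {
		if math.IsNaN(a) || math.IsNaN(b) || math.IsNaN(c) {
			return true // comparisons with NaN are always false; skip these inputs
		}
		if a > b && b > c {
			return a > c
		}
		return true // premise unmet, so the property holds vacuously
	}
	// Symmetry of ==: a == b must agree with b == a.
	sym := func(a, b float64) bool { return (a == b) == (b == a) }

	if err := quick.Check(trans, nil); err != nil {
		fmt.Println("transitivity falsified:", err)
	}
	if err := quick.Check(sym, nil); err != nil {
		fmt.Println("symmetry falsified:", err)
	}
}
```

In the real tests, qcErrCheck plays roughly the role of the NaN guard here: runs where an error was expected are detected and returned early as vacuously true.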
tensor-0.9.24/dense_colmajor_linalg_test.go000066400000000000000000000540671426512615100210370ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) var colMajorTraceTests = []struct { data interface{} correct interface{} err bool }{ {[]int{0, 1, 2, 3, 4, 5}, int(4), false}, {[]int8{0, 1, 2, 3, 4, 5}, int8(4), false}, {[]int16{0, 1, 2, 3, 4, 5}, int16(4), false}, {[]int32{0, 1, 2, 3, 4, 5}, int32(4), false}, {[]int64{0, 1, 2, 3, 4, 5}, int64(4), false}, {[]uint{0, 1, 2, 3, 4, 5}, uint(4), false}, {[]uint8{0, 1, 2, 3, 4, 5}, uint8(4), false}, {[]uint16{0, 1, 2, 3, 4, 5}, uint16(4), false}, {[]uint32{0, 1, 2, 3, 4, 5}, uint32(4), false}, {[]uint64{0, 1, 2, 3, 4, 5}, uint64(4), false}, {[]float32{0, 1, 2, 3, 4, 5}, float32(4), false}, {[]float64{0, 1, 2, 3, 4, 5}, float64(4), false}, {[]complex64{0, 1, 2, 3, 4, 5}, complex64(4), false}, {[]complex128{0, 1, 2, 3, 4, 5}, complex128(4), false}, {[]bool{true, false, true, false, true, false}, nil, true}, } func TestColMajor_Dense_Trace(t *testing.T) { assert := assert.New(t) for i, tts := range colMajorTraceTests { T := New(WithShape(2, 3), AsFortran(tts.data)) trace, err := T.Trace() if checkErr(t, tts.err, err, "Trace", i) { continue } assert.Equal(tts.correct, trace) T = New(WithBacking(tts.data)) _, err = T.Trace() if err == nil { t.Error("Expected an error when Trace() is called on non-matrices") } } } var colMajorInnerTests = []struct { a, b interface{} shapeA, shapeB Shape correct interface{} err bool }{ {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3, 1}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3, 1}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{1, 3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{1, 3}, float64(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3, 1}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3, 1}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{1, 3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{1, 3}, float32(5), false}, // stupids: type differences {Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true}, {Range(Float32, 0, 3), Range(Byte, 0, 3), Shape{3}, Shape{3}, nil, true}, {Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, nil, true}, {Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true}, // differing size {Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{4}, Shape{3}, nil, true}, // A is not a vector {Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{2, 2}, Shape{3}, nil, true}, } func TestColMajor_Dense_Inner(t *testing.T) { for i, its := range colMajorInnerTests { a := New(WithShape(its.shapeA...), AsFortran(its.a)) b := New(WithShape(its.shapeB...), AsFortran(its.b)) T, err := a.Inner(b) if checkErr(t, its.err, err, "Inner", i) {
continue } assert.Equal(t, its.correct, T) } } var colMajorMatVecMulTests = []linalgTest{ // Float64s {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false}, {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false}, {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false}, // float64s with transposed matrix {Range(Float64, 0, 6), Range(Float64, 0, 2), Shape{2, 3}, Shape{2}, true, false, Range(Float64, 52, 55), Range(Float64, 100, 103), Shape{3}, Shape{3}, []float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{3}, false, false, false}, // Float32s {Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2}, []float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false}, {Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false, Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2}, []float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false}, {Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false, Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2}, []float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false}, // stupids : unpossible shapes (wrong A) {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{6}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, //stupids: bad A shape {Range(Float64, 0, 8), Range(Float64, 0, 3), Shape{4, 2}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, //stupids: bad B shape {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, //stupids: bad reuse {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 55), Range(Float64, 100, 102), Shape{3}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true}, //stupids: bad incr shape {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 105), Shape{2}, Shape{5}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false}, // stupids: type mismatch A and B {Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B 
{Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B {Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B {Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B (non-Float) {Range(Float64, 0, 6), Range(Int, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch, reuse {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float32, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true}, // stupids: type mismatch, incr {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float32, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false}, // stupids: type mismatch, incr not a Number {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), []bool{true, true, true}, Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false}, } func TestColMajor_Dense_MatVecMul(t *testing.T) { assert := assert.New(t) for i, mvmt := range colMajorMatVecMulTests { a := New(WithShape(mvmt.shapeA...), AsFortran(mvmt.a)) b := New(WithShape(mvmt.shapeB...), AsFortran(mvmt.b)) if mvmt.transA { if err := a.T(); err != nil { t.Error(err) continue } } T, err := a.MatVecMul(b) if checkErr(t, mvmt.err, err, "Safe", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(mvmt.correct, T.Data()) // incr incr := New(WithShape(mvmt.shapeI...), AsFortran(mvmt.incr)) T, err = a.MatVecMul(b, WithIncr(incr)) if checkErr(t, mvmt.errIncr, err, "WithIncr", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(mvmt.correctIncr, T.Data()) // reuse reuse := New(WithShape(mvmt.shapeR...), AsFortran(mvmt.reuse)) T, err = a.MatVecMul(b, WithReuse(reuse)) if checkErr(t, mvmt.errReuse, err, "WithReuse", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(mvmt.correct, T.Data()) // reuse AND incr T, err = a.MatVecMul(b, WithIncr(incr), WithReuse(reuse)) if checkErr(t, mvmt.err, err, "WithReuse and WithIncr", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(mvmt.correctIncrReuse, T.Data()) } } var colMajorMatMulTests = []linalgTest{ // Float64s {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, 
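// Note on reading the linalgTest literals above: the fields are positional and
// (inferred from how the tests consume them) run a, b, shapeA, shapeB, transA,
// transB, reuse, incr, shapeR, shapeI, correct, correctIncr, correctIncrReuse,
// correctShape, err, errIncr, errReuse; the three trailing bools flag the
// expected failure mode for the safe, incr, and reuse paths respectively.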
[]float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, false, false}, // Float32s {Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float32, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2}, []float32{10, 28, 13, 40}, []float32{110, 130, 114, 143}, []float32{120, 158, 127, 183}, Shape{2, 2}, false, false, false}, // Edge cases - Row Vecs (Float64) {Range(Float64, 0, 2), Range(Float64, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false, Range(Float64, 10, 16), Range(Float64, 100, 106), Shape{2, 3}, Shape{2, 3}, []float64{0, 0, 0, 1, 0, 2}, []float64{100, 103, 101, 105, 102, 107}, []float64{100, 103, 101, 106, 102, 109}, Shape{2, 3}, false, false, false}, {Range(Float64, 0, 2), Range(Float64, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false, Range(Float64, 10, 13), Range(Float64, 100, 103), Shape{1, 3}, Shape{1, 3}, []float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{1, 3}, false, false, false}, {Range(Float64, 0, 2), Range(Float64, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false, Range(Float64, 0, 1), Range(Float64, 100, 101), Shape{1, 1}, Shape{1, 1}, []float64{1}, []float64{101}, []float64{102}, Shape{1, 1}, false, false, false}, // Edge cases - Row Vecs (Float32) {Range(Float32, 0, 2), Range(Float32, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false, Range(Float32, 10, 16), Range(Float32, 100, 106), Shape{2, 3}, Shape{2, 3}, []float32{0, 0, 0, 1, 0, 2}, []float32{100, 103, 101, 105, 102, 107}, []float32{100, 103, 101, 106, 102, 109}, Shape{2, 3}, false, false, false}, {Range(Float32, 0, 2), Range(Float32, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false, Range(Float32, 10, 13), Range(Float32, 100, 103), Shape{1, 3}, Shape{1, 3}, []float32{3, 4, 5}, []float32{103, 105, 107}, []float32{106, 109, 112}, Shape{1, 3}, false, false, false}, {Range(Float32, 0, 2), Range(Float32, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false, Range(Float32, 0, 1), Range(Float32, 100, 101), Shape{1, 1}, Shape{1, 1}, []float32{1}, []float32{101}, []float32{102}, Shape{1, 1}, false, false, false}, // stupids - bad shape (not matrices): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false}, // stupids - bad shape (incompatible shapes): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6, 1}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false}, // stupids - bad shape (bad reuse shape): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 57), Range(Float64, 100, 104), Shape{5}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, false, true}, // stupids - bad shape (bad incr shape): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{4}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, true, false}, // stupids - type mismatch (a,b) {Range(Float64, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 
104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false}, // stupids - type mismatch (a,b) {Range(Float32, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false}, // stupids type mismatch (b not float) {Range(Float64, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false}, // stupids type mismatch (a not float) {Range(Int, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false}, // stupids: type mismatch (incr) {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, true, false}, // stupids: type mismatch (reuse) {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float32, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, false, true}, // stupids: type mismatch (reuse) {Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2}, []float32{10, 28, 13, 40}, []float32{110, 130, 114, 143}, []float32{120, 158, 127, 183}, Shape{2, 2}, false, false, true}, } func TestColMajorDense_MatMul(t *testing.T) { assert := assert.New(t) for i, mmt := range colMajorMatMulTests { a := New(WithShape(mmt.shapeA...), AsFortran(mmt.a)) b := New(WithShape(mmt.shapeB...), AsFortran(mmt.b)) T, err := a.MatMul(b) if checkErr(t, mmt.err, err, "Safe", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(mmt.correct, T.Data(), "Test %d", i) // incr incr := New(WithShape(mmt.shapeI...), AsFortran(mmt.incr)) T, err = a.MatMul(b, WithIncr(incr)) if checkErr(t, mmt.errIncr, err, "WithIncr", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correctIncr, T.Data()) // reuse reuse := New(WithShape(mmt.shapeR...), AsFortran(mmt.reuse)) T, err = a.MatMul(b, WithReuse(reuse)) if checkErr(t, mmt.errReuse, err, "WithReuse", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correct, T.Data()) // reuse AND incr T, err = a.MatMul(b, WithIncr(incr), WithReuse(reuse)) if checkErr(t, mmt.err, err, "WithIncr and WithReuse", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correctIncrReuse, T.Data()) } } var colMajorOuterTests = []linalgTest{ // Float64s {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 
101, 106, 111, 102, 109, 116}, Shape{3, 3}, false, false, false}, // Float32s {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float32, 52, 61), Range(Float32, 100, 109), Shape{3, 3}, Shape{3, 3}, []float32{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float32{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float32{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, false, false, false}, // stupids - a or b not vector {Range(Float64, 0, 3), Range(Float64, 0, 6), Shape{3}, Shape{3, 2}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, true, false, false}, // stupids - bad incr shape {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 106), Shape{3, 3}, Shape{3, 2}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, false, true, false}, // stupids - bad reuse shape {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 58), Range(Float64, 100, 109), Shape{3, 2}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, false, false, true}, // stupids - b not Float {Range(Float64, 0, 3), Range(Int, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, true, false, false}, // stupids - a not Float {Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, true, false, false}, // stupids - a-b type mismatch {Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, true, false, false}, // stupids a-b type mismatch {Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3}, true, false, false}, } func TestColMajor_Dense_Outer(t *testing.T) { assert := assert.New(t) for i, ot := range colMajorOuterTests { a := New(WithShape(ot.shapeA...), AsFortran(ot.a)) b := New(WithShape(ot.shapeB...), AsFortran(ot.b)) T, err := a.Outer(b) if checkErr(t, ot.err, err, "Safe", i) { continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(ot.correct, T.Data()) // incr incr := New(WithShape(ot.shapeI...), AsFortran(ot.incr)) T, err = a.Outer(b, WithIncr(incr)) if checkErr(t, ot.errIncr, err, "WithIncr", i) { continue } 
assert.True(ot.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(ot.correctIncr, T.Data()) // reuse reuse := New(WithShape(ot.shapeR...), AsFortran(ot.reuse)) T, err = a.Outer(b, WithReuse(reuse)) if checkErr(t, ot.errReuse, err, "WithReuse", i) { continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(ot.correct, T.Data()) // reuse AND incr T, err = a.Outer(b, WithIncr(incr), WithReuse(reuse)) if err != nil { t.Errorf("Reuse and Incr error'd %+v", err) continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsColMajor()) assert.Equal(ot.correctIncrReuse, T.Data()) } } tensor-0.9.24/dense_compat.go000066400000000000000000000367021426512615100161230ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "fmt" "math" "math/cmplx" "reflect" arrow "github.com/apache/arrow/go/arrow" arrowArray "github.com/apache/arrow/go/arrow/array" "github.com/apache/arrow/go/arrow/bitutil" arrowTensor "github.com/apache/arrow/go/arrow/tensor" "github.com/chewxy/math32" "github.com/pkg/errors" "gonum.org/v1/gonum/mat" ) func convFromFloat64s(to Dtype, data []float64) interface{} { switch to { case Int: retVal := make([]int, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = int(v) } } return retVal case Int8: retVal := make([]int8, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = int8(v) } } return retVal case Int16: retVal := make([]int16, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = int16(v) } } return retVal case Int32: retVal := make([]int32, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = int32(v) } } return retVal case Int64: retVal := make([]int64, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = int64(v) } } return retVal case Uint: retVal := make([]uint, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = uint(v) } } return retVal case Uint8: retVal := make([]uint8, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = uint8(v) } } return retVal case Uint16: retVal := make([]uint16, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = uint16(v) } } return retVal case Uint32: retVal := make([]uint32, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = uint32(v) } } return retVal case Uint64: retVal := make([]uint64, len(data)) for i, v := range data { switch { case math.IsNaN(v), math.IsInf(v, 0): retVal[i] = 0 default: retVal[i] = uint64(v) } } return retVal case Float32: retVal := make([]float32, len(data)) for i, v := range data { switch { case math.IsNaN(v): retVal[i] = math32.NaN() case math.IsInf(v, 1): retVal[i] = math32.Inf(1) case math.IsInf(v, -1): retVal[i] = math32.Inf(-1) default: retVal[i] = float32(v) } } return retVal case Float64: retVal := make([]float64, len(data)) copy(retVal, data) return retVal case Complex64: retVal := make([]complex64, len(data)) for i, v := range data { switch { case math.IsNaN(v): retVal[i] = complex64(cmplx.NaN()) 
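// Note: cmplx.Inf() used in the following case is sign-agnostic (the complex
// plane has a single point at infinity), so converting a float64 ±Inf to a
// complex value drops the sign of the infinity.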
case math.IsInf(v, 0): retVal[i] = complex64(cmplx.Inf()) default: retVal[i] = complex(float32(v), float32(0)) } } return retVal case Complex128: retVal := make([]complex128, len(data)) for i, v := range data { switch { case math.IsNaN(v): retVal[i] = cmplx.NaN() case math.IsInf(v, 0): retVal[i] = cmplx.Inf() default: retVal[i] = complex(v, float64(0)) } } return retVal default: panic("Unsupported Dtype") } } func convToFloat64s(t *Dense) (retVal []float64) { retVal = make([]float64, t.len()) switch t.t { case Int: for i, v := range t.Ints() { retVal[i] = float64(v) } return retVal case Int8: for i, v := range t.Int8s() { retVal[i] = float64(v) } return retVal case Int16: for i, v := range t.Int16s() { retVal[i] = float64(v) } return retVal case Int32: for i, v := range t.Int32s() { retVal[i] = float64(v) } return retVal case Int64: for i, v := range t.Int64s() { retVal[i] = float64(v) } return retVal case Uint: for i, v := range t.Uints() { retVal[i] = float64(v) } return retVal case Uint8: for i, v := range t.Uint8s() { retVal[i] = float64(v) } return retVal case Uint16: for i, v := range t.Uint16s() { retVal[i] = float64(v) } return retVal case Uint32: for i, v := range t.Uint32s() { retVal[i] = float64(v) } return retVal case Uint64: for i, v := range t.Uint64s() { retVal[i] = float64(v) } return retVal case Float32: for i, v := range t.Float32s() { switch { case math32.IsNaN(v): retVal[i] = math.NaN() case math32.IsInf(v, 1): retVal[i] = math.Inf(1) case math32.IsInf(v, -1): retVal[i] = math.Inf(-1) default: retVal[i] = float64(v) } } return retVal case Float64: return t.Float64s() case Complex64: for i, v := range t.Complex64s() { switch { case cmplx.IsNaN(complex128(v)): retVal[i] = math.NaN() case cmplx.IsInf(complex128(v)): retVal[i] = math.Inf(1) default: retVal[i] = float64(real(v)) } } return retVal case Complex128: for i, v := range t.Complex128s() { switch { case cmplx.IsNaN(v): retVal[i] = math.NaN() case cmplx.IsInf(v): retVal[i] = math.Inf(1) default: retVal[i] = real(v) } } return retVal default: panic(fmt.Sprintf("Cannot convert *Dense of %v to []float64", t.t)) } } func convToFloat64(x interface{}) float64 { switch xt := x.(type) { case int: return float64(xt) case int8: return float64(xt) case int16: return float64(xt) case int32: return float64(xt) case int64: return float64(xt) case uint: return float64(xt) case uint8: return float64(xt) case uint16: return float64(xt) case uint32: return float64(xt) case uint64: return float64(xt) case float32: return float64(xt) case float64: return float64(xt) case complex64: return float64(real(xt)) case complex128: return real(xt) default: panic("Cannot convert to float64") } } // FromMat64 converts a *mat.Dense into a *Dense. func FromMat64(m *mat.Dense, opts ...FuncOpt) *Dense { r, c := m.Dims() fo := ParseFuncOpts(opts...)
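// fo holds the parsed functional options: fo.Safe() below decides whether the
// float64 backing is copied out of m (UseUnsafe() disables the copy), and
// fo.As() selects the target Dtype, defaulting to Float64.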
defer returnOpOpt(fo) toCopy := fo.Safe() as := fo.As() if as.Type == nil { as = Float64 } switch as.Kind() { case reflect.Int: backing := convFromFloat64s(Int, m.RawMatrix().Data).([]int) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Int8: backing := convFromFloat64s(Int8, m.RawMatrix().Data).([]int8) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Int16: backing := convFromFloat64s(Int16, m.RawMatrix().Data).([]int16) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Int32: backing := convFromFloat64s(Int32, m.RawMatrix().Data).([]int32) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Int64: backing := convFromFloat64s(Int64, m.RawMatrix().Data).([]int64) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Uint: backing := convFromFloat64s(Uint, m.RawMatrix().Data).([]uint) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Uint8: backing := convFromFloat64s(Uint8, m.RawMatrix().Data).([]uint8) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Uint16: backing := convFromFloat64s(Uint16, m.RawMatrix().Data).([]uint16) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Uint32: backing := convFromFloat64s(Uint32, m.RawMatrix().Data).([]uint32) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Uint64: backing := convFromFloat64s(Uint64, m.RawMatrix().Data).([]uint64) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Float32: backing := convFromFloat64s(Float32, m.RawMatrix().Data).([]float32) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Float64: var backing []float64 if toCopy { backing = make([]float64, len(m.RawMatrix().Data)) copy(backing, m.RawMatrix().Data) } else { backing = m.RawMatrix().Data } retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Complex64: backing := convFromFloat64s(Complex64, m.RawMatrix().Data).([]complex64) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal case reflect.Complex128: backing := convFromFloat64s(Complex128, m.RawMatrix().Data).([]complex128) retVal := New(WithBacking(backing), WithShape(r, c)) return retVal default: panic(fmt.Sprintf("Unsupported Dtype - cannot convert float64 to %v", as)) } panic("Unreachable") } // ToMat64 converts a *Dense to a *mat.Dense. All the values are converted into float64s. // This function will only convert matrices. Anything *Dense with dimensions larger than 2 will cause an error. func ToMat64(t *Dense, opts ...FuncOpt) (retVal *mat.Dense, err error) { // checks: if !t.IsNativelyAccessible() { return nil, errors.Errorf("Cannot convert *Dense to *mat.Dense. Data is inaccessible") } if !t.IsMatrix() { // error return nil, errors.Errorf("Cannot convert *Dense to *mat.Dense. Expected number of dimensions: <=2, T has got %d dimensions (Shape: %v)", t.Dims(), t.Shape()) } fo := ParseFuncOpts(opts...) 
defer returnOpOpt(fo) toCopy := fo.Safe() // fix dims r := t.Shape()[0] c := t.Shape()[1] var data []float64 switch { case t.t == Float64 && toCopy && !t.IsMaterializable(): data = make([]float64, t.len()) copy(data, t.Float64s()) case !t.IsMaterializable(): data = convToFloat64s(t) default: it := newFlatIterator(&t.AP) var next int for next, err = it.Next(); err == nil; next, err = it.Next() { if err = handleNoOp(err); err != nil { return } data = append(data, convToFloat64(t.Get(next))) } err = nil } retVal = mat.NewDense(r, c, data) return } // FromArrowArray converts an "arrow/array".Interface into a Tensor of matching DataType. func FromArrowArray(a arrowArray.Interface) *Dense { a.Retain() defer a.Release() r := a.Len() // TODO(poopoothegorilla): instead of creating bool ValidMask maybe // bitmapBytes can be used from arrow API mask := make([]bool, r) for i := 0; i < r; i++ { mask[i] = a.IsNull(i) } switch a.DataType() { case arrow.BinaryTypes.String: backing := make([]string, r) for i := 0; i < r; i++ { backing[i] = a.(*arrowArray.String).Value(i) } retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.FixedWidthTypes.Boolean: backing := make([]bool, r) for i := 0; i < r; i++ { backing[i] = a.(*arrowArray.Boolean).Value(i) } retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Int8: backing := a.(*arrowArray.Int8).Int8Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Int16: backing := a.(*arrowArray.Int16).Int16Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Int32: backing := a.(*arrowArray.Int32).Int32Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Int64: backing := a.(*arrowArray.Int64).Int64Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Uint8: backing := a.(*arrowArray.Uint8).Uint8Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Uint16: backing := a.(*arrowArray.Uint16).Uint16Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Uint32: backing := a.(*arrowArray.Uint32).Uint32Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Uint64: backing := a.(*arrowArray.Uint64).Uint64Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Float32: backing := a.(*arrowArray.Float32).Float32Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal case arrow.PrimitiveTypes.Float64: backing := a.(*arrowArray.Float64).Float64Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal default: panic(fmt.Sprintf("Unsupported Arrow DataType - %v", a.DataType())) } panic("Unreachable") } // FromArrowTensor converts an "arrow/tensor".Interface into a Tensor of matching DataType. 
func FromArrowTensor(a arrowTensor.Interface) *Dense { a.Retain() defer a.Release() if !a.IsContiguous() { panic("Non-contiguous data is Unsupported") } var shape []int for _, val := range a.Shape() { shape = append(shape, int(val)) } l := a.Len() validMask := a.Data().Buffers()[0].Bytes() dataOffset := a.Data().Offset() mask := make([]bool, l) for i := 0; i < l; i++ { mask[i] = len(validMask) != 0 && bitutil.BitIsNotSet(validMask, dataOffset+i) } switch a.DataType() { case arrow.PrimitiveTypes.Int8: backing := a.(*arrowTensor.Int8).Int8Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Int16: backing := a.(*arrowTensor.Int16).Int16Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Int32: backing := a.(*arrowTensor.Int32).Int32Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Int64: backing := a.(*arrowTensor.Int64).Int64Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Uint8: backing := a.(*arrowTensor.Uint8).Uint8Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Uint16: backing := a.(*arrowTensor.Uint16).Uint16Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Uint32: backing := a.(*arrowTensor.Uint32).Uint32Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Uint64: backing := a.(*arrowTensor.Uint64).Uint64Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Float32: backing := a.(*arrowTensor.Float32).Float32Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) case arrow.PrimitiveTypes.Float64: backing := a.(*arrowTensor.Float64).Float64Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) default: panic(fmt.Sprintf("Unsupported Arrow DataType - %v", a.DataType())) } panic("Unreachable") } tensor-0.9.24/dense_compat_test.go000066400000000000000000000535041426512615100171610ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
package tensor import ( "testing" arrow "github.com/apache/arrow/go/arrow" arrowArray "github.com/apache/arrow/go/arrow/array" "github.com/apache/arrow/go/arrow/memory" arrowTensor "github.com/apache/arrow/go/arrow/tensor" "github.com/stretchr/testify/assert" "gonum.org/v1/gonum/mat" ) var toMat64Tests = []struct { data interface{} sliced interface{} shape Shape dt Dtype }{ {Range(Int, 0, 6), []int{0, 1, 3, 4}, Shape{2, 3}, Int}, {Range(Int8, 0, 6), []int8{0, 1, 3, 4}, Shape{2, 3}, Int8}, {Range(Int16, 0, 6), []int16{0, 1, 3, 4}, Shape{2, 3}, Int16}, {Range(Int32, 0, 6), []int32{0, 1, 3, 4}, Shape{2, 3}, Int32}, {Range(Int64, 0, 6), []int64{0, 1, 3, 4}, Shape{2, 3}, Int64}, {Range(Uint, 0, 6), []uint{0, 1, 3, 4}, Shape{2, 3}, Uint}, {Range(Uint8, 0, 6), []uint8{0, 1, 3, 4}, Shape{2, 3}, Uint8}, {Range(Uint16, 0, 6), []uint16{0, 1, 3, 4}, Shape{2, 3}, Uint16}, {Range(Uint32, 0, 6), []uint32{0, 1, 3, 4}, Shape{2, 3}, Uint32}, {Range(Uint64, 0, 6), []uint64{0, 1, 3, 4}, Shape{2, 3}, Uint64}, {Range(Float32, 0, 6), []float32{0, 1, 3, 4}, Shape{2, 3}, Float32}, {Range(Float64, 0, 6), []float64{0, 1, 3, 4}, Shape{2, 3}, Float64}, {Range(Complex64, 0, 6), []complex64{0, 1, 3, 4}, Shape{2, 3}, Complex64}, {Range(Complex128, 0, 6), []complex128{0, 1, 3, 4}, Shape{2, 3}, Complex128}, } func TestToMat64(t *testing.T) { assert := assert.New(t) for i, tmt := range toMat64Tests { T := New(WithBacking(tmt.data), WithShape(tmt.shape...)) var m *mat.Dense var err error if m, err = ToMat64(T); err != nil { t.Errorf("ToMat basic test %d failed : %v", i, err) continue } conv := anyToFloat64s(tmt.data) assert.Equal(conv, m.RawMatrix().Data, "i %d from %v", i, tmt.dt) if T, err = sliceDense(T, nil, makeRS(0, 2)); err != nil { t.Errorf("Slice failed %v", err) continue } if m, err = ToMat64(T); err != nil { t.Errorf("ToMat of slice test %d failed : %v", i, err) continue } conv = anyToFloat64s(tmt.sliced) assert.Equal(conv, m.RawMatrix().Data, "sliced test %d from %v", i, tmt.dt) t.Logf("Done") if tmt.dt == Float64 { T = New(WithBacking(tmt.data), WithShape(tmt.shape...)) if m, err = ToMat64(T, UseUnsafe()); err != nil { t.Errorf("ToMat64 unsafe test %d failed: %v", i, err) } conv = anyToFloat64s(tmt.data) assert.Equal(conv, m.RawMatrix().Data, "float64 unsafe i %d from %v", i, tmt.dt) conv[0] = 1000 assert.Equal(conv, m.RawMatrix().Data, "float64 unsafe i %d from %v", i, tmt.dt) conv[0] = 0 // reset for future tests that use the same backing } } // idiocy test T := New(Of(Float64), WithShape(2, 3, 4)) _, err := ToMat64(T) if err == nil { t.Error("Expected an error when trying to convert a 3-T to *mat.Dense") } } func TestFromMat64(t *testing.T) { assert := assert.New(t) var m *mat.Dense var T *Dense var backing []float64 for i, tmt := range toMat64Tests { backing = Range(Float64, 0, 6).([]float64) m = mat.NewDense(2, 3, backing) T = FromMat64(m) conv := anyToFloat64s(tmt.data) assert.Equal(conv, T.Float64s(), "test %d: []float64 from %v", i, tmt.dt) assert.True(T.Shape().Eq(tmt.shape)) T = FromMat64(m, As(tmt.dt)) assert.Equal(tmt.data, T.Data()) assert.True(T.Shape().Eq(tmt.shape)) if tmt.dt == Float64 { backing = Range(Float64, 0, 6).([]float64) m = mat.NewDense(2, 3, backing) T = FromMat64(m, UseUnsafe()) assert.Equal(backing, T.Float64s()) assert.True(T.Shape().Eq(tmt.shape)) backing[0] = 1000 assert.Equal(backing, T.Float64s(), "test %d - unsafe float64", i) } } } var toArrowArrayTests = []struct { data interface{} valid []bool dt arrow.DataType shape Shape }{ { data: Range(Int8, 0, 6), valid: []bool{true, 
true, true, false, true, true}, dt: arrow.PrimitiveTypes.Int8, shape: Shape{6, 1}, }, { data: Range(Int16, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Int16, shape: Shape{6, 1}, }, { data: Range(Int32, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Int32, shape: Shape{6, 1}, }, { data: Range(Int64, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Int64, shape: Shape{6, 1}, }, { data: Range(Uint8, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Uint8, shape: Shape{6, 1}, }, { data: Range(Uint16, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Uint16, shape: Shape{6, 1}, }, { data: Range(Uint32, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Uint32, shape: Shape{6, 1}, }, { data: Range(Uint64, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Uint64, shape: Shape{6, 1}, }, { data: Range(Float32, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Float32, shape: Shape{6, 1}, }, { data: Range(Float64, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.Float64, shape: Shape{6, 1}, }, } func TestFromArrowArray(t *testing.T) { assert := assert.New(t) var T *Dense pool := memory.NewGoAllocator() for i, taat := range toArrowArrayTests { var m arrowArray.Interface switch taat.dt { case arrow.BinaryTypes.String: b := arrowArray.NewStringBuilder(pool) defer b.Release() b.AppendValues( []string{"0", "1", "2", "3", "4", "5"}, taat.valid, ) m = b.NewArray() defer m.Release() case arrow.FixedWidthTypes.Boolean: b := arrowArray.NewBooleanBuilder(pool) defer b.Release() b.AppendValues( []bool{true, false, true, false, true, false}, taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Int8: b := arrowArray.NewInt8Builder(pool) defer b.Release() b.AppendValues( Range(Int8, 0, 6).([]int8), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Int16: b := arrowArray.NewInt16Builder(pool) defer b.Release() b.AppendValues( Range(Int16, 0, 6).([]int16), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Int32: b := arrowArray.NewInt32Builder(pool) defer b.Release() b.AppendValues( Range(Int32, 0, 6).([]int32), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Int64: b := arrowArray.NewInt64Builder(pool) defer b.Release() b.AppendValues( Range(Int64, 0, 6).([]int64), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Uint8: b := arrowArray.NewUint8Builder(pool) defer b.Release() b.AppendValues( Range(Uint8, 0, 6).([]uint8), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Uint16: b := arrowArray.NewUint16Builder(pool) defer b.Release() b.AppendValues( Range(Uint16, 0, 6).([]uint16), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Uint32: b := arrowArray.NewUint32Builder(pool) defer b.Release() b.AppendValues( Range(Uint32, 0, 6).([]uint32), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Uint64: b := arrowArray.NewUint64Builder(pool) defer b.Release() b.AppendValues( Range(Uint64, 0, 6).([]uint64), taat.valid, ) m = b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Float32: b := arrowArray.NewFloat32Builder(pool) defer b.Release() b.AppendValues( Range(Float32, 0, 6).([]float32), taat.valid, ) m = 
b.NewArray() defer m.Release() case arrow.PrimitiveTypes.Float64: b := arrowArray.NewFloat64Builder(pool) defer b.Release() b.AppendValues( Range(Float64, 0, 6).([]float64), taat.valid, ) m = b.NewArray() defer m.Release() default: t.Errorf("DataType not supported in tests: %v", taat.dt) } T = FromArrowArray(m) switch taat.dt { case arrow.PrimitiveTypes.Int8: conv := taat.data.([]int8) assert.Equal(conv, T.Int8s(), "test %d: []int8 from %v", i, taat.dt) case arrow.PrimitiveTypes.Int16: conv := taat.data.([]int16) assert.Equal(conv, T.Int16s(), "test %d: []int16 from %v", i, taat.dt) case arrow.PrimitiveTypes.Int32: conv := taat.data.([]int32) assert.Equal(conv, T.Int32s(), "test %d: []int32 from %v", i, taat.dt) case arrow.PrimitiveTypes.Int64: conv := taat.data.([]int64) assert.Equal(conv, T.Int64s(), "test %d: []int64 from %v", i, taat.dt) case arrow.PrimitiveTypes.Uint8: conv := taat.data.([]uint8) assert.Equal(conv, T.Uint8s(), "test %d: []uint8 from %v", i, taat.dt) case arrow.PrimitiveTypes.Uint16: conv := taat.data.([]uint16) assert.Equal(conv, T.Uint16s(), "test %d: []uint16 from %v", i, taat.dt) case arrow.PrimitiveTypes.Uint32: conv := taat.data.([]uint32) assert.Equal(conv, T.Uint32s(), "test %d: []uint32 from %v", i, taat.dt) case arrow.PrimitiveTypes.Uint64: conv := taat.data.([]uint64) assert.Equal(conv, T.Uint64s(), "test %d: []uint64 from %v", i, taat.dt) case arrow.PrimitiveTypes.Float32: conv := taat.data.([]float32) assert.Equal(conv, T.Float32s(), "test %d: []float32 from %v", i, taat.dt) case arrow.PrimitiveTypes.Float64: conv := taat.data.([]float64) assert.Equal(conv, T.Float64s(), "test %d: []float64 from %v", i, taat.dt) default: t.Errorf("DataType not supported in tests: %v", taat.dt) } for i, invalid := range T.Mask() { assert.Equal(taat.valid[i], !invalid) } assert.True(T.Shape().Eq(taat.shape)) } } var toArrowTensorTests = []struct { rowMajorData interface{} colMajorData interface{} rowMajorValid []bool colMajorValid []bool dt arrow.DataType shape Shape }{ { rowMajorData: []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []int8{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Int8, shape: Shape{2, 5}, }, { rowMajorData: []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []int16{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Int16, shape: Shape{2, 5}, }, { rowMajorData: []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []int32{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Int32, shape: Shape{2, 5}, }, { rowMajorData: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []int64{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Int64, shape: Shape{2, 5}, }, { rowMajorData: []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []uint8{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, 
true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Uint8, shape: Shape{2, 5}, }, { rowMajorData: []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []uint16{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Uint16, shape: Shape{2, 5}, }, { rowMajorData: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []uint32{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Uint32, shape: Shape{2, 5}, }, { rowMajorData: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []uint64{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Uint64, shape: Shape{2, 5}, }, { rowMajorData: []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []float32{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Float32, shape: Shape{2, 5}, }, { rowMajorData: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []float64{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.Float64, shape: Shape{2, 5}, }, } func TestFromArrowTensor(t *testing.T) { assert := assert.New(t) var rowMajorT *Dense var colMajorT *Dense pool := memory.NewGoAllocator() for i, taat := range toArrowTensorTests { var rowMajorArr arrowArray.Interface var colMajorArr arrowArray.Interface var rowMajor arrowTensor.Interface var colMajor arrowTensor.Interface switch taat.dt { case arrow.PrimitiveTypes.Int8: b := arrowArray.NewInt8Builder(pool) defer b.Release() b.AppendValues( []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewInt8(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewInt8(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int8SizeBytes), int64(arrow.Int8SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Int16: b := arrowArray.NewInt16Builder(pool) defer b.Release() b.AppendValues( []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewInt16(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewInt16(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int16SizeBytes), int64(arrow.Int16SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case 
arrow.PrimitiveTypes.Int32: b := arrowArray.NewInt32Builder(pool) defer b.Release() b.AppendValues( []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewInt32(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewInt32(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int32SizeBytes), int64(arrow.Int32SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Int64: b := arrowArray.NewInt64Builder(pool) defer b.Release() b.AppendValues( []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewInt64(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewInt64(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int64SizeBytes), int64(arrow.Int64SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Uint8: b := arrowArray.NewUint8Builder(pool) defer b.Release() b.AppendValues( []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewUint8(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewUint8(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint8SizeBytes), int64(arrow.Uint8SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Uint16: b := arrowArray.NewUint16Builder(pool) defer b.Release() b.AppendValues( []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewUint16(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewUint16(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint16SizeBytes), int64(arrow.Uint16SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Uint32: b := arrowArray.NewUint32Builder(pool) defer b.Release() b.AppendValues( []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewUint32(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewUint32(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint32SizeBytes), int64(arrow.Uint32SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Uint64: b := arrowArray.NewUint64Builder(pool) defer b.Release() b.AppendValues( []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []uint64{1, 2, 3, 4, 5, 6, 
7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewUint64(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewUint64(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint64SizeBytes), int64(arrow.Uint64SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Float32: b := arrowArray.NewFloat32Builder(pool) defer b.Release() b.AppendValues( []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewFloat32(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewFloat32(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Float32SizeBytes), int64(arrow.Float32SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() case arrow.PrimitiveTypes.Float64: b := arrowArray.NewFloat64Builder(pool) defer b.Release() b.AppendValues( []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.NewFloat64(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.NewFloat64(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Float64SizeBytes), int64(arrow.Float64SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() default: t.Errorf("DataType not supported in tests: %v", taat.dt) } rowMajorT = FromArrowTensor(rowMajor) colMajorT = FromArrowTensor(colMajor) assert.Equal(taat.rowMajorData, rowMajorT.Data(), "test %d: row major %v", i, taat.dt) assert.Equal(len(taat.rowMajorValid), len(rowMajorT.Mask()), "test %d: row major %v mask length incorrect", i, taat.dt) for i, invalid := range rowMajorT.Mask() { assert.Equal(taat.rowMajorValid[i], !invalid, "test %d: row major %v mask value incorrect", i, taat.dt) } assert.True(colMajorT.Shape().Eq(taat.shape)) assert.Equal(taat.colMajorData, colMajorT.Data(), "test %d: column major %v", i, taat.dt) assert.Equal(len(taat.colMajorValid), len(colMajorT.Mask()), "test %d: column major %v mask length incorrect", i, taat.dt) for i, invalid := range colMajorT.Mask() { assert.Equal(taat.colMajorValid[i], !invalid, "test %d: column major %v mask value incorrect", i, taat.dt) } assert.True(rowMajorT.Shape().Eq(taat.shape)) } } tensor-0.9.24/dense_format.go000066400000000000000000000233361426512615100161270ustar00rootroot00000000000000package tensor import ( "bytes" "fmt" "reflect" "strconv" ) var fmtFlags = [...]rune{'+', '-', '#', ' ', '0'} var fmtByte = []byte("%") var precByte = []byte(".") var newline = []byte("\n") var ( matFirstStart = []byte("⎡") matFirstEnd = []byte("⎤\n") matLastStart = []byte("⎣") matLastEnd = []byte("⎦\n") rowStart = []byte("⎢") rowEnd = []byte("⎥\n") vecStart = []byte("[") vecEnd = []byte("]") colVecStart = []byte("C[") rowVecStart = []byte("R[") hElisionCompact = []byte("⋯ ") hElision = []byte("... 
") vElisionCompact = []byte(" ⋮ \n") vElision = []byte(".\n.\n.\n") ufVec = []byte("Vector") ufMat = []byte("Matrix") ufTensor = []byte("Tensor-") hInvalid = []byte("--") ) type fmtState struct { fmt.State buf *bytes.Buffer pad []byte hElision, vElision []byte meta bool flat bool ext bool // extended (i.e no elision) comp bool // compact c rune // c is here mainly for struct packing reasons w, p int // width and precision base int // used only for int/byte arrays rows, cols int pr, pc int // printed row, printed col } func newFmtState(f fmt.State, c rune) *fmtState { retVal := &fmtState{ State: f, buf: bytes.NewBuffer(make([]byte, 10)), c: c, meta: f.Flag('+'), flat: f.Flag('-'), ext: f.Flag('#'), comp: c == 's', hElision: hElision, vElision: vElision, } w, _ := f.Width() p, _ := f.Precision() retVal.w = w retVal.p = p return retVal } func (f *fmtState) originalFmt() string { buf := bytes.NewBuffer(fmtByte) for _, flag := range fmtFlags { if f.Flag(int(flag)) { buf.WriteRune(flag) } } // width if w, ok := f.Width(); ok { buf.WriteString(strconv.Itoa(w)) } // precision if p, ok := f.Precision(); ok { buf.Write(precByte) buf.WriteString(strconv.Itoa(p)) } buf.WriteRune(f.c) return buf.String() } func (f *fmtState) cleanFmt() string { buf := bytes.NewBuffer(fmtByte) // width if w, ok := f.Width(); ok { buf.WriteString(strconv.Itoa(w)) } // precision if p, ok := f.Precision(); ok { buf.Write(precByte) buf.WriteString(strconv.Itoa(p)) } buf.WriteRune(f.c) return buf.String() } // does the calculation for metadata func (f *fmtState) populate(t *Dense) { switch { case t.IsVector(): f.rows = 1 f.cols = t.Size() case t.IsScalarEquiv(): f.rows = 1 f.cols = 1 default: f.rows = t.Shape()[t.Dims()-2] f.cols = t.Shape()[t.Dims()-1] } switch { case f.flat && f.ext: f.pc = t.len() case f.flat && f.comp: f.pc = 5 f.hElision = hElisionCompact case f.flat: f.pc = 10 case f.ext: f.pc = f.cols f.pr = f.rows case f.comp: f.pc = MinInt(f.cols, 4) f.pr = MinInt(f.rows, 4) f.hElision = hElisionCompact f.vElision = vElisionCompact default: f.pc = MinInt(f.cols, 8) f.pr = MinInt(f.rows, 8) } } func (f *fmtState) acceptableRune(d *Dense) { if f.c == 'H' { f.meta = true return // accept H as header only } switch d.t.Kind() { case reflect.Float64: switch f.c { case 'f', 'e', 'E', 'G', 'b': default: f.c = 'g' } case reflect.Float32: switch f.c { case 'f', 'e', 'E', 'G', 'b': default: f.c = 'g' } case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: switch f.c { case 'b': f.base = 2 case 'd': f.base = 10 case 'o': f.base = 8 case 'x', 'X': f.base = 16 default: f.base = 10 f.c = 'd' } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: switch f.c { case 'b': f.base = 2 case 'd': f.base = 10 case 'o': f.base = 8 case 'x', 'X': f.base = 16 default: f.base = 10 f.c = 'd' } case reflect.Bool: f.c = 't' default: f.c = 'v' } } func (f *fmtState) calcWidth(d *Dense) { format := f.cleanFmt() f.w = 0 masked := false if d.IsMasked() { if d.MaskedAny().(bool) { masked = true } } for i := 0; i < d.len(); i++ { w, _ := fmt.Fprintf(f.buf, format, d.Get(i)) if masked { if d.mask[i] { w, _ = fmt.Fprintf(f.buf, "%s", hInvalid) } } if w > f.w { f.w = w } f.buf.Reset() } } func (f *fmtState) makePad() { f.pad = make([]byte, MaxInt(f.w, 2)) for i := range f.pad { f.pad[i] = ' ' } } func (f *fmtState) writeHElision() { f.Write(f.hElision) } func (f *fmtState) writeVElision() { f.Write(f.vElision) } // Format implements fmt.Formatter. 
Formatting can be controlled with verbs and flags. All default Go verbs are supported and work as expected. // By default, only 8 columns and rows are printed (the first and the last 4 columns and rows, while the middle columns and rows are elided) // Special flags are: // '-' for printing a flat array of values // '+' for printing extra metadata before printing the tensor (it prints shape, stride and type, which are useful for debugging) // '#' for printing the full tensor - there are no elisions. Overrides the 's' verb // // Special care also needs to be taken for the verb 's' - it prints a super compressed version of the tensor, only printing 4 cols and 4 rows. func (t *Dense) Format(s fmt.State, c rune) { if c == 'i' { fmt.Fprintf(s, "INFO:\n\tAP: %v\n\tOLD: %v\n\tTRANS %v\n\tENGINE: %T\n", t.AP, t.old, t.transposeWith, t.e) return } f := newFmtState(s, c) if t.IsScalar() { o := f.originalFmt() fmt.Fprintf(f, o, t.Get(0)) return } f.acceptableRune(t) f.calcWidth(t) f.makePad() f.populate(t) if f.meta { switch { case t.IsVector(): f.Write(ufVec) case t.Dims() == 2: f.Write(ufMat) default: f.Write(ufTensor) fmt.Fprintf(f, "%d", t.Dims()) } fmt.Fprintf(f, " %v %v\n", t.Shape(), t.Strides()) } if f.c == 'H' { return } if !t.IsNativelyAccessible() { fmt.Fprintf(f, "Inaccessible data") return } format := f.cleanFmt() if f.flat { f.Write(vecStart) switch { case f.ext: for i := 0; i < t.len(); i++ { if !t.IsMasked() { fmt.Fprintf(f, format, t.Get(i)) } else { if t.mask[i] { fmt.Fprintf(f, "%s", hInvalid) } else { fmt.Fprintf(f, format, t.Get(i)) } } if i < t.len()-1 { f.Write(f.pad[:1]) } } case t.viewOf != 0: it := IteratorFromDense(t) var c, i int var err error for i, err = it.Next(); err == nil; i, err = it.Next() { if !t.IsMasked() { fmt.Fprintf(f, format, t.Get(i)) } else { if t.mask[i] { fmt.Fprintf(f, "%s", hInvalid) } else { fmt.Fprintf(f, format, t.Get(i)) } } f.Write(f.pad[:1]) c++ if c >= f.pc { f.writeHElision() break } } if err != nil { if _, noop := err.(NoOpError); !noop { fmt.Fprintf(f, "ERROR ITERATING: %v", err) } } default: for i := 0; i < f.pc; i++ { if !t.IsMasked() { fmt.Fprintf(f, format, t.Get(i)) } else { if t.mask[i] { fmt.Fprintf(f, "%s", hInvalid) } else { fmt.Fprintf(f, format, t.Get(i)) } } f.Write(f.pad[:1]) } if f.pc < t.len() { f.writeHElision() } } f.Write(vecEnd) return } // standard stuff it := NewIterator(&t.AP) coord := it.Coord() firstRow := true firstVal := true var lastRow, lastCol int var expected int for next, err := it.Next(); err == nil; next, err = it.Next() { if next < expected { continue } var col, row int row = lastRow col = lastCol if f.rows > f.pr && row > f.pr/2 && row < f.rows-f.pr/2 { continue } if firstVal { if firstRow { switch { case t.IsColVec(): f.Write(colVecStart) case t.IsRowVec(): f.Write(rowVecStart) case t.IsVector(): f.Write(vecStart) case t.IsScalarEquiv(): for i := 0; i < t.Dims(); i++ { f.Write(vecStart) } default: f.Write(matFirstStart) } } else { var matLastRow bool if !t.IsVector() { matLastRow = coord[len(coord)-2] == f.rows-1 } if matLastRow { f.Write(matLastStart) } else { f.Write(rowStart) } } firstVal = false } // actual printing of the value if f.cols <= f.pc || (col < f.pc/2 || (col >= f.cols-f.pc/2)) { var w int if t.IsMasked() { if t.mask[next] { w, _ = fmt.Fprintf(f.buf, "%s", hInvalid) } else { w, _ = fmt.Fprintf(f.buf, format, t.Get(next)) } } else { w, _ = fmt.Fprintf(f.buf, format, t.Get(next)) } f.Write(f.pad[:f.w-w]) // prepad f.Write(f.buf.Bytes()) // write if col < f.cols-1 { // pad with a space
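// (the prepad above right-aligns each value to the column width f.w
// computed in calcWidth; two pad bytes then separate adjacent columns)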
f.Write(f.pad[:2]) } f.buf.Reset() } else if col == f.pc/2 { f.writeHElision() } // done printing // check for end of rows if col == f.cols-1 { eom := row == f.rows-1 switch { case t.IsVector(): f.Write(vecEnd) return case t.IsScalarEquiv(): for i := 0; i < t.Dims(); i++ { f.Write(vecEnd) } return case firstRow: f.Write(matFirstEnd) case eom: f.Write(matLastEnd) if t.IsMatrix() { return } // one newline for every dimension above 2 for i := t.Dims(); i > 2; i-- { f.Write(newline) } default: f.Write(rowEnd) } if firstRow { firstRow = false } if eom { firstRow = true } firstVal = true // figure out elision if f.rows > f.pr && row+1 == f.pr/2 { expectedCoord := BorrowInts(len(coord)) copy(expectedCoord, coord) expectedCoord[len(expectedCoord)-2] = f.rows - (f.pr / 2) expected, _ = Ltoi(t.Shape(), t.Strides(), expectedCoord...) ReturnInts(expectedCoord) f.writeVElision() } } // cleanup switch { case t.IsRowVec(): lastRow = coord[len(coord)-2] lastCol = coord[len(coord)-1] case t.IsColVec(): lastRow = coord[len(coord)-1] lastCol = coord[len(coord)-2] case t.IsVector(): lastCol = coord[len(coord)-1] default: lastRow = coord[len(coord)-2] lastCol = coord[len(coord)-1] } } } tensor-0.9.24/dense_format_test.go000066400000000000000000000175341426512615100171710ustar00rootroot00000000000000package tensor import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) func TestDense_Format(t *testing.T) { // if os.Getenv("TRAVISTEST") == "true" { // t.Skip("skipping format test; This is being run on TravisCI") // } assert := assert.New(t) var T *Dense var res, expected string // Scalar T = New(Of(Float64), FromScalar(3.14)) res = fmt.Sprintf("%3.3f", T) assert.Equal("3.140", res) // Scalar-equiv (vector) T = New(WithBacking([]float64{3.14}), WithShape(1)) res = fmt.Sprintf("%3.3f", T) assert.Equal("[3.140]", res) // Scalar-equiv (n-dimensional) T = New(WithBacking([]float64{3.14}), WithShape(1, 1, 1, 1)) res = fmt.Sprintf("%3.3f", T) assert.Equal("[[[[3.140]]]]", res) // short vector T = New(Of(Float64), WithShape(4)) res = fmt.Sprintf("%v", T) expected = "[0 0 0 0]" assert.Equal(expected, res) T = New(WithShape(2, 2), WithBacking([]float64{3.141515163242, 20, 5.15, 6.28})) res = fmt.Sprintf("\n%v", T) expected = ` ⎡3.141515163242 20⎤ ⎣ 5.15 6.28⎦ ` assert.Equal(expected, res, res) // precision res = fmt.Sprintf("\n%0.2v", T) expected = ` ⎡3.1 20⎤ ⎣5.2 6.3⎦ ` assert.Equal(expected, res, res) // with metadata res = fmt.Sprintf("\n%+0.2v", T) expected = ` Matrix (2, 2) [2 1] ⎡3.1 20⎤ ⎣5.2 6.3⎦ ` assert.Equal(expected, res, res) // many columns T = New(WithShape(16, 14), WithBacking(Range(Float32, 0, 16*14))) res = fmt.Sprintf("\n%v", T) expected = ` ⎡ 0 1 2 3 ... 10 11 12 13⎤ ⎢ 14 15 16 17 ... 24 25 26 27⎥ ⎢ 28 29 30 31 ... 38 39 40 41⎥ ⎢ 42 43 44 45 ... 52 53 54 55⎥ . . . ⎢168 169 170 171 ... 178 179 180 181⎥ ⎢182 183 184 185 ... 192 193 194 195⎥ ⎢196 197 198 199 ... 206 207 208 209⎥ ⎣210 211 212 213 ... 220 221 222 223⎦ ` assert.Equal(expected, res, "expected %v. Got %v", expected, res) // many cols, rows, compressed T = New(WithShape(16, 14), WithBacking(Range(Float64, 0, 16*14))) res = fmt.Sprintf("\n%s", T) // this clunky string addition thing is because some editors like to trim whitespace. // There should be two spaces after ` ⋮` . expected = ` ⎡ 0 1 ⋯ 12 13⎤ ⎢ 14 15 ⋯ 26 27⎥ ` + ` ⋮ ` + ` ` + `⎢196 197 ⋯ 208 209⎥ ⎣210 211 ⋯ 222 223⎦ ` assert.Equal(expected, res, "expected %v. 
Got %v", expected, res) // many cols, full T = New(WithShape(8, 9), WithBacking(Range(Float64, 0, 8*9))) res = fmt.Sprintf("\n%#v", T) expected = ` ⎡ 0 1 2 3 4 5 6 7 8⎤ ⎢ 9 10 11 12 13 14 15 16 17⎥ ⎢18 19 20 21 22 23 24 25 26⎥ ⎢27 28 29 30 31 32 33 34 35⎥ ⎢36 37 38 39 40 41 42 43 44⎥ ⎢45 46 47 48 49 50 51 52 53⎥ ⎢54 55 56 57 58 59 60 61 62⎥ ⎣63 64 65 66 67 68 69 70 71⎦ ` assert.Equal(expected, res, res) // vectors T = New(Of(Int), WithShape(3, 1)) res = fmt.Sprintf("%v", T) expected = `C[0 0 0]` assert.Equal(expected, res) T = New(Of(Int32), WithShape(1, 3)) res = fmt.Sprintf("%v", T) expected = `R[0 0 0]` assert.Equal(expected, res) // 3+ Dimensional Tensors - super janky for now T = New(WithShape(2, 3, 2), WithBacking(Range(Float64, 0, 2*3*2))) res = fmt.Sprintf("\n%v", T) expected = ` ⎡ 0 1⎤ ⎢ 2 3⎥ ⎣ 4 5⎦ ⎡ 6 7⎤ ⎢ 8 9⎥ ⎣10 11⎦ ` assert.Equal(expected, res, res) // checking metadata + compression res = fmt.Sprintf("\n%+s", T) expected = ` Tensor-3 (2, 3, 2) [6 2 1] ⎡ 0 1⎤ ⎢ 2 3⎥ ⎣ 4 5⎦ ⎡ 6 7⎤ ⎢ 8 9⎥ ⎣10 11⎦ ` assert.Equal(expected, res, res) // check flat + compress res = fmt.Sprintf("%-s", T) expected = `[0 1 2 3 4 ⋯ ]` assert.Equal(expected, res, res) // check flat res = fmt.Sprintf("%-3.3f", T) expected = `[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000 8.000 9.000 ... ]` assert.Equal(expected, res, res) // check flat + extended res = fmt.Sprintf("%-#v", T) expected = `[0 1 2 3 4 5 6 7 8 9 10 11]` assert.Equal(expected, res, res) /* Test Views and Sliced Tensors */ var V Tensor var err error V, err = T.Slice(makeRS(1, 2)) if err != nil { t.Error(err) } // flat mode for view res = fmt.Sprintf("\n%-s", V) expected = "\n[6 7 8 9 10 ⋯ ]" assert.Equal(expected, res, res) // standard res = fmt.Sprintf("\n%+s", V) expected = ` Matrix (3, 2) [2 1] ⎡ 6 7⎤ ⎢ 8 9⎥ ⎣10 11⎦ ` assert.Equal(expected, res, res) // T[:, 1] V, err = T.Slice(nil, ss(1)) res = fmt.Sprintf("\n%+s", V) expected = ` Matrix (2, 2) [6 1] ⎡2 3⎤ ⎣8 9⎦ ` assert.Equal(expected, res, res) // transpose a view V.T() expected = ` Matrix (2, 2) [1 6] ⎡2 8⎤ ⎣3 9⎦ ` res = fmt.Sprintf("\n%+s", V) assert.Equal(expected, res, res) // T[1, :, 1] V, err = T.Slice(ss(1), nil, ss(1)) if err != nil { t.Error(err) } expected = `Vector (3) [2] [7881299347898368p-50 5066549580791808p-49 6192449487634432p-49]` res = fmt.Sprintf("%+b", V) assert.Equal(expected, res) // T[1, 1, 1] - will result in a scalar V, err = T.Slice(ss(1), ss(1), ss(1)) if err != nil { t.Error(err) } res = fmt.Sprintf("%#3.3E", V) expected = `9.000E+00` assert.Equal(expected, res) // on regular matrices T = New(WithShape(3, 5), WithBacking(Range(Float64, 0, 3*5))) V, err = T.Slice(ss(1)) if err != nil { t.Error(err) } expected = `[5 6 7 8 9]` res = fmt.Sprintf("%v", V) assert.Equal(expected, res) } var basicFmtTests = []struct { a interface{} format string correct string }{ {Range(Float64, 0, 4), "%1.1f", "[0.0 1.0 2.0 3.0]"}, {Range(Float32, 0, 4), "%1.1f", "[0.0 1.0 2.0 3.0]"}, {Range(Int, 0, 4), "%b", "[ 0 1 10 11]"}, {Range(Int, 0, 4), "%d", "[0 1 2 3]"}, {Range(Int, 6, 10), "%o", "[ 6 7 10 11]"}, {Range(Int, 14, 18), "%x", "[ e f 10 11]"}, {Range(Int, 0, 4), "%f", "[0 1 2 3]"}, {Range(Int32, 0, 4), "%b", "[ 0 1 10 11]"}, {Range(Int32, 0, 4), "%d", "[0 1 2 3]"}, {Range(Int32, 6, 10), "%o", "[ 6 7 10 11]"}, {Range(Int32, 14, 18), "%x", "[ e f 10 11]"}, {Range(Int32, 0, 4), "%f", "[0 1 2 3]"}, {Range(Int64, 0, 4), "%b", "[ 0 1 10 11]"}, {Range(Int64, 0, 4), "%d", "[0 1 2 3]"}, {Range(Int64, 6, 10), "%o", "[ 6 7 10 11]"}, {Range(Int64, 14, 18), "%x", "[ e f 10 11]"}, 
{Range(Int64, 0, 4), "%f", "[0 1 2 3]"}, {Range(Byte, 0, 4), "%b", "[ 0 1 10 11]"}, {Range(Byte, 0, 4), "%d", "[0 1 2 3]"}, {Range(Byte, 6, 10), "%o", "[ 6 7 10 11]"}, {Range(Byte, 14, 18), "%x", "[ e f 10 11]"}, {Range(Byte, 0, 4), "%f", "[0 1 2 3]"}, {[]bool{true, false, true, false}, "%f", "[ true false true false]"}, {[]bool{true, false, true, false}, "%s", "[ true false true false]"}, } func TestDense_Format_basics(t *testing.T) { for _, v := range basicFmtTests { T := New(WithBacking(v.a)) s := fmt.Sprintf(v.format, T) if s != v.correct { t.Errorf("Expected %q. Got %q", v.correct, s) } } } func TestDense_Format_Masked(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(1, 12)) data := T.Ints() for i := 0; i < len(data); i++ { data[i] = i } T.ResetMask(false) for i := 0; i < 12; i += 2 { T.mask[i] = true } s := fmt.Sprintf("%d", T) assert.Equal(`R[-- 1 -- 3 ... -- 9 -- 11]`, s) T = New(Of(Int), WithShape(2, 4, 16)) data = T.Ints() for i := 0; i < len(data); i++ { data[i] = i } T.ResetMask(false) for i := 0; i < len(data); i += 2 { T.mask[i] = true } s = fmt.Sprintf("%d", T) assert.Equal(`⎡ -- 1 -- 3 ... -- 13 -- 15⎤ ⎢ -- 17 -- 19 ... -- 29 -- 31⎥ ⎢ -- 33 -- 35 ... -- 45 -- 47⎥ ⎣ -- 49 -- 51 ... -- 61 -- 63⎦ ⎡ -- 65 -- 67 ... -- 77 -- 79⎤ ⎢ -- 81 -- 83 ... -- 93 -- 95⎥ ⎢ -- 97 -- 99 ... -- 109 -- 111⎥ ⎣ -- 113 -- 115 ... -- 125 -- 127⎦ `, s) } tensor-0.9.24/dense_generated.go000066400000000000000000000065631426512615100166000ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import "reflect" // Ones creates a *Dense with the provided shape and type func Ones(dt Dtype, shape ...int) *Dense { d := recycledDense(dt, shape) switch d.t.Kind() { case reflect.Int: d.Memset(int(1)) case reflect.Int8: d.Memset(int8(1)) case reflect.Int16: d.Memset(int16(1)) case reflect.Int32: d.Memset(int32(1)) case reflect.Int64: d.Memset(int64(1)) case reflect.Uint: d.Memset(uint(1)) case reflect.Uint8: d.Memset(uint8(1)) case reflect.Uint16: d.Memset(uint16(1)) case reflect.Uint32: d.Memset(uint32(1)) case reflect.Uint64: d.Memset(uint64(1)) case reflect.Float32: d.Memset(float32(1)) case reflect.Float64: d.Memset(float64(1)) case reflect.Complex64: d.Memset(complex64(1)) case reflect.Complex128: d.Memset(complex128(1)) case reflect.Bool: d.Memset(true) default: // TODO: add a Oner interface } return d } // I creates the identity matrix (usually a square) matrix with 1s across the diagonals, and zeroes elsewhere, like so: // Matrix(4,4) // ⎡1 0 0 0⎤ // ⎢0 1 0 0⎥ // ⎢0 0 1 0⎥ // ⎣0 0 0 1⎦ // While technically an identity matrix is a square matrix, in attempt to keep feature parity with Numpy, // the I() function allows you to create non square matrices, as well as an index to start the diagonals. 
// // For example: // T = I(Float64, 4, 4, 1) // Yields: // ⎡0 1 0 0⎤ // ⎢0 0 1 0⎥ // ⎢0 0 0 1⎥ // ⎣0 0 0 0⎦ // // The index k can also be a negative number: // T = I(Float64, 4, 4, -1) // Yields: // ⎡0 0 0 0⎤ // ⎢1 0 0 0⎥ // ⎢0 1 0 0⎥ // ⎣0 0 1 0⎦ func I(dt Dtype, r, c, k int) *Dense { ret := New(Of(dt), WithShape(r, c)) i := k if k < 0 { i = (-k) * c } var s *Dense var err error end := c - k if end > r { s, err = sliceDense(ret, nil) } else { s, err = sliceDense(ret, rs{0, end, 1}) } if err != nil { panic(err) } var nexts []int iter := newFlatIterator(&s.AP) nexts, err = iter.Slice(rs{i, s.Size(), c + 1}) switch s.t.Kind() { case reflect.Int: data := s.Ints() for _, v := range nexts { data[v] = 1 } case reflect.Int8: data := s.Int8s() for _, v := range nexts { data[v] = 1 } case reflect.Int16: data := s.Int16s() for _, v := range nexts { data[v] = 1 } case reflect.Int32: data := s.Int32s() for _, v := range nexts { data[v] = 1 } case reflect.Int64: data := s.Int64s() for _, v := range nexts { data[v] = 1 } case reflect.Uint: data := s.Uints() for _, v := range nexts { data[v] = 1 } case reflect.Uint8: data := s.Uint8s() for _, v := range nexts { data[v] = 1 } case reflect.Uint16: data := s.Uint16s() for _, v := range nexts { data[v] = 1 } case reflect.Uint32: data := s.Uint32s() for _, v := range nexts { data[v] = 1 } case reflect.Uint64: data := s.Uint64s() for _, v := range nexts { data[v] = 1 } case reflect.Float32: data := s.Float32s() for _, v := range nexts { data[v] = 1 } case reflect.Float64: data := s.Float64s() for _, v := range nexts { data[v] = 1 } case reflect.Complex64: data := s.Complex64s() for _, v := range nexts { data[v] = 1 } case reflect.Complex128: data := s.Complex128s() for _, v := range nexts { data[v] = 1 } } // TODO: create Oner interface for custom types return ret } tensor-0.9.24/dense_generated_test.go000066400000000000000000000365241426512615100176370ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "testing" "github.com/stretchr/testify/assert" ) var onesTests = []struct { of Dtype shape Shape correct interface{} }{ {Int, ScalarShape(), int(1)}, {Int, Shape{2, 2}, []int{1, 1, 1, 1}}, {Int8, ScalarShape(), int8(1)}, {Int8, Shape{2, 2}, []int8{1, 1, 1, 1}}, {Int16, ScalarShape(), int16(1)}, {Int16, Shape{2, 2}, []int16{1, 1, 1, 1}}, {Int32, ScalarShape(), int32(1)}, {Int32, Shape{2, 2}, []int32{1, 1, 1, 1}}, {Int64, ScalarShape(), int64(1)}, {Int64, Shape{2, 2}, []int64{1, 1, 1, 1}}, {Uint, ScalarShape(), uint(1)}, {Uint, Shape{2, 2}, []uint{1, 1, 1, 1}}, {Uint8, ScalarShape(), uint8(1)}, {Uint8, Shape{2, 2}, []uint8{1, 1, 1, 1}}, {Uint16, ScalarShape(), uint16(1)}, {Uint16, Shape{2, 2}, []uint16{1, 1, 1, 1}}, {Uint32, ScalarShape(), uint32(1)}, {Uint32, Shape{2, 2}, []uint32{1, 1, 1, 1}}, {Uint64, ScalarShape(), uint64(1)}, {Uint64, Shape{2, 2}, []uint64{1, 1, 1, 1}}, {Float32, ScalarShape(), float32(1)}, {Float32, Shape{2, 2}, []float32{1, 1, 1, 1}}, {Float64, ScalarShape(), float64(1)}, {Float64, Shape{2, 2}, []float64{1, 1, 1, 1}}, {Complex64, ScalarShape(), complex64(1)}, {Complex64, Shape{2, 2}, []complex64{1, 1, 1, 1}}, {Complex128, ScalarShape(), complex128(1)}, {Complex128, Shape{2, 2}, []complex128{1, 1, 1, 1}}, {Bool, ScalarShape(), true}, {Bool, Shape{2, 2}, []bool{true, true, true, true}}, } func TestOnes(t *testing.T) { assert := assert.New(t) for _, ot := range onesTests { T := Ones(ot.of, ot.shape...) 
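// e.g. Ones(Float64, 2, 2) is expected to be backed by []float64{1, 1, 1, 1},
// per the table above; both the shape and the raw data are checked for every dtype.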
assert.True(ot.shape.Eq(T.Shape())) assert.Equal(ot.correct, T.Data()) } } // yes, it's a pun on eye tests, stop asking and go see your optometrist var eyeTests = []struct { E Dtype R, C, K int correct interface{} }{ {Int, 4, 4, 0, []int{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Int, 4, 4, 1, []int{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Int, 4, 4, 2, []int{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int, 4, 4, 3, []int{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int, 4, 4, 4, []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int, 4, 4, -1, []int{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Int, 4, 4, -2, []int{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Int, 4, 4, -3, []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Int, 4, 4, -4, []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int, 4, 5, 0, []int{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Int, 4, 5, 1, []int{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Int, 4, 5, -1, []int{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Int8, 4, 4, 0, []int8{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Int8, 4, 4, 1, []int8{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Int8, 4, 4, 2, []int8{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int8, 4, 4, 3, []int8{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int8, 4, 4, 4, []int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int8, 4, 4, -1, []int8{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Int8, 4, 4, -2, []int8{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Int8, 4, 4, -3, []int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Int8, 4, 4, -4, []int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int8, 4, 5, 0, []int8{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Int8, 4, 5, 1, []int8{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Int8, 4, 5, -1, []int8{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Int16, 4, 4, 0, []int16{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Int16, 4, 4, 1, []int16{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Int16, 4, 4, 2, []int16{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int16, 4, 4, 3, []int16{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int16, 4, 4, 4, []int16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int16, 4, 4, -1, []int16{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Int16, 4, 4, -2, []int16{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Int16, 4, 4, -3, []int16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Int16, 4, 4, -4, []int16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int16, 4, 5, 0, []int16{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Int16, 4, 5, 1, []int16{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Int16, 4, 5, -1, []int16{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Int32, 4, 4, 0, []int32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Int32, 4, 4, 1, []int32{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Int32, 4, 4, 2, []int32{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int32, 4, 4, 3, []int32{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int32, 4, 4, 4, []int32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int32, 4, 4, -1, []int32{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Int32, 4, 4, -2, []int32{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Int32, 4, 4, -3, []int32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 0}}, {Int32, 4, 4, -4, []int32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int32, 4, 5, 0, []int32{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Int32, 4, 5, 1, []int32{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Int32, 4, 5, -1, []int32{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Int64, 4, 4, 0, []int64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Int64, 4, 4, 1, []int64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Int64, 4, 4, 2, []int64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int64, 4, 4, 3, []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int64, 4, 4, 4, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int64, 4, 4, -1, []int64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Int64, 4, 4, -2, []int64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Int64, 4, 4, -3, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Int64, 4, 4, -4, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Int64, 4, 5, 0, []int64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Int64, 4, 5, 1, []int64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Int64, 4, 5, -1, []int64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Uint, 4, 4, 0, []uint{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Uint, 4, 4, 1, []uint{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Uint, 4, 4, 2, []uint{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint, 4, 4, 3, []uint{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint, 4, 4, 4, []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint, 4, 4, -1, []uint{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Uint, 4, 4, -2, []uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Uint, 4, 4, -3, []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Uint, 4, 4, -4, []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint, 4, 5, 0, []uint{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Uint, 4, 5, 1, []uint{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Uint, 4, 5, -1, []uint{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Uint8, 4, 4, 0, []uint8{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Uint8, 4, 4, 1, []uint8{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Uint8, 4, 4, 2, []uint8{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint8, 4, 4, 3, []uint8{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint8, 4, 4, 4, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint8, 4, 4, -1, []uint8{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Uint8, 4, 4, -2, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Uint8, 4, 4, -3, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Uint8, 4, 4, -4, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint8, 4, 5, 0, []uint8{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Uint8, 4, 5, 1, []uint8{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Uint8, 4, 5, -1, []uint8{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Uint16, 4, 4, 0, []uint16{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Uint16, 4, 4, 1, []uint16{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Uint16, 4, 4, 2, []uint16{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint16, 4, 4, 3, []uint16{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint16, 4, 4, 4, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint16, 4, 4, 
-1, []uint16{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Uint16, 4, 4, -2, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Uint16, 4, 4, -3, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Uint16, 4, 4, -4, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint16, 4, 5, 0, []uint16{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Uint16, 4, 5, 1, []uint16{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Uint16, 4, 5, -1, []uint16{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Uint32, 4, 4, 0, []uint32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Uint32, 4, 4, 1, []uint32{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Uint32, 4, 4, 2, []uint32{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint32, 4, 4, 3, []uint32{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint32, 4, 4, 4, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint32, 4, 4, -1, []uint32{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Uint32, 4, 4, -2, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Uint32, 4, 4, -3, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Uint32, 4, 4, -4, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint32, 4, 5, 0, []uint32{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Uint32, 4, 5, 1, []uint32{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Uint32, 4, 5, -1, []uint32{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Uint64, 4, 4, 0, []uint64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Uint64, 4, 4, 1, []uint64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Uint64, 4, 4, 2, []uint64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint64, 4, 4, 3, []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint64, 4, 4, 4, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint64, 4, 4, -1, []uint64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Uint64, 4, 4, -2, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Uint64, 4, 4, -3, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Uint64, 4, 4, -4, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Uint64, 4, 5, 0, []uint64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Uint64, 4, 5, 1, []uint64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Uint64, 4, 5, -1, []uint64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Float32, 4, 4, 0, []float32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Float32, 4, 4, 1, []float32{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Float32, 4, 4, 2, []float32{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float32, 4, 4, 3, []float32{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float32, 4, 4, 4, []float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float32, 4, 4, -1, []float32{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Float32, 4, 4, -2, []float32{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Float32, 4, 4, -3, []float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Float32, 4, 4, -4, []float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float32, 4, 5, 0, []float32{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Float32, 4, 5, 1, []float32{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Float32, 4, 5, -1, []float32{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Float64, 4, 4, 0, []float64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 1}}, {Float64, 4, 4, 1, []float64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Float64, 4, 4, 2, []float64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float64, 4, 4, 3, []float64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float64, 4, 4, 4, []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float64, 4, 4, -1, []float64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Float64, 4, 4, -2, []float64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Float64, 4, 4, -3, []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Float64, 4, 4, -4, []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Float64, 4, 5, 0, []float64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Float64, 4, 5, 1, []float64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Float64, 4, 5, -1, []float64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Complex64, 4, 4, 0, []complex64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Complex64, 4, 4, 1, []complex64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Complex64, 4, 4, 2, []complex64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex64, 4, 4, 3, []complex64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex64, 4, 4, 4, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex64, 4, 4, -1, []complex64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Complex64, 4, 4, -2, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Complex64, 4, 4, -3, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Complex64, 4, 4, -4, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex64, 4, 5, 0, []complex64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Complex64, 4, 5, 1, []complex64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Complex64, 4, 5, -1, []complex64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {Complex128, 4, 4, 0, []complex128{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, {Complex128, 4, 4, 1, []complex128{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, {Complex128, 4, 4, 2, []complex128{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex128, 4, 4, 3, []complex128{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex128, 4, 4, 4, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex128, 4, 4, -1, []complex128{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, {Complex128, 4, 4, -2, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, {Complex128, 4, 4, -3, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, {Complex128, 4, 4, -4, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {Complex128, 4, 5, 0, []complex128{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, {Complex128, 4, 5, 1, []complex128{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, {Complex128, 4, 5, -1, []complex128{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, } func TestI(t *testing.T) { assert := assert.New(t) var T Tensor for i, it := range eyeTests { T = I(it.E, it.R, it.C, it.K) assert.True(Shape{it.R, it.C}.Eq(T.Shape())) assert.Equal(it.correct, T.Data(), "Test %d-R: %d, C: %d K: %d", i, it.R, it.C, it.K) } } tensor-0.9.24/dense_getset_test.go000066400000000000000000000157201426512615100171670ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
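// The tests below exercise the element accessors of *Dense. As a minimal
// sketch of the API under test (elements are addressed by flat index, and
// the value's Go type must match the tensor's Dtype):
//
//	T := New(Of(Float64), WithShape(2, 3))
//	T.Set(0, float64(3.14)) // set the element at flat index 0
//	v := T.Get(0)           // v is an interface{} holding float64(3.14)
//	T.Memset(float64(1))    // fill every element with 1
//	T.Zero()                // reset every element to the dtype's zero value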
package tensor import ( "reflect" "testing" "testing/quick" "github.com/stretchr/testify/assert" ) var denseSetGetTests = []struct { of Dtype data interface{} set interface{} correct []interface{} }{ {Bool, []bool{true, false, true, false, true, false}, false, []interface{}{bool(true), bool(false), bool(true), bool(false), bool(true), bool(false)}}, {Int, []int{0, 1, 2, 3, 4, 5}, 45, []interface{}{int(0), int(1), int(2), int(3), int(4), int(5)}}, {Int8, []int8{0, 1, 2, 3, 4, 5}, 45, []interface{}{int8(0), int8(1), int8(2), int8(3), int8(4), int8(5)}}, {Int16, []int16{0, 1, 2, 3, 4, 5}, 45, []interface{}{int16(0), int16(1), int16(2), int16(3), int16(4), int16(5)}}, {Int32, []int32{0, 1, 2, 3, 4, 5}, 45, []interface{}{int32(0), int32(1), int32(2), int32(3), int32(4), int32(5)}}, {Int64, []int64{0, 1, 2, 3, 4, 5}, 45, []interface{}{int64(0), int64(1), int64(2), int64(3), int64(4), int64(5)}}, {Uint, []uint{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint(0), uint(1), uint(2), uint(3), uint(4), uint(5)}}, {Uint8, []uint8{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint8(0), uint8(1), uint8(2), uint8(3), uint8(4), uint8(5)}}, {Uint16, []uint16{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint16(0), uint16(1), uint16(2), uint16(3), uint16(4), uint16(5)}}, {Uint32, []uint32{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint32(0), uint32(1), uint32(2), uint32(3), uint32(4), uint32(5)}}, {Uint64, []uint64{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint64(0), uint64(1), uint64(2), uint64(3), uint64(4), uint64(5)}}, {Float32, []float32{0, 1, 2, 3, 4, 5}, 45, []interface{}{float32(0), float32(1), float32(2), float32(3), float32(4), float32(5)}}, {Float64, []float64{0, 1, 2, 3, 4, 5}, 45, []interface{}{float64(0), float64(1), float64(2), float64(3), float64(4), float64(5)}}, {Complex64, []complex64{0, 1, 2, 3, 4, 5}, 45, []interface{}{complex64(0), complex64(1), complex64(2), complex64(3), complex64(4), complex64(5)}}, {Complex128, []complex128{0, 1, 2, 3, 4, 5}, 45, []interface{}{complex128(0), complex128(1), complex128(2), complex128(3), complex128(4), complex128(5)}}, {String, []string{"zero", "one", "two", "three", "four", "five"}, "HELLO WORLD", []interface{}{string("zero"), string("one"), string("two"), string("three"), string("four"), string("five")}}, } func TestDense_setget(t *testing.T) { assert := assert.New(t) for _, gts := range denseSetGetTests { T := New(Of(gts.of), WithShape(len(gts.correct))) for i, v := range gts.correct { T.Set(i, v) got := T.Get(i) assert.Equal(v, got) } } } var denseMemsetTests = []struct { of Dtype data interface{} val interface{} shape Shape correct interface{} }{ {Bool, []bool{true, false, true, false, true, false}, bool(false), Shape{2, 3}, []bool{false, false, false, false, false, false}}, {Int, []int{0, 1, 2, 3, 4, 5}, int(45), Shape{2, 3}, []int{45, 45, 45, 45, 45, 45}}, {Int8, []int8{0, 1, 2, 3, 4, 5}, int8(45), Shape{2, 3}, []int8{45, 45, 45, 45, 45, 45}}, {Int16, []int16{0, 1, 2, 3, 4, 5}, int16(45), Shape{2, 3}, []int16{45, 45, 45, 45, 45, 45}}, {Int32, []int32{0, 1, 2, 3, 4, 5}, int32(45), Shape{2, 3}, []int32{45, 45, 45, 45, 45, 45}}, {Int64, []int64{0, 1, 2, 3, 4, 5}, int64(45), Shape{2, 3}, []int64{45, 45, 45, 45, 45, 45}}, {Uint, []uint{0, 1, 2, 3, 4, 5}, uint(45), Shape{2, 3}, []uint{45, 45, 45, 45, 45, 45}}, {Uint8, []uint8{0, 1, 2, 3, 4, 5}, uint8(45), Shape{2, 3}, []uint8{45, 45, 45, 45, 45, 45}}, {Uint16, []uint16{0, 1, 2, 3, 4, 5}, uint16(45), Shape{2, 3}, []uint16{45, 45, 45, 45, 45, 45}}, {Uint32, []uint32{0, 1, 2, 3, 4, 5}, uint32(45), Shape{2, 3}, []uint32{45, 45, 45, 45, 45, 
45}}, {Uint64, []uint64{0, 1, 2, 3, 4, 5}, uint64(45), Shape{2, 3}, []uint64{45, 45, 45, 45, 45, 45}}, {Float32, []float32{0, 1, 2, 3, 4, 5}, float32(45), Shape{2, 3}, []float32{45, 45, 45, 45, 45, 45}}, {Float64, []float64{0, 1, 2, 3, 4, 5}, float64(45), Shape{2, 3}, []float64{45, 45, 45, 45, 45, 45}}, {Complex64, []complex64{0, 1, 2, 3, 4, 5}, complex64(45), Shape{2, 3}, []complex64{45, 45, 45, 45, 45, 45}}, {Complex128, []complex128{0, 1, 2, 3, 4, 5}, complex128(45), Shape{2, 3}, []complex128{45, 45, 45, 45, 45, 45}}, {String, []string{"zero", "one", "two", "three", "four", "five"}, string("HELLO WORLD"), Shape{2, 3}, []string{"HELLO WORLD", "HELLO WORLD", "HELLO WORLD", "HELLO WORLD", "HELLO WORLD", "HELLO WORLD"}}, } func TestDense_memset(t *testing.T) { assert := assert.New(t) for _, mts := range denseMemsetTests { T := New(Of(mts.of), WithShape(mts.shape...)) T.Memset(mts.val) assert.Equal(mts.correct, T.Data()) T = New(Of(mts.of), WithShape(mts.shape...), WithBacking(mts.data)) T2, _ := T.Slice(nil) T2.Memset(mts.val) assert.Equal(mts.correct, T2.Data()) } } var denseZeroTests = []struct { of Dtype data interface{} correct interface{} }{ {Bool, []bool{true, false, true, false, true, false}, []bool{false, false, false, false, false, false}}, {Int, []int{0, 1, 2, 3, 4, 5}, []int{0, 0, 0, 0, 0, 0}}, {Int8, []int8{0, 1, 2, 3, 4, 5}, []int8{0, 0, 0, 0, 0, 0}}, {Int16, []int16{0, 1, 2, 3, 4, 5}, []int16{0, 0, 0, 0, 0, 0}}, {Int32, []int32{0, 1, 2, 3, 4, 5}, []int32{0, 0, 0, 0, 0, 0}}, {Int64, []int64{0, 1, 2, 3, 4, 5}, []int64{0, 0, 0, 0, 0, 0}}, {Uint, []uint{0, 1, 2, 3, 4, 5}, []uint{0, 0, 0, 0, 0, 0}}, {Uint8, []uint8{0, 1, 2, 3, 4, 5}, []uint8{0, 0, 0, 0, 0, 0}}, {Uint16, []uint16{0, 1, 2, 3, 4, 5}, []uint16{0, 0, 0, 0, 0, 0}}, {Uint32, []uint32{0, 1, 2, 3, 4, 5}, []uint32{0, 0, 0, 0, 0, 0}}, {Uint64, []uint64{0, 1, 2, 3, 4, 5}, []uint64{0, 0, 0, 0, 0, 0}}, {Float32, []float32{0, 1, 2, 3, 4, 5}, []float32{0, 0, 0, 0, 0, 0}}, {Float64, []float64{0, 1, 2, 3, 4, 5}, []float64{0, 0, 0, 0, 0, 0}}, {Complex64, []complex64{0, 1, 2, 3, 4, 5}, []complex64{0, 0, 0, 0, 0, 0}}, {Complex128, []complex128{0, 1, 2, 3, 4, 5}, []complex128{0, 0, 0, 0, 0, 0}}, {String, []string{"zero", "one", "two", "three", "four", "five"}, []string{"", "", "", "", "", ""}}, } func TestDense_Zero(t *testing.T) { assert := assert.New(t) for _, mts := range denseZeroTests { typ := reflect.TypeOf(mts.data) val := reflect.ValueOf(mts.data) data := reflect.MakeSlice(typ, val.Len(), val.Cap()) reflect.Copy(data, val) T := New(Of(mts.of), WithBacking(data.Interface())) T.Zero() assert.Equal(mts.correct, T.Data()) T = New(Of(mts.of), WithBacking(mts.data)) T2, _ := T.Slice(nil) T2.Zero() assert.Equal(mts.correct, T2.Data()) } } func TestDense_Eq(t *testing.T) { eqFn := func(q *Dense) bool { a := q.Clone().(*Dense) if !q.Eq(a) { t.Error("Expected a clone to be exactly equal") return false } a.Zero() // Bools are excluded because the probability of having an array of all false is very high if q.Eq(a) && a.len() > 3 && a.Dtype() != Bool { t.Errorf("a %v", a.Data()) t.Errorf("q %v", q.Data()) t.Error("Expected *Dense to be not equal") return false } return true } if err := quick.Check(eqFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Failed to perform equality checks") } } tensor-0.9.24/dense_io.go000066400000000000000000000477161426512615100152560ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
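// dense_io.go (below) implements gob, npy, and CSV (de)serialization for
// *Dense. A minimal sketch of a gob round trip, assuming an existing *Dense
// value T (since *Dense satisfies gob.GobEncoder and gob.GobDecoder, the
// standard encoding/gob machinery applies):
//
//	var buf bytes.Buffer
//	if err := gob.NewEncoder(&buf).Encode(T); err != nil {
//		log.Fatal(err)
//	}
//	T2 := new(Dense)
//	if err := gob.NewDecoder(&buf).Decode(T2); err != nil {
//		log.Fatal(err)
//	}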
package tensor import ( "bytes" "encoding/binary" "encoding/csv" "encoding/gob" "fmt" "io" "reflect" "regexp" "strconv" "strings" flatbuffers "github.com/google/flatbuffers/go" "github.com/pkg/errors" "gorgonia.org/tensor/internal/serialization/fb" "gorgonia.org/tensor/internal/serialization/pb" ) /* GOB SERIALIZATION */ // GobEncode implements gob.GobEncoder func (t *Dense) GobEncode() (p []byte, err error) { var buf bytes.Buffer encoder := gob.NewEncoder(&buf) if err = encoder.Encode(t.Shape()); err != nil { return } if err = encoder.Encode(t.Strides()); err != nil { return } if err = encoder.Encode(t.AP.o); err != nil { return } if err = encoder.Encode(t.AP.Δ); err != nil { return } if err = encoder.Encode(t.mask); err != nil { return } data := t.Data() if err = encoder.Encode(&data); err != nil { return } return buf.Bytes(), err } // GobDecode implements gob.GobDecoder func (t *Dense) GobDecode(p []byte) (err error) { buf := bytes.NewBuffer(p) decoder := gob.NewDecoder(buf) var shape Shape if err = decoder.Decode(&shape); err != nil { return } var strides []int if err = decoder.Decode(&strides); err != nil { return } var o DataOrder var tr Triangle if err = decoder.Decode(&o); err == nil { if err = decoder.Decode(&tr); err != nil { return } } t.AP.Init(shape, strides) t.AP.o = o t.AP.Δ = tr var mask []bool if err = decoder.Decode(&mask); err != nil { return } var data interface{} if err = decoder.Decode(&data); err != nil { return } t.fromSlice(data) t.addMask(mask) t.fix() if t.e == nil { t.e = StdEng{} } return t.sanity() } /* NPY SERIALIZATION */ var npyDescRE = regexp.MustCompile(`'descr':\s*'([^']*)'`) var rowOrderRE = regexp.MustCompile(`'fortran_order':\s*(False|True)`) var shapeRE = regexp.MustCompile(`'shape':\s*\(([^\(]*)\)`) type binaryWriter struct { io.Writer err error seq int } func (w *binaryWriter) w(x interface{}) { if w.err != nil { return } w.err = binary.Write(w, binary.LittleEndian, x) w.seq++ } func (w *binaryWriter) Err() error { if w.err == nil { return nil } return errors.Wrapf(w.err, "Sequence %d", w.seq) } type binaryReader struct { io.Reader err error seq int } func (r *binaryReader) Read(data interface{}) { if r.err != nil { return } r.err = binary.Read(r.Reader, binary.LittleEndian, data) r.seq++ } func (r *binaryReader) Err() error { if r.err == nil { return nil } return errors.Wrapf(r.err, "Sequence %d", r.seq) } // WriteNpy writes the *Tensor as a numpy compatible serialized file. // // The format is very well documented here: // http://docs.scipy.org/doc/numpy/neps/npy-format.html // // Gorgonia specifically uses Version 1.0, as 65535 bytes should be more than enough for the headers. // The values are written in little endian order, because let's face it - // 90% of the world's computers are running on x86+ processors. // // This method does not close the writer. Closing (if needed) is deferred to the caller // If tensor is masked, invalid values are replaced by the default fill value. func (t *Dense) WriteNpy(w io.Writer) (err error) { var npdt string if npdt, err = t.t.numpyDtype(); err != nil { return } var header string if t.Dims() == 1 { // when t is a 1D vector, numpy expects "(N,)" instead of "(N)" which t.Shape() returns. 
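// e.g. a float64 vector of length 6 yields the header
// {'descr': '<f8', 'fortran_order': False, 'shape': (6,)}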
header = "{'descr': '<%v', 'fortran_order': False, 'shape': (%d,)}" header = fmt.Sprintf(header, npdt, t.Shape()[0]) } else { header = "{'descr': '<%v', 'fortran_order': False, 'shape': %v}" header = fmt.Sprintf(header, npdt, t.Shape()) } padding := 16 - ((10 + len(header)) % 16) if padding > 0 { header = header + strings.Repeat(" ", padding) } bw := binaryWriter{Writer: w} bw.Write([]byte("\x93NUMPY")) // stupid magic bw.w(byte(1)) // major version bw.w(byte(0)) // minor version bw.w(uint16(len(header))) // 4 bytes to denote header length if err = bw.Err(); err != nil { return err } bw.Write([]byte(header)) bw.seq = 0 if t.IsMasked() { fillval := t.FillValue() it := FlatMaskedIteratorFromDense(t) for i, err := it.Next(); err == nil; i, err = it.Next() { if t.mask[i] { bw.w(fillval) } else { bw.w(t.Get(i)) } } } else { for i := 0; i < t.len(); i++ { bw.w(t.Get(i)) } } return bw.Err() } // ReadNpy reads NumPy formatted files into a *Dense func (t *Dense) ReadNpy(r io.Reader) (err error) { br := binaryReader{Reader: r} var magic [6]byte if br.Read(magic[:]); string(magic[:]) != "\x93NUMPY" { return errors.Errorf("Not a numpy file. Got %q as the magic number instead", string(magic[:])) } var version, minor byte if br.Read(&version); version != 1 { return errors.New("Only verion 1.0 of numpy's serialization format is currently supported (65535 bytes ought to be enough for a header)") } if br.Read(&minor); minor != 0 { return errors.New("Only verion 1.0 of numpy's serialization format is currently supported (65535 bytes ought to be enough for a header)") } var headerLen uint16 br.Read(&headerLen) header := make([]byte, int(headerLen)) br.Read(header) if err = br.Err(); err != nil { return } // extract stuff from header var match [][]byte if match = npyDescRE.FindSubmatch(header); match == nil { return errors.New("No dtype information in npy file") } // TODO: check for endianness. 
For now we assume everything is little endian if t.t, err = fromNumpyDtype(string(match[1][1:])); err != nil { return } if match = rowOrderRE.FindSubmatch(header); match == nil { return errors.New("No Row Order information found in the numpy file") } if string(match[1]) != "False" { return errors.New("Cannot yet read from Fortran Ordered Numpy files") } if match = shapeRE.FindSubmatch(header); match == nil { return errors.New("No shape information found in npy file") } sizesStr := strings.Split(string(match[1]), ",") var shape Shape for _, s := range sizesStr { s = strings.Trim(s, " ") if len(s) == 0 { break } var size int if size, err = strconv.Atoi(s); err != nil { return } shape = append(shape, size) } size := shape.TotalSize() if t.e == nil { t.e = StdEng{} } t.makeArray(size) switch t.t.Kind() { case reflect.Int: data := t.Ints() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Int8: data := t.Int8s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Int16: data := t.Int16s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Int32: data := t.Int32s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Int64: data := t.Int64s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Uint: data := t.Uints() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Uint8: data := t.Uint8s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Uint16: data := t.Uint16s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Uint32: data := t.Uint32s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Uint64: data := t.Uint64s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Float32: data := t.Float32s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Float64: data := t.Float64s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Complex64: data := t.Complex64s() for i := 0; i < size; i++ { br.Read(&data[i]) } case reflect.Complex128: data := t.Complex128s() for i := 0; i < size; i++ { br.Read(&data[i]) } } if err = br.Err(); err != nil { return err } t.AP.zeroWithDims(len(shape)) t.setShape(shape...) t.fix() return t.sanity() } /* CSV SERIALIZATION */ // WriteCSV writes the *Dense to a CSV. It accepts an optional string formatting ("%v", "%f", etc...), which controls what is written to the CSV. // If tensor is masked, invalid values are replaced by the default fill value. func (t *Dense) WriteCSV(w io.Writer, formats ...string) (err error) { // checks: if !t.IsMatrix() { // error err = errors.Errorf("Cannot write *Dense to CSV. 
Expected number of dimensions: <=2, T has got %d dimensions (Shape: %v)", t.Dims(), t.Shape()) return } format := "%v" if len(formats) > 0 { format = formats[0] } cw := csv.NewWriter(w) it := IteratorFromDense(t) coord := it.Coord() // rows := t.Shape()[0] cols := t.Shape()[1] record := make([]string, 0, cols) var i, k, lastCol int isMasked := t.IsMasked() fillval := t.FillValue() fillstr := fmt.Sprintf(format, fillval) for i, err = it.Next(); err == nil; i, err = it.Next() { record = append(record, fmt.Sprintf(format, t.Get(i))) if isMasked { if t.mask[i] { record[k] = fillstr } k++ } if lastCol == cols-1 { if err = cw.Write(record); err != nil { // TODO: wrap errors return } cw.Flush() record = record[:0] } // cleanup switch { case t.IsRowVec(): // lastRow = coord[len(coord)-2] lastCol = coord[len(coord)-1] case t.IsColVec(): // lastRow = coord[len(coord)-1] lastCol = coord[len(coord)-2] case t.IsVector(): lastCol = coord[len(coord)-1] default: // lastRow = coord[len(coord)-2] lastCol = coord[len(coord)-1] } } return nil } // convFromStrs converts a []string to a slice of the Dtype provided. It takes a provided backing slice. // If into is nil, then a backing slice will be created. func convFromStrs(to Dtype, record []string, into interface{}) (interface{}, error) { var err error switch to.Kind() { case reflect.Int: retVal := make([]int, len(record)) var backing []int if into == nil { backing = make([]int, 0, len(record)) } else { backing = into.([]int) } for i, v := range record { var i64 int64 if i64, err = strconv.ParseInt(v, 10, 0); err != nil { return nil, err } retVal[i] = int(i64) } backing = append(backing, retVal...) return backing, nil case reflect.Int8: retVal := make([]int8, len(record)) var backing []int8 if into == nil { backing = make([]int8, 0, len(record)) } else { backing = into.([]int8) } for i, v := range record { var i64 int64 if i64, err = strconv.ParseInt(v, 10, 8); err != nil { return nil, err } retVal[i] = int8(i64) } backing = append(backing, retVal...) return backing, nil case reflect.Int16: retVal := make([]int16, len(record)) var backing []int16 if into == nil { backing = make([]int16, 0, len(record)) } else { backing = into.([]int16) } for i, v := range record { var i64 int64 if i64, err = strconv.ParseInt(v, 10, 16); err != nil { return nil, err } retVal[i] = int16(i64) } backing = append(backing, retVal...) return backing, nil case reflect.Int32: retVal := make([]int32, len(record)) var backing []int32 if into == nil { backing = make([]int32, 0, len(record)) } else { backing = into.([]int32) } for i, v := range record { var i64 int64 if i64, err = strconv.ParseInt(v, 10, 32); err != nil { return nil, err } retVal[i] = int32(i64) } backing = append(backing, retVal...) return backing, nil case reflect.Int64: retVal := make([]int64, len(record)) var backing []int64 if into == nil { backing = make([]int64, 0, len(record)) } else { backing = into.([]int64) } for i, v := range record { var i64 int64 if i64, err = strconv.ParseInt(v, 10, 64); err != nil { return nil, err } retVal[i] = int64(i64) } backing = append(backing, retVal...) return backing, nil case reflect.Uint: retVal := make([]uint, len(record)) var backing []uint if into == nil { backing = make([]uint, 0, len(record)) } else { backing = into.([]uint) } for i, v := range record { var u uint64 if u, err = strconv.ParseUint(v, 10, 0); err != nil { return nil, err } retVal[i] = uint(u) } backing = append(backing, retVal...) 
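// as in every case of this switch, the freshly parsed row has been appended
// onto the backing slice, which accumulates across successive CSV records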
return backing, nil case reflect.Uint8: retVal := make([]uint8, len(record)) var backing []uint8 if into == nil { backing = make([]uint8, 0, len(record)) } else { backing = into.([]uint8) } for i, v := range record { var u uint64 if u, err = strconv.ParseUint(v, 10, 8); err != nil { return nil, err } retVal[i] = uint8(u) } backing = append(backing, retVal...) return backing, nil case reflect.Uint16: retVal := make([]uint16, len(record)) var backing []uint16 if into == nil { backing = make([]uint16, 0, len(record)) } else { backing = into.([]uint16) } for i, v := range record { var u uint64 if u, err = strconv.ParseUint(v, 10, 16); err != nil { return nil, err } retVal[i] = uint16(u) } backing = append(backing, retVal...) return backing, nil case reflect.Uint32: retVal := make([]uint32, len(record)) var backing []uint32 if into == nil { backing = make([]uint32, 0, len(record)) } else { backing = into.([]uint32) } for i, v := range record { var u uint64 if u, err = strconv.ParseUint(v, 10, 32); err != nil { return nil, err } retVal[i] = uint32(u) } backing = append(backing, retVal...) return backing, nil case reflect.Uint64: retVal := make([]uint64, len(record)) var backing []uint64 if into == nil { backing = make([]uint64, 0, len(record)) } else { backing = into.([]uint64) } for i, v := range record { var u uint64 if u, err = strconv.ParseUint(v, 10, 64); err != nil { return nil, err } retVal[i] = uint64(u) } backing = append(backing, retVal...) return backing, nil case reflect.Float32: retVal := make([]float32, len(record)) var backing []float32 if into == nil { backing = make([]float32, 0, len(record)) } else { backing = into.([]float32) } for i, v := range record { var f float64 if f, err = strconv.ParseFloat(v, 32); err != nil { return nil, err } retVal[i] = float32(f) } backing = append(backing, retVal...) return backing, nil case reflect.Float64: retVal := make([]float64, len(record)) var backing []float64 if into == nil { backing = make([]float64, 0, len(record)) } else { backing = into.([]float64) } for i, v := range record { if retVal[i], err = strconv.ParseFloat(v, 64); err != nil { return nil, err } } backing = append(backing, retVal...) return backing, nil case reflect.String: var backing []string if into == nil { backing = make([]string, 0, len(record)) } else { backing = into.([]string) } backing = append(backing, record...) return backing, nil default: return nil, errors.Errorf(methodNYI, "convFromStrs", to) } } // ReadCSV reads a CSV into a *Dense. It will overwrite the underlying data. // // BUG(chewxy): reading CSV doesn't handle CSVs with different columns per row yet. func (t *Dense) ReadCSV(r io.Reader, opts ...FuncOpt) (err error) { fo := ParseFuncOpts(opts...) as := fo.As() if as.Type == nil { as = Float64 } cr := csv.NewReader(r) var record []string var rows, cols int var backing interface{} for { record, err = cr.Read() if err == io.EOF { break } else if err != nil { return } if backing, err = convFromStrs(as, record, backing); err != nil { return } cols = len(record) rows++ } t.fromSlice(backing) t.AP.zero() t.AP.SetShape(rows, cols) return nil } /* FB SERIALIZATION */ // FBEncode encodes to a byte slice using flatbuffers.
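//
// A minimal round-trip sketch (illustrative only; errors elided), mirroring
// the package's own TestDense_FBEncodeDecode:
//
//	T := New(WithShape(2, 2), WithBacking([]float64{1, 5, 10, -1}))
//	buf, _ := T.FBEncode()
//	T2 := new(Dense)
//	_ = T2.FBDecode(buf) // T2 now mirrors T's shape, strides and data
//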
// // Only natively accessible data can be encoded func (t *Dense) FBEncode() ([]byte, error) { builder := flatbuffers.NewBuilder(1024) fb.DenseStartShapeVector(builder, len(t.shape)) for i := len(t.shape) - 1; i >= 0; i-- { builder.PrependInt32(int32(t.shape[i])) } shape := builder.EndVector(len(t.shape)) fb.DenseStartStridesVector(builder, len(t.strides)) for i := len(t.strides) - 1; i >= 0; i-- { builder.PrependInt32(int32(t.strides[i])) } strides := builder.EndVector(len(t.strides)) var o uint32 switch { case t.o.IsRowMajor() && t.o.IsContiguous(): o = 0 case t.o.IsRowMajor() && !t.o.IsContiguous(): o = 1 case t.o.IsColMajor() && t.o.IsContiguous(): o = 2 case t.o.IsColMajor() && !t.o.IsContiguous(): o = 3 } var triangle int32 switch t.Δ { case NotTriangle: triangle = fb.TriangleNOT_TRIANGLE case Upper: triangle = fb.TriangleUPPER case Lower: triangle = fb.TriangleLOWER case Symmetric: triangle = fb.TriangleSYMMETRIC } dt := builder.CreateString(t.Dtype().String()) data := t.byteSlice() fb.DenseStartDataVector(builder, len(data)) for i := len(data) - 1; i >= 0; i-- { builder.PrependUint8(data[i]) } databyte := builder.EndVector(len(data)) fb.DenseStart(builder) fb.DenseAddShape(builder, shape) fb.DenseAddStrides(builder, strides) fb.DenseAddO(builder, o) fb.DenseAddT(builder, triangle) fb.DenseAddType(builder, dt) fb.DenseAddData(builder, databyte) serialized := fb.DenseEnd(builder) builder.Finish(serialized) return builder.FinishedBytes(), nil } // FBDecode decodes a byteslice from a flatbuffer table into a *Dense func (t *Dense) FBDecode(buf []byte) error { serialized := fb.GetRootAsDense(buf, 0) o := serialized.O() switch o { case 0: t.o = 0 case 1: t.o = MakeDataOrder(NonContiguous) case 2: t.o = MakeDataOrder(ColMajor) case 3: t.o = MakeDataOrder(ColMajor, NonContiguous) } tri := serialized.T() switch tri { case fb.TriangleNOT_TRIANGLE: t.Δ = NotTriangle case fb.TriangleUPPER: t.Δ = Upper case fb.TriangleLOWER: t.Δ = Lower case fb.TriangleSYMMETRIC: t.Δ = Symmetric } t.shape = Shape(BorrowInts(serialized.ShapeLength())) for i := 0; i < serialized.ShapeLength(); i++ { t.shape[i] = int(int32(serialized.Shape(i))) } t.strides = BorrowInts(serialized.StridesLength()) for i := 0; i < serialized.StridesLength(); i++ { t.strides[i] = int(serialized.Strides(i)) } typ := string(serialized.Type()) for _, dt := range allTypes.set { if dt.String() == typ { t.t = dt break } } if t.e == nil { t.e = StdEng{} } t.makeArray(t.shape.TotalSize()) // allocated data. Now time to actually copy over the data db := t.byteSlice() copy(db, serialized.DataBytes()) t.fix() return t.sanity() } /* PB SERIALIZATION */ // PBEncode encodes the Dense into a protobuf byte slice.
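//
// The intended round trip is symmetrical with FBEncode/FBDecode (an
// illustrative sketch; errors elided):
//
//	buf, _ := T.PBEncode()
//	T2 := new(Dense)
//	_ = T2.PBDecode(buf) // T2 now mirrors T's shape, strides and data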
func (t *Dense) PBEncode() ([]byte, error) { var toSerialize pb.Dense toSerialize.Shape = make([]int32, len(t.shape)) for i, v := range t.shape { toSerialize.Shape[i] = int32(v) } toSerialize.Strides = make([]int32, len(t.strides)) for i, v := range t.strides { toSerialize.Strides[i] = int32(v) } switch { case t.o.IsRowMajor() && t.o.IsContiguous(): toSerialize.O = pb.RowMajorContiguous case t.o.IsRowMajor() && !t.o.IsContiguous(): toSerialize.O = pb.RowMajorNonContiguous case t.o.IsColMajor() && t.o.IsContiguous(): toSerialize.O = pb.ColMajorContiguous case t.o.IsColMajor() && !t.o.IsContiguous(): toSerialize.O = pb.ColMajorNonContiguous } toSerialize.T = pb.Triangle(t.Δ) toSerialize.Type = t.t.String() data := t.byteSlice() toSerialize.Data = make([]byte, len(data)) copy(toSerialize.Data, data) return toSerialize.Marshal() } // PBDecode unmarshalls a protobuf byteslice into a *Dense. func (t *Dense) PBDecode(buf []byte) error { var toSerialize pb.Dense if err := toSerialize.Unmarshal(buf); err != nil { return err } t.shape = make(Shape, len(toSerialize.Shape)) for i, v := range toSerialize.Shape { t.shape[i] = int(v) } t.strides = make([]int, len(toSerialize.Strides)) for i, v := range toSerialize.Strides { t.strides[i] = int(v) } switch toSerialize.O { case pb.RowMajorContiguous: case pb.RowMajorNonContiguous: t.o = MakeDataOrder(NonContiguous) case pb.ColMajorContiguous: t.o = MakeDataOrder(ColMajor) case pb.ColMajorNonContiguous: t.o = MakeDataOrder(ColMajor, NonContiguous) } t.Δ = Triangle(toSerialize.T) typ := string(toSerialize.Type) for _, dt := range allTypes.set { if dt.String() == typ { t.t = dt break } } if t.e == nil { t.e = StdEng{} } t.makeArray(t.shape.TotalSize()) // allocated data. Now time to actually copy over the data db := t.byteSlice() copy(db, toSerialize.Data) return t.sanity() } tensor-0.9.24/dense_io_test.go000066400000000000000000000164751426512615100163130ustar00rootroot00000000000000package tensor import ( "bytes" "encoding/gob" "io/ioutil" "os" "os/exec" "regexp" "testing" "github.com/stretchr/testify/assert" ) func TestSaveLoadNumpy(t *testing.T) { if os.Getenv("CI_NO_PYTHON") == "true" { t.Skip("skipping test; This is being run on a CI tool that does not have Python") } assert := assert.New(t) T := New(WithShape(2, 2), WithBacking([]float64{1, 5, 10, -1})) // also checks the 1D Vector. T1D := New(WithShape(4), WithBacking([]float64{1, 5, 10, -1})) f, _ := os.OpenFile("test.npy", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) f1D, _ := os.OpenFile("test1D.npy", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) T.WriteNpy(f) f.Close() T1D.WriteNpy(f1D) f1D.Close() defer func() { // cleanup err := os.Remove("test.npy") if err != nil { t.Error(err) } err = os.Remove("test1D.npy") if err != nil { t.Error(err) } }() script := "import numpy as np\nx = np.load('test.npy')\nprint(x)\nx = np.load('test1D.npy')\nprint(x)" // Configurable python command, in order to be able to use python or python3 pythonCommand := os.Getenv("PYTHON_COMMAND") if pythonCommand == "" { pythonCommand = "python" } cmd := exec.Command(pythonCommand) stdin, err := cmd.StdinPipe() if err != nil { t.Error(err) } stderr, err := cmd.StderrPipe() if err != nil { t.Error(err) } go func() { defer stdin.Close() stdin.Write([]byte(script)) }() buf := new(bytes.Buffer) cmd.Stdout = buf if err = cmd.Start(); err != nil { t.Error(err) t.Logf("Do you have a python with numpy installed? You can change the python interpreter by setting the environment variable PYTHON_COMMAND. 
Current value: PYTHON_COMMAND=%s", pythonCommand) } importError := `ImportError: No module named numpy` slurpErr, _ := ioutil.ReadAll(stderr) if ok, _ := regexp.Match(importError, slurpErr); ok { t.Skipf("Skipping numpy test. It would appear that you do not have Numpy installed.") } if err := cmd.Wait(); err != nil { t.Errorf("%q", err.Error()) } expected := `\[\[\s*1\.\s*5\.\]\n \[\s*10\.\s*-1\.\]\]\n` if ok, _ := regexp.Match(expected, buf.Bytes()); !ok { t.Errorf("Did not successfully read numpy file, \n%q\n%q", buf.String(), expected) } // ok now to test if it can read T2 := new(Dense) buf = new(bytes.Buffer) T.WriteNpy(buf) if err = T2.ReadNpy(buf); err != nil { t.Fatal(err) } assert.Equal(T.Shape(), T2.Shape()) assert.Equal(T.Strides(), T2.Strides()) assert.Equal(T.Data(), T2.Data()) // ok now to test if it can read 1D T1D2 := new(Dense) buf = new(bytes.Buffer) T1D.WriteNpy(buf) if err = T1D2.ReadNpy(buf); err != nil { t.Fatal(err) } assert.Equal(T1D.Shape(), T1D2.Shape()) assert.Equal(T1D.Strides(), T1D2.Strides()) assert.Equal(T1D.Data(), T1D2.Data()) // try with masked array. masked elements should be filled with default value T.ResetMask(false) T.mask[0] = true T3 := new(Dense) buf = new(bytes.Buffer) T.WriteNpy(buf) if err = T3.ReadNpy(buf); err != nil { t.Fatal(err) } assert.Equal(T.Shape(), T3.Shape()) assert.Equal(T.Strides(), T3.Strides()) data := T.Float64s() data[0] = T.FillValue().(float64) assert.Equal(data, T3.Data()) // try with 1D masked array. masked elements should be filled with default value T1D.ResetMask(false) T1D.mask[0] = true T1D3 := new(Dense) buf = new(bytes.Buffer) T1D.WriteNpy(buf) if err = T1D3.ReadNpy(buf); err != nil { t.Fatal(err) } assert.Equal(T1D.Shape(), T1D3.Shape()) assert.Equal(T1D.Strides(), T1D3.Strides()) data = T1D.Float64s() data[0] = T1D.FillValue().(float64) assert.Equal(data, T1D3.Data()) } func TestSaveLoadCSV(t *testing.T) { assert := assert.New(t) for _, gtd := range serializationTestData { if _, ok := gtd.([]complex64); ok { continue } if _, ok := gtd.([]complex128); ok { continue } buf := new(bytes.Buffer) T := New(WithShape(2, 2), WithBacking(gtd)) if err := T.WriteCSV(buf); err != nil { t.Error(err) } T2 := new(Dense) if err := T2.ReadCSV(buf, As(T.t)); err != nil { t.Error(err) } assert.Equal(T.Shape(), T2.Shape(), "Test: %v", gtd) assert.Equal(T.Data(), T2.Data()) } T := New(WithShape(2, 2), WithBacking([]float64{1, 5, 10, -1})) f, _ := os.OpenFile("test.csv", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) T.WriteCSV(f) f.Close() // cleanup err := os.Remove("test.csv") if err != nil { t.Error(err) } // try with masked array. 
masked elements should be filled with default value T.ResetMask(false) T.mask[0] = true f, _ = os.OpenFile("test.csv", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) T.WriteCSV(f) f.Close() // cleanup again err = os.Remove("test.csv") if err != nil { t.Error(err) } } var serializationTestData = []interface{}{ []int{1, 5, 10, -1}, []int8{1, 5, 10, -1}, []int16{1, 5, 10, -1}, []int32{1, 5, 10, -1}, []int64{1, 5, 10, -1}, []uint{1, 5, 10, 255}, []uint8{1, 5, 10, 255}, []uint16{1, 5, 10, 255}, []uint32{1, 5, 10, 255}, []uint64{1, 5, 10, 255}, []float32{1, 5, 10, -1}, []float64{1, 5, 10, -1}, []complex64{1, 5, 10, -1}, []complex128{1, 5, 10, -1}, []string{"hello", "world", "hello", "世界"}, } func TestDense_GobEncodeDecode(t *testing.T) { assert := assert.New(t) var err error for _, gtd := range serializationTestData { buf := new(bytes.Buffer) encoder := gob.NewEncoder(buf) decoder := gob.NewDecoder(buf) T := New(WithShape(2, 2), WithBacking(gtd)) if err = encoder.Encode(T); err != nil { t.Errorf("Error while encoding %v: %v", gtd, err) continue } T2 := new(Dense) if err = decoder.Decode(T2); err != nil { t.Errorf("Error while decoding %v: %v", gtd, err) continue } assert.Equal(T.Shape(), T2.Shape()) assert.Equal(T.Strides(), T2.Strides()) assert.Equal(T.Data(), T2.Data()) // try with masked array. masked elements should be filled with default value buf = new(bytes.Buffer) encoder = gob.NewEncoder(buf) decoder = gob.NewDecoder(buf) T.ResetMask(false) T.mask[0] = true assert.True(T.IsMasked()) if err = encoder.Encode(T); err != nil { t.Errorf("Error while encoding %v: %v", gtd, err) continue } T3 := new(Dense) if err = decoder.Decode(T3); err != nil { t.Errorf("Error while decoding %v: %v", gtd, err) continue } assert.Equal(T.Shape(), T3.Shape()) assert.Equal(T.Strides(), T3.Strides()) assert.Equal(T.Data(), T3.Data()) assert.Equal(T.mask, T3.mask) } } func TestDense_FBEncodeDecode(t *testing.T) { assert := assert.New(t) for _, gtd := range serializationTestData { T := New(WithShape(2, 2), WithBacking(gtd)) buf, err := T.FBEncode() if err != nil { t.Errorf("UNPOSSIBLE!: %v", err) continue } T2 := new(Dense) if err = T2.FBDecode(buf); err != nil { t.Errorf("Error while decoding %v: %v", gtd, err) continue } assert.Equal(T.Shape(), T2.Shape()) assert.Equal(T.Strides(), T2.Strides()) assert.Equal(T.Data(), T2.Data()) // TODO: MASKED ARRAY } } func TestDense_PBEncodeDecode(t *testing.T) { assert := assert.New(t) for _, gtd := range serializationTestData { T := New(WithShape(2, 2), WithBacking(gtd)) buf, err := T.PBEncode() if err != nil { t.Errorf("UNPOSSIBLE!: %v", err) continue } T2 := new(Dense) if err = T2.PBDecode(buf); err != nil { t.Errorf("Error while decoding %v: %v", gtd, err) continue } assert.Equal(T.Shape(), T2.Shape()) assert.Equal(T.Strides(), T2.Strides()) assert.Equal(T.Data(), T2.Data()) // TODO: MASKED ARRAY } } tensor-0.9.24/dense_linalg.go000066400000000000000000000273401426512615100161040ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) // Trace returns the trace of the matrix (i.e. the sum of the diagonal elements). It only works for matrices func (t *Dense) Trace() (retVal interface{}, err error) { e := t.e if tracer, ok := e.(Tracer); ok { return tracer.Trace(t) } return nil, errors.Errorf("Engine %T does not support Trace", e) } // Inner performs a dot product on two vectors. If t or other are not vectors, it will return an error. 
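//
// An illustrative sketch, using the same values as this package's innerTests:
//
//	a := New(WithShape(3), WithBacking(Range(Float64, 0, 3)))
//	b := New(WithShape(3), WithBacking(Range(Float64, 0, 3)))
//	v, _ := a.Inner(b) // v is float64(5), i.e. 0*0 + 1*1 + 2*2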
func (t *Dense) Inner(other Tensor) (retVal interface{}, err error) { // check that the data is a float if err = typeclassCheck(t.t, floatcmplxTypes); err != nil { return nil, errors.Wrapf(err, unsupportedDtype, t.t, "Inner") } // check both are vectors if !t.Shape().IsVector() || !other.Shape().IsVector() { return nil, errors.Errorf("Inner only works when there are two vectors. t's Shape: %v; other's Shape %v", t.Shape(), other.Shape()) } // we do this check instead of the more common t.Shape()[1] != other.Shape()[0], // basically to ensure a similarity with numpy's dot and vectors. if t.len() != other.DataSize() { return nil, errors.Errorf(shapeMismatch, t.Shape(), other.Shape()) } e := t.e switch ip := e.(type) { case InnerProderF32: return ip.Inner(t, other) case InnerProderF64: return ip.Inner(t, other) case InnerProder: return ip.Inner(t, other) } return nil, errors.Errorf("Engine does not support Inner()") } // MatVecMul performs a matrix-vector multiplication. func (t *Dense) MatVecMul(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) { // check that it's a matrix x vector if t.Dims() != 2 || !other.Shape().IsVector() { err = errors.Errorf("MatVecMul requires t be a matrix and other to be a vector. Got t's shape: %v, other's shape: %v", t.Shape(), other.Shape()) return } // checks that t is mxn matrix m := t.Shape()[0] n := t.Shape()[1] // check shape var odim int oshape := other.Shape() switch { case oshape.IsColVec(): odim = oshape[0] case oshape.IsRowVec(): odim = oshape[1] case oshape.IsVector(): odim = oshape[0] default: err = errors.Errorf(shapeMismatch, t.Shape(), other.Shape()) // should be unreachable return } if odim != n { err = errors.Errorf(shapeMismatch, n, other.Shape()) return } expectedShape := Shape{m} // check whether retVal has the same size as the resulting matrix would be: mx1 fo := ParseFuncOpts(opts...) defer returnOpOpt(fo) if retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil { err = errors.Wrapf(err, opFail, "MatVecMul") return } if retVal == nil { retVal = recycledDense(t.t, expectedShape, WithEngine(t.e)) if t.o.IsColMajor() { AsFortran(nil)(retVal) } } e := t.e if mvm, ok := e.(MatVecMuler); ok { if err = mvm.MatVecMul(t, other, retVal); err != nil { return nil, errors.Wrapf(err, opFail, "MatVecMul") } return handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape) } return nil, errors.New("engine does not support MatVecMul") } // MatMul is the basic matrix multiplication that you learned in high school. It takes an optional reuse ndarray, where the ndarray is reused as the result. // If that isn't passed in, a new ndarray will be created instead. func (t *Dense) MatMul(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) { // check that both are matrices if !t.Shape().IsMatrix() || !other.Shape().IsMatrix() { err = errors.Errorf("MatMul requires both operands to be matrices. Got t's shape: %v, other's shape: %v", t.Shape(), other.Shape()) return } // checks that t is mxk matrix var m, n, k int m = t.Shape()[0] k = t.Shape()[1] n = other.Shape()[1] // check shape if k != other.Shape()[0] { err = errors.Errorf(shapeMismatch, t.Shape(), other.Shape()) return } // check whether retVal has the same size as the resulting matrix would be: mxn expectedShape := Shape{m, n} fo := ParseFuncOpts(opts...) 
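// parse the function options (reuse, incr, safe) up front; the parsed
// options struct is returned to its pool when MatMul exits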
defer returnOpOpt(fo) if retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil { err = errors.Wrapf(err, opFail, "MatMul") return } if retVal == nil { retVal = recycledDense(t.t, expectedShape, WithEngine(t.e)) if t.o.IsColMajor() { AsFortran(nil)(retVal) } } e := t.e if mm, ok := e.(MatMuler); ok { if err = mm.MatMul(t, other, retVal); err != nil { return } return handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape) } return nil, errors.New("engine does not support MatMul") } // Outer finds the outer product of two vectors func (t *Dense) Outer(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) { // check both are vectors if !t.Shape().IsVector() || !other.Shape().IsVector() { err = errors.Errorf("Outer only works when there are two vectors. t's shape: %v. other's shape: %v", t.Shape(), other.Shape()) return } m := t.Size() n := other.Size() // check whether retVal has the same size as the resulting matrix would be: mxn expectedShape := Shape{m, n} fo := ParseFuncOpts(opts...) defer returnOpOpt(fo) if retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil { err = errors.Wrapf(err, opFail, "Outer") return } if retVal == nil { retVal = recycledDense(t.t, expectedShape, WithEngine(t.e)) if t.o.IsColMajor() { AsFortran(nil)(retVal) } } e := t.e // DGER does not have any beta. So the values have to be zeroed first if the tensor is to be reused retVal.Zero() if op, ok := e.(OuterProder); ok { if err = op.Outer(t, other, retVal); err != nil { return nil, errors.Wrapf(err, opFail, "engine.Outer") } return handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape) } return nil, errors.New("engine does not support Outer") } // TensorMul is for multiplying Tensors with more than 2 dimensions. // // The algorithm is conceptually simple (but tricky to get right): // 1. Transpose and reshape the Tensors in such a way that both t and other are 2D matrices // 2. Use DGEMM to multiply them // 3. Reshape the results to be the new expected result // // This function is a Go implementation of Numpy's tensordot method. It simplifies a lot of what Numpy does. func (t *Dense) TensorMul(other Tensor, axesA, axesB []int) (retVal *Dense, err error) { ts := t.Shape() td := len(ts) os := other.Shape() od := len(os) na := len(axesA) nb := len(axesB) sameLength := na == nb if sameLength { for i := 0; i < na; i++ { if ts[axesA[i]] != os[axesB[i]] { sameLength = false break } if axesA[i] < 0 { axesA[i] += td } if axesB[i] < 0 { axesB[i] += od } } } if !sameLength { err = errors.Errorf(shapeMismatch, ts, os) return } // handle shapes var notins []int for i := 0; i < td; i++ { notin := true for _, a := range axesA { if i == a { notin = false break } } if notin { notins = append(notins, i) } } newAxesA := BorrowInts(len(notins) + len(axesA)) defer ReturnInts(newAxesA) newAxesA = newAxesA[:0] newAxesA = append(notins, axesA...) n2 := 1 for _, a := range axesA { n2 *= ts[a] } newShapeT := Shape(BorrowInts(2)) defer ReturnInts(newShapeT) newShapeT[0] = ts.TotalSize() / n2 newShapeT[1] = n2 retShape1 := BorrowInts(len(ts)) defer ReturnInts(retShape1) retShape1 = retShape1[:0] for _, ni := range notins { retShape1 = append(retShape1, ts[ni]) } // work on other now notins = notins[:0] for i := 0; i < od; i++ { notin := true for _, a := range axesB { if i == a { notin = false break } } if notin { notins = append(notins, i) } } newAxesB := BorrowInts(len(notins) + len(axesB)) defer ReturnInts(newAxesB) newAxesB = newAxesB[:0] newAxesB = append(axesB, notins...)
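// other is collapsed the opposite way around: its contracted axes come first,
// so its 2D view has shape (n2, os.TotalSize()/n2)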
newShapeO := Shape(BorrowInts(2)) defer ReturnInts(newShapeO) newShapeO[0] = n2 newShapeO[1] = os.TotalSize() / n2 retShape2 := BorrowInts(len(ts)) retShape2 = retShape2[:0] for _, ni := range notins { retShape2 = append(retShape2, os[ni]) } // we borrowClone because we don't want to touch the original Tensors doT := t.Clone().(*Dense) doOther := other.Clone().(*Dense) defer ReturnTensor(doT) defer ReturnTensor(doOther) if err = doT.T(newAxesA...); err != nil { return } doT.Transpose() // we have to materialize the transpose first or the underlying data won't be changed and the reshape that follows would be meaningless if err = doT.Reshape(newShapeT...); err != nil { return } if err = doOther.T(newAxesB...); err != nil { return } doOther.Transpose() if err = doOther.Reshape(newShapeO...); err != nil { return } // the magic happens here var rt Tensor if rt, err = Dot(doT, doOther); err != nil { return } retVal = rt.(*Dense) retShape := BorrowInts(len(retShape1) + len(retShape2)) defer ReturnInts(retShape) retShape = retShape[:0] retShape = append(retShape, retShape1...) retShape = append(retShape, retShape2...) if len(retShape) == 0 { // In case a scalar is returned, it should be returned as shape = {1} retShape = append(retShape, 1) } if err = retVal.Reshape(retShape...); err != nil { return } return } // SVD does the Singular Value Decomposition for the *Dense. // // How it works is it temporarily converts the *Dense into a gonum/mat64 matrix, and uses Gonum's SVD function to perform the SVD. // In the future, when gonum/lapack fully supports float32, we'll look into rewriting this func (t *Dense) SVD(uv, full bool) (s, u, v *Dense, err error) { e := t.Engine() if svder, ok := e.(SVDer); ok { var sT, uT, vT Tensor if sT, uT, vT, err = svder.SVD(t, uv, full); err != nil { return nil, nil, nil, errors.Wrap(err, "Error while performing *Dense.SVD") } if s, err = assertDense(sT); err != nil { return nil, nil, nil, errors.Wrapf(err, "sT is not *Dense (uv %t full %t). Got %T instead", uv, full, sT) } // if not uv and not full, u can be nil if u, err = assertDense(uT); err != nil && !(!uv && !full) { return nil, nil, nil, errors.Wrapf(err, "uT is not *Dense (uv %t full %t). Got %T instead", uv, full, uT) } // if not uv and not full, v can be nil if v, err = assertDense(vT); err != nil && !(!uv && !full) { return nil, nil, nil, errors.Wrapf(err, "vT is not *Dense (uv %t full %t). Got %T instead", uv, full, vT) } return s, u, v, nil } return nil, nil, nil, errors.New("Engine does not support SVD") } /* UTILITY FUNCTIONS */ // handleReuse extracts a *Dense from Tensor, and checks the shape of the reuse Tensor func handleReuse(reuse Tensor, expectedShape Shape, safe bool) (retVal *Dense, err error) { if reuse != nil { if retVal, err = assertDense(reuse); err != nil { err = errors.Wrapf(err, opFail, "handling reuse") return } if !safe { return } if err = reuseCheckShape(retVal, expectedShape); err != nil { err = errors.Wrapf(err, "Unable to process reuse *Dense Tensor. Shape error.") return } return } return } // handleIncr is the cleanup step for when there is a Tensor to increment.
If the result tensor is the same as the reuse Tensor, the result tensor gets returned to the pool func handleIncr(res *Dense, reuse, incr Tensor, expectedShape Shape) (retVal *Dense, err error) { // handle increments if incr != nil { if !expectedShape.Eq(incr.Shape()) { err = errors.Errorf(shapeMismatch, expectedShape, incr.Shape()) return } var incrD *Dense var ok bool if incrD, ok = incr.(*Dense); !ok { err = errors.Errorf(extractionFail, "*Dense", incr) return } if err = typeclassCheck(incrD.t, numberTypes); err != nil { err = errors.Wrapf(err, "handleIncr only handles Number types. Got %v instead", incrD.t) return } if _, err = incrD.Add(res, UseUnsafe()); err != nil { return } // vecAdd(incr.data, retVal.data) // return retVal to pool - if and only if retVal is not reuse // reuse indicates that someone else also has the reference to the *Dense if res != reuse { ReturnTensor(res) } // then retVal = incrD return } return res, nil } tensor-0.9.24/dense_linalg_test.go000066400000000000000000001002041426512615100171320ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" "gorgonia.org/vecf64" ) type linalgTest struct { a, b interface{} shapeA, shapeB Shape transA, transB bool reuse, incr interface{} shapeR, shapeI Shape correct interface{} correctIncr interface{} correctIncrReuse interface{} correctShape Shape err bool errIncr bool errReuse bool } var traceTests = []struct { data interface{} correct interface{} err bool }{ {[]int{0, 1, 2, 3, 4, 5}, int(4), false}, {[]int8{0, 1, 2, 3, 4, 5}, int8(4), false}, {[]int16{0, 1, 2, 3, 4, 5}, int16(4), false}, {[]int32{0, 1, 2, 3, 4, 5}, int32(4), false}, {[]int64{0, 1, 2, 3, 4, 5}, int64(4), false}, {[]uint{0, 1, 2, 3, 4, 5}, uint(4), false}, {[]uint8{0, 1, 2, 3, 4, 5}, uint8(4), false}, {[]uint16{0, 1, 2, 3, 4, 5}, uint16(4), false}, {[]uint32{0, 1, 2, 3, 4, 5}, uint32(4), false}, {[]uint64{0, 1, 2, 3, 4, 5}, uint64(4), false}, {[]float32{0, 1, 2, 3, 4, 5}, float32(4), false}, {[]float64{0, 1, 2, 3, 4, 5}, float64(4), false}, {[]complex64{0, 1, 2, 3, 4, 5}, complex64(4), false}, {[]complex128{0, 1, 2, 3, 4, 5}, complex128(4), false}, {[]bool{true, false, true, false, true, false}, nil, true}, } func TestDense_Trace(t *testing.T) { assert := assert.New(t) for i, tts := range traceTests { T := New(WithBacking(tts.data), WithShape(2, 3)) trace, err := T.Trace() if checkErr(t, tts.err, err, "Trace", i) { continue } assert.Equal(tts.correct, trace) // T = New(WithBacking(tts.data)) _, err = T.Trace() if err == nil { t.Error("Expected an error when Trace() on non-matrices") } } } var innerTests = []struct { a, b interface{} shapeA, shapeB Shape correct interface{} err bool }{ {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3, 1}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3, 1}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{1, 3}, float64(5), false}, {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{1, 3}, float64(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), 
Shape{1, 3}, Shape{3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3, 1}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3, 1}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{1, 3}, float32(5), false}, {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{1, 3}, float32(5), false}, // stupids: type differences {Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true}, {Range(Float32, 0, 3), Range(Byte, 0, 3), Shape{3}, Shape{3}, nil, true}, {Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, nil, true}, {Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true}, // differing size {Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{4}, Shape{3}, nil, true}, // A is not a vector {Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{2, 2}, Shape{3}, nil, true}, } func TestDense_Inner(t *testing.T) { for i, its := range innerTests { a := New(WithShape(its.shapeA...), WithBacking(its.a)) b := New(WithShape(its.shapeB...), WithBacking(its.b)) T, err := a.Inner(b) if checkErr(t, its.err, err, "Inner", i) { continue } assert.Equal(t, its.correct, T) } } var matVecMulTests = []linalgTest{ // Float64s {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false}, {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false}, {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false}, // float64s with transposed matrix {Range(Float64, 0, 6), Range(Float64, 0, 2), Shape{2, 3}, Shape{2}, true, false, Range(Float64, 52, 55), Range(Float64, 100, 103), Shape{3}, Shape{3}, []float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{3}, false, false, false}, // Float32s {Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2}, []float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false}, {Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false, Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2}, []float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false}, {Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false, Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2}, []float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false}, // stupids : unpossible shapes (wrong A) {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{6}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, //stupids: bad A shape {Range(Float64, 0, 8), Range(Float64, 0, 3), Shape{4, 2}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115},
[]float64{110, 129}, Shape{2}, true, false, false}, //stupids: bad B shape {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, //stupids: bad reuse {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 55), Range(Float64, 100, 102), Shape{3}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true}, //stupids: bad incr shape {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 105), Shape{2}, Shape{5}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false}, // stupids: type mismatch A and B {Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B {Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B {Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B {Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch A and B (non-Float) {Range(Float64, 0, 6), Range(Int, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false}, // stupids: type mismatch, reuse {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float32, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true}, // stupids: type mismatch, incr {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), Range(Float32, 100, 103), Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false}, // stupids: type mismatch, incr not a Number {Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false, Range(Float64, 52, 54), []bool{true, true, true}, Shape{2}, Shape{3}, []float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false}, } func TestDense_MatVecMul(t *testing.T) { assert := assert.New(t) for i, mvmt := range matVecMulTests { a := New(WithBacking(mvmt.a), WithShape(mvmt.shapeA...)) b := New(WithBacking(mvmt.b), WithShape(mvmt.shapeB...)) if mvmt.transA { if err := a.T(); err != nil { t.Error(err) continue } } T, err := a.MatVecMul(b) if checkErr(t, mvmt.err, err, "Safe", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsRowMajor()) 
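// the raw backing data must match the hand-computed expectation in the test table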
assert.Equal(mvmt.correct, T.Data()) // incr incr := New(WithBacking(mvmt.incr), WithShape(mvmt.shapeI...)) T, err = a.MatVecMul(b, WithIncr(incr)) if checkErr(t, mvmt.errIncr, err, "WithIncr", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsRowMajor()) assert.Equal(mvmt.correctIncr, T.Data()) // reuse reuse := New(WithBacking(mvmt.reuse), WithShape(mvmt.shapeR...)) T, err = a.MatVecMul(b, WithReuse(reuse)) if checkErr(t, mvmt.errReuse, err, "WithReuse", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.True(T.DataOrder().IsRowMajor()) assert.Equal(mvmt.correct, T.Data()) // reuse AND incr T, err = a.MatVecMul(b, WithIncr(incr), WithReuse(reuse)) if checkErr(t, mvmt.err, err, "WithReuse and WithIncr", i) { continue } assert.True(mvmt.correctShape.Eq(T.Shape())) assert.Equal(mvmt.correctIncrReuse, T.Data()) } } var matMulTests = []linalgTest{ // Float64s {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, false, false}, // Float32s {Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float32, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2}, []float32{10, 13, 28, 40}, []float32{110, 114, 130, 143}, []float32{120, 127, 158, 183}, Shape{2, 2}, false, false, false}, // Edge cases - Row Vecs (Float64) {Range(Float64, 0, 2), Range(Float64, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false, Range(Float64, 10, 16), Range(Float64, 100, 106), Shape{2, 3}, Shape{2, 3}, []float64{0, 0, 0, 0, 1, 2}, []float64{100, 101, 102, 103, 105, 107}, []float64{100, 101, 102, 103, 106, 109}, Shape{2, 3}, false, false, false}, {Range(Float64, 0, 2), Range(Float64, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false, Range(Float64, 10, 13), Range(Float64, 100, 103), Shape{1, 3}, Shape{1, 3}, []float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{1, 3}, false, false, false}, {Range(Float64, 0, 2), Range(Float64, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false, Range(Float64, 0, 1), Range(Float64, 100, 101), Shape{1, 1}, Shape{1, 1}, []float64{1}, []float64{101}, []float64{102}, Shape{1, 1}, false, false, false}, // Edge cases - Row Vecs (Float32) {Range(Float32, 0, 2), Range(Float32, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false, Range(Float32, 10, 16), Range(Float32, 100, 106), Shape{2, 3}, Shape{2, 3}, []float32{0, 0, 0, 0, 1, 2}, []float32{100, 101, 102, 103, 105, 107}, []float32{100, 101, 102, 103, 106, 109}, Shape{2, 3}, false, false, false}, {Range(Float32, 0, 2), Range(Float32, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false, Range(Float32, 10, 13), Range(Float32, 100, 103), Shape{1, 3}, Shape{1, 3}, []float32{3, 4, 5}, []float32{103, 105, 107}, []float32{106, 109, 112}, Shape{1, 3}, false, false, false}, {Range(Float32, 0, 2), Range(Float32, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false, Range(Float32, 0, 1), Range(Float32, 100, 101), Shape{1, 1}, Shape{1, 1}, []float32{1}, []float32{101}, []float32{102}, Shape{1, 1}, false, false, false}, // stupids - bad shape (not matrices): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false}, // stupids - bad shape 
(incompatible shapes): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6, 1}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false}, // stupids - bad shape (bad reuse shape): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 57), Range(Float64, 100, 104), Shape{5}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, false, true}, // stupids - bad shape (bad incr shape): {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{4}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, true, false}, // stupids - type mismatch (a,b) {Range(Float64, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false}, // stupids - type mismatch (a,b) {Range(Float32, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false}, // stupids type mismatch (b not float) {Range(Float64, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false}, // stupids type mismatch (a not float) {Range(Int, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false}, // stupids: type mismatch (incr) {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, true, false}, // stupids: type mismatch (reuse) {Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float32, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2}, []float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, false, true}, // stupids: type mismatch (reuse) {Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false, Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2}, []float32{10, 13, 28, 40}, []float32{110, 114, 130, 143}, []float32{120, 127, 158, 183}, Shape{2, 2}, false, false, true}, } func TestDense_MatMul(t *testing.T) { assert := assert.New(t) for i, mmt := range matMulTests { a := New(WithBacking(mmt.a), WithShape(mmt.shapeA...)) b := New(WithBacking(mmt.b), WithShape(mmt.shapeB...)) T, err := a.MatMul(b) if checkErr(t, mmt.err, err, "Safe", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correct, T.Data()) // incr incr := 
New(WithBacking(mmt.incr), WithShape(mmt.shapeI...)) T, err = a.MatMul(b, WithIncr(incr)) if checkErr(t, mmt.errIncr, err, "WithIncr", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correctIncr, T.Data()) // reuse reuse := New(WithBacking(mmt.reuse), WithShape(mmt.shapeR...)) T, err = a.MatMul(b, WithReuse(reuse)) if checkErr(t, mmt.errReuse, err, "WithReuse", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correct, T.Data()) // reuse AND incr T, err = a.MatMul(b, WithIncr(incr), WithReuse(reuse)) if checkErr(t, mmt.err, err, "WithIncr and WithReuse", i) { continue } assert.True(mmt.correctShape.Eq(T.Shape())) assert.Equal(mmt.correctIncrReuse, T.Data()) } } var outerTests = []linalgTest{ // Float64s {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, false, false, false}, // Float32s {Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float32, 52, 61), Range(Float32, 100, 109), Shape{3, 3}, Shape{3, 3}, []float32{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float32{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float32{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, false, false, false}, // stupids - a or b not vector {Range(Float64, 0, 3), Range(Float64, 0, 6), Shape{3}, Shape{3, 2}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, true, false, false}, // stupids - bad incr shape {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 106), Shape{3, 3}, Shape{3, 2}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, false, true, false}, // stupids - bad reuse shape {Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 58), Range(Float64, 100, 109), Shape{3, 2}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, false, false, true}, // stupids - b not Float {Range(Float64, 0, 3), Range(Int, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, true, false, false}, // stupids - a not Float {Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, true, false, false}, // stupids - a-b type mismatch {Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 
106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, true, false, false}, // stupids a-b type mismatch {Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false, Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3}, []float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3}, true, false, false}, } func TestDense_Outer(t *testing.T) { assert := assert.New(t) for i, ot := range outerTests { a := New(WithBacking(ot.a), WithShape(ot.shapeA...)) b := New(WithBacking(ot.b), WithShape(ot.shapeB...)) T, err := a.Outer(b) if checkErr(t, ot.err, err, "Safe", i) { continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.Equal(ot.correct, T.Data()) // incr incr := New(WithBacking(ot.incr), WithShape(ot.shapeI...)) T, err = a.Outer(b, WithIncr(incr)) if checkErr(t, ot.errIncr, err, "WithIncr", i) { continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.Equal(ot.correctIncr, T.Data()) // reuse reuse := New(WithBacking(ot.reuse), WithShape(ot.shapeR...)) T, err = a.Outer(b, WithReuse(reuse)) if checkErr(t, ot.errReuse, err, "WithReuse", i) { continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.Equal(ot.correct, T.Data()) // reuse AND incr T, err = a.Outer(b, WithIncr(incr), WithReuse(reuse)) if err != nil { t.Errorf("Reuse and Incr error'd %+v", err) continue } assert.True(ot.correctShape.Eq(T.Shape())) assert.Equal(ot.correctIncrReuse, T.Data()) } } var tensorMulTests = []struct { a, b interface{} shapeA, shapeB Shape reuse, incr interface{} shapeR, shapeI Shape correct interface{} correctIncr interface{} correctIncrReuse interface{} correctShape Shape err bool errIncr bool errReuse bool axesA, axesB []int }{ {a: Range(Float64, 0, 60), b: Range(Float64, 0, 24), shapeA: Shape{3, 4, 5}, shapeB: Shape{4, 3, 2}, axesA: []int{1, 0}, axesB: []int{0, 1}, correct: []float64{4400, 4730, 4532, 4874, 4664, 5018, 4796, 5162, 4928, 5306}, correctShape: Shape{5, 2}}, } func TestDense_TensorMul(t *testing.T) { assert := assert.New(t) for i, tmt := range tensorMulTests { a := New(WithShape(tmt.shapeA...), WithBacking(tmt.a)) b := New(WithShape(tmt.shapeB...), WithBacking(tmt.b)) T, err := a.TensorMul(b, tmt.axesA, tmt.axesB) if checkErr(t, tmt.err, err, "Safe", i) { continue } assert.True(tmt.correctShape.Eq(T.Shape())) assert.Equal(tmt.correct, T.Data()) } } func TestDot(t *testing.T) { assert := assert.New(t) var a, b, c, r Tensor var A, B, R, R2 Tensor var s, s2 Tensor var incr Tensor var err error var expectedShape Shape var expectedData []float64 var expectedScalar float64 // vector-vector t.Log("Vec⋅Vec") a = New(Of(Float64), WithShape(3, 1), WithBacking(Range(Float64, 0, 3))) b = New(Of(Float64), WithShape(3, 1), WithBacking(Range(Float64, 0, 3))) r, err = Dot(a, b) expectedShape = Shape{1} expectedScalar = float64(5) assert.Nil(err) assert.Equal(expectedScalar, r.Data()) assert.True(ScalarShape().Eq(r.Shape())) // vector-mat (which is the same as matᵀ*vec) t.Log("Vec⋅Mat dot, should be equal to Aᵀb") A = New(Of(Float64), WithShape(3, 2), WithBacking(Range(Float64, 0, 6))) R, err = Dot(b, A) expectedShape = Shape{2} expectedData = []float64{10, 13} assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // mat-mat t.Log("Mat⋅Mat") A = New(Of(Float64), WithShape(4, 5), WithBacking(Range(Float64, 0, 20))) B = New(Of(Float64), WithShape(5, 10), WithBacking(Range(Float64, 2, 52))) R, err = 
Dot(A, B) expectedShape = Shape{4, 10} expectedData = []float64{ 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 870, 905, 940, 975, 1010, 1045, 1080, 1115, 1150, 1185, 1420, 1480, 1540, 1600, 1660, 1720, 1780, 1840, 1900, 1960, 1970, 2055, 2140, 2225, 2310, 2395, 2480, 2565, 2650, 2735, } assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // T-T t.Log("3T⋅3T") A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24))) B = New(Of(Float64), WithShape(3, 4, 2), WithBacking(Range(Float64, 0, 24))) R, err = Dot(A, B) expectedShape = Shape{2, 3, 3, 2} expectedData = []float64{ 28, 34, 76, 82, 124, 130, 76, 98, 252, 274, 428, 450, 124, 162, 428, 466, 732, 770, // 172, 226, 604, 658, 1036, 1090, 220, 290, 780, 850, 1340, 1410, 268, 354, 956, 1042, 1644, 1730, } assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // T-T t.Log("3T⋅4T") A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24))) B = New(Of(Float64), WithShape(2, 3, 4, 5), WithBacking(Range(Float64, 0, 120))) R, err = Dot(A, B) expectedShape = Shape{2, 3, 2, 3, 5} expectedData = []float64{ 70, 76, 82, 88, 94, 190, 196, 202, 208, 214, 310, 316, 322, 328, 334, 430, 436, 442, 448, 454, 550, 556, 562, 568, 574, 670, 676, 682, 688, 694, 190, 212, 234, 256, 278, 630, 652, 674, 696, 718, 1070, 1092, 1114, 1136, 1158, 1510, 1532, 1554, 1576, 1598, 1950, 1972, 1994, 2016, 2038, 2390, 2412, 2434, 2456, 2478, 310, 348, 386, 424, 462, 1070, 1108, 1146, 1184, 1222, 1830, 1868, 1906, 1944, 1982, 2590, 2628, 2666, 2704, 2742, 3350, 3388, 3426, 3464, 3502, 4110, 4148, 4186, 4224, 4262, 430, 484, 538, 592, 646, 1510, 1564, 1618, 1672, 1726, 2590, 2644, 2698, 2752, 2806, 3670, 3724, 3778, 3832, 3886, 4750, 4804, 4858, 4912, 4966, 5830, 5884, 5938, 5992, 6046, 550, 620, 690, 760, 830, 1950, 2020, 2090, 2160, 2230, 3350, 3420, 3490, 3560, 3630, 4750, 4820, 4890, 4960, 5030, 6150, 6220, 6290, 6360, 6430, 7550, 7620, 7690, 7760, 7830, 670, 756, 842, 928, 1014, 2390, 2476, 2562, 2648, 2734, 4110, 4196, 4282, 4368, 4454, 5830, 5916, 6002, 6088, 6174, 7550, 7636, 7722, 7808, 7894, 9270, 9356, 9442, 9528, 9614, } assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // T-v t.Log("3T⋅Vec") b = New(Of(Float64), WithShape(4), WithBacking(Range(Float64, 0, 4))) R, err = Dot(A, b) expectedShape = Shape{2, 3} expectedData = []float64{ 14, 38, 62, 86, 110, 134, } assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // v-T t.Log("Vec⋅3T") R2, err = Dot(b, B) expectedShape = Shape{2, 3, 5} expectedData = []float64{ 70, 76, 82, 88, 94, 190, 196, 202, 208, 214, 310, 316, 322, 328, 334, 430, 436, 442, 448, 454, 550, 556, 562, 568, 574, 670, 676, 682, 688, 694, } assert.Nil(err) assert.Equal(expectedData, R2.Data()) assert.Equal(expectedShape, R2.Shape()) // m-3T t.Log("Mat⋅3T") A = New(Of(Float64), WithShape(2, 4), WithBacking(Range(Float64, 0, 8))) B = New(Of(Float64), WithShape(2, 4, 5), WithBacking(Range(Float64, 0, 40))) R, err = Dot(A, B) expectedShape = Shape{2, 2, 5} expectedData = []float64{ 70, 76, 82, 88, 94, 190, 196, 202, 208, 214, 190, 212, 234, 256, 278, 630, 652, 674, 696, 718, } assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // test reuse // m-v with reuse t.Log("Mat⋅Vec with reuse") R = New(Of(Float64), WithShape(2)) R2, err = Dot(A, b, WithReuse(R)) expectedShape = Shape{2} expectedData = []float64{14, 38} 
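// Note: WithReuse is expected to hand back the very tensor that was passed in, with the result written into its backing slice; the asserts that follow check both the identity (R2 is R) and the data.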
assert.Nil(err) assert.Equal(R, R2) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // 3T-vec with reuse t.Logf("3T⋅vec with reuse") R = New(Of(Float64), WithShape(6)) A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24))) R2, err = Dot(A, b, WithReuse(R)) expectedShape = Shape{2, 3} expectedData = []float64{ 14, 38, 62, 86, 110, 134, } assert.Nil(err) assert.Equal(R, R2) assert.Equal(expectedData, R2.Data()) assert.Equal(expectedShape, R2.Shape()) // v-m t.Log("vec⋅Mat with reuse") R = New(Of(Float64), WithShape(2)) a = New(Of(Float64), WithShape(4), WithBacking(Range(Float64, 0, 4))) B = New(Of(Float64), WithShape(4, 2), WithBacking(Range(Float64, 0, 8))) R2, err = Dot(a, B, WithReuse(R)) expectedShape = Shape{2} expectedData = []float64{28, 34} assert.Nil(err) assert.Equal(R, R2) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // test incr incrBack := make([]float64, 2) copy(incrBack, expectedData) incr = New(Of(Float64), WithBacking(incrBack), WithShape(2)) R, err = Dot(a, B, WithIncr(incr)) vecf64.Scale(expectedData, 2) assert.Nil(err) assert.Equal(incr, R) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // The Nearly Stupids s = New(FromScalar(5.0)) s2 = New(FromScalar(10.0)) R, err = Dot(s, s2) assert.Nil(err) assert.True(R.IsScalar()) assert.Equal(float64(50), R.Data()) R.Zero() R2, err = Dot(s, s2, WithReuse(R)) assert.Nil(err) assert.True(R2.IsScalar()) assert.Equal(float64(50), R2.Data()) R, err = Dot(s, A) expectedData = vecf64.Range(0, 24) vecf64.Scale(expectedData, 5) assert.Nil(err) assert.Equal(A.Shape(), R.Shape()) assert.Equal(expectedData, R.Data()) R.Zero() R2, err = Dot(s, A, WithReuse(R)) assert.Nil(err) assert.Equal(R, R2) assert.Equal(A.Shape(), R2.Shape()) assert.Equal(expectedData, R2.Data()) R, err = Dot(A, s) assert.Nil(err) assert.Equal(A.Shape(), R.Shape()) assert.Equal(expectedData, R.Data()) R.Zero() R2, err = Dot(A, s, WithReuse(R)) assert.Nil(err) assert.Equal(R, R2) assert.Equal(A.Shape(), R2.Shape()) assert.Equal(expectedData, R2.Data()) incr = New(Of(Float64), WithShape(R2.Shape()...)) copy(incr.Data().([]float64), expectedData) incr2 := incr.Clone().(*Dense) // backup a copy for the following test vecf64.Scale(expectedData, 2) R, err = Dot(A, s, WithIncr(incr)) assert.Nil(err) assert.Equal(incr, R) assert.Equal(A.Shape(), R.Shape()) assert.Equal(expectedData, R.Data()) incr = incr2 R, err = Dot(s, A, WithIncr(incr)) assert.Nil(err) assert.Equal(incr, R) assert.Equal(A.Shape(), R.Shape()) assert.Equal(expectedData, R.Data()) incr = New(Of(Float64), FromScalar(float64(50))) R, err = Dot(s, s2, WithIncr(incr)) assert.Nil(err) assert.Equal(R, incr) assert.True(R.IsScalar()) assert.Equal(float64(100), R.Data()) /* HERE BE STUPIDS */ // different sizes of vectors c = New(Of(Float64), WithShape(1, 100)) _, err = Dot(a, c) assert.NotNil(err) // vector mat, but with shape mismatch B = New(Of(Float64), WithShape(2, 3), WithBacking(Range(Float64, 0, 6))) _, err = Dot(b, B) assert.NotNil(err) // mat-mat but wrong reuse size A = New(Of(Float64), WithShape(2, 2)) R = New(Of(Float64), WithShape(5, 10)) _, err = Dot(A, B, WithReuse(R)) assert.NotNil(err) // mat-vec but wrong reuse size b = New(Of(Float64), WithShape(2)) _, err = Dot(A, b, WithReuse(R)) assert.NotNil(err) // T-T but misaligned shape A = New(Of(Float64), WithShape(2, 3, 4)) B = New(Of(Float64), WithShape(4, 2, 3)) _, err = Dot(A, B) assert.NotNil(err) } func TestOneDot(t *testing.T) { assert := 
assert.New(t) A := New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24))) b := New(Of(Float64), WithShape(4), WithBacking(Range(Float64, 0, 4))) R, err := Dot(A, b) expectedShape := Shape{2, 3} expectedData := []float64{ 14, 38, 62, 86, 110, 134, } assert.Nil(err) assert.Equal(expectedData, R.Data()) assert.Equal(expectedShape, R.Shape()) // 3T-vec with reuse t.Logf("3T⋅vec with reuse") R.Zero() A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24))) R2, err := Dot(A, b, WithReuse(R)) expectedShape = Shape{2, 3} expectedData = []float64{ 14, 38, 62, 86, 110, 134, } assert.Nil(err) assert.Equal(R, R2) assert.Equal(expectedData, R2.Data()) assert.Equal(expectedShape, R2.Shape()) } tensor-0.9.24/dense_mapreduce.go000066400000000000000000000016161426512615100166010ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" // Apply applies a function to all the values in the tensor. func (t *Dense) Apply(fn interface{}, opts ...FuncOpt) (retVal Tensor, err error) { var e Engine = t.e if e == nil { e = StdEng{} } if m, ok := e.(Mapper); ok { return m.Map(fn, t, opts...) } return nil, errors.Errorf("Execution engine %T for %v not a mapper", e, t) } // Reduce applies a reduction function and reduces the values along the given axis. func (t *Dense) Reduce(fn interface{}, axis int, defaultValue interface{}) (retVal *Dense, err error) { var e Engine = t.e if e == nil { e = StdEng{} } if rd, ok := e.(Reducer); ok { var val Tensor if val, err = rd.Reduce(fn, t, axis, defaultValue); err != nil { err = errors.Wrapf(err, opFail, "Dense.Reduce") return } retVal = val.(*Dense) return } return nil, errors.Errorf("Engine %v is not a Reducer", e) } tensor-0.9.24/dense_mask_filling.go000066400000000000000000000045611426512615100172750ustar00rootroot00000000000000package tensor import ( "unsafe" ) // FillValue returns the value used to fill the invalid entries of a masked array func (t *Dense) FillValue() interface{} { switch t.Dtype() { case Bool: return true case Int: return int(999999) case Int8: return int8(99) case Int16: return int16(9999) case Int32: return int32(999999) case Int64: return int64(999999) case Uint: return uint(999999) case Byte: return byte(99) case Uint8: return uint8(99) case Uint16: return uint16(9999) case Uint32: return uint32(999999) case Uint64: return uint64(999999) case Float32: return float32(1.0e20) case Float64: return float64(1.0e20) case Complex64: return complex64(1.0e20 + 0i) case Complex128: return complex128(1.0e20 + 0i) case String: return `N/A` case Uintptr: return uintptr(0x999999) case UnsafePointer: return unsafe.Pointer(nil) default: return nil } } // Filled returns a tensor with masked data replaced by the default fill value, // or by an optionally passed value func (t *Dense) Filled(val ...interface{}) (interface{}, error) { tc := t.Clone().(*Dense) if !t.IsMasked() { return tc, nil } fillval := t.FillValue() if len(val) > 0 { fillval = val[0] } switch { case tc.IsScalar(): if tc.mask[0] { tc.Set(0, fillval) } case tc.IsRowVec() || tc.IsColVec(): sliceList := t.FlatMaskedContiguous() for i := range sliceList { tt, err := tc.Slice(nil, sliceList[i]) if err == nil { ts := tt.(*Dense) ts.Memset(fillval) } } default: it := IteratorFromDense(tc) for i, _, err := it.NextInvalid(); err == nil; i, _, err = it.NextInvalid() { tc.Set(i, fillval) } } return tc, nil } // FilledInplace replaces masked data with the default fill value, // or by an optionally passed value func (t *Dense) FilledInplace(val ...interface{}) (interface{}, error)
{ if !t.IsMasked() { return t, nil } fillval := t.FillValue() if len(val) > 0 { fillval = val[0] } switch { case t.IsScalar(): if t.mask[0] { t.Set(0, fillval) } case t.IsRowVec() || t.IsColVec(): sliceList := t.FlatMaskedContiguous() for i := range sliceList { tt, err := t.Slice(nil, sliceList[i]) if err == nil { ts := tt.(*Dense) ts.Memset(fillval) } } default: it := IteratorFromDense(t) for i, _, err := it.NextInvalid(); err == nil; i, _, err = it.NextInvalid() { t.Set(i, fillval) } } return t, nil } tensor-0.9.24/dense_mask_inspection.go000066400000000000000000000143671426512615100200270ustar00rootroot00000000000000package tensor type maskedReduceFn func(Tensor) interface{} // MaskedReduce applies a reduction function of type maskedReduceFn to the mask, and returns // either an int, or another array func MaskedReduce(t *Dense, retType Dtype, fn maskedReduceFn, axis ...int) interface{} { if len(axis) == 0 || t.IsVector() { return fn(t) } ax := axis[0] if ax >= t.Dims() { return -1 } // create object to be used for slicing slices := make([]Slice, t.Dims()) // calculate shape of tensor to be returned slices[ax] = makeRS(0, 0) tt, _ := t.Slice(slices...) ts := tt.(*Dense) retVal := NewDense(retType, ts.shape) //retVal is array to be returned it := NewIterator(retVal.Info()) // iterate through retVal slices[ax] = makeRS(0, t.shape[ax]) for _, err := it.Next(); err == nil; _, err = it.Next() { coord := it.Coord() k := 0 for d := range slices { if d != ax { slices[d] = makeRS(coord[k], coord[k]+1) k++ } else { slices[d] = nil } } tt, _ = t.Slice(slices...) ts = tt.(*Dense) retVal.SetAt(fn(ts), coord...) } return retVal } // MaskedAny returns true if any mask elements evaluate to true. // If the object is not masked, returns false // !!! Not the same as numpy's, which looks at data elements and not at the mask // Instead, equivalent to numpy ma.getmask(t).any(axis) func (t *Dense) MaskedAny(axis ...int) interface{} { return MaskedReduce(t, Bool, doMaskAny, axis...) } // MaskedAll returns true if all mask elements evaluate to true. // If the object is not masked, returns false // !!! Not the same as numpy's, which looks at data elements and not at the mask // Instead, equivalent to numpy ma.getmask(t).all(axis) func (t *Dense) MaskedAll(axis ...int) interface{} { return MaskedReduce(t, Bool, doMaskAll, axis...) } // MaskedCount counts the masked elements of the array (optionally along the given axis) // returns -1 if axis out of bounds func (t *Dense) MaskedCount(axis ...int) interface{} { return MaskedReduce(t, Int, doMaskCt, axis...) } // NonMaskedCount counts the non-masked elements of the array (optionally along the given axis) // returns -1 if axis out of bounds func (t *Dense) NonMaskedCount(axis ...int) interface{} { return MaskedReduce(t, Int, doNonMaskCt, axis...)
} func doMaskAll(T Tensor) interface{} { switch t := T.(type) { case *Dense: if !t.IsMasked() { return false } m := t.mask if len(t.mask) == t.Size() { for _, v := range m { if !v { return false } } } else { it := IteratorFromDense(t) i, _, _ := it.NextValid() if i != -1 { return false } } return true default: panic("Incompatible type") } } func doMaskAny(T Tensor) interface{} { switch t := T.(type) { case *Dense: if !t.IsMasked() { return false } m := t.mask if len(t.mask) == t.Size() { for _, v := range m { if v { return true } } } else { it := IteratorFromDense(t) i, _, _ := it.NextInvalid() if i != -1 { return true } } return false default: panic("Incompatible type") } } func doMaskCt(T Tensor) interface{} { switch t := T.(type) { case *Dense: // non masked case if !t.IsMasked() { return 0 } count := 0 m := t.mask if len(t.mask) == t.Size() { for _, v := range m { if v { count++ } } } else { it := IteratorFromDense(t) for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { count++ } } return count default: panic("Incompatible type") } } func doNonMaskCt(T Tensor) interface{} { switch t := T.(type) { case *Dense: if !t.IsMasked() { return t.Size() } return t.Size() - doMaskCt(t).(int) default: panic("Incompatible type") } } /* ----------- ************ Finding masked data ----------*/ // FlatNotMaskedContiguous is used to find contiguous unmasked data in a masked array. // Applies to a flattened version of the array. // Returns: A sorted sequence of slices (start index, end index). func (t *Dense) FlatNotMaskedContiguous() []Slice { sliceList := make([]Slice, 0, 4) it := IteratorFromDense(t) for start, _, err := it.NextValid(); err == nil; start, _, err = it.NextValid() { end, _, _ := it.NextInvalid() if end == -1 { end = t.Size() } sliceList = append(sliceList, makeRS(start, end)) } return sliceList } // FlatMaskedContiguous is used to find contiguous masked data in a masked array. // Applies to a flattened version of the array. // Returns: A sorted sequence of slices (start index, end index). func (t *Dense) FlatMaskedContiguous() []Slice { sliceList := make([]Slice, 0, 4) it := IteratorFromDense(t) for start, _, err := it.NextInvalid(); err == nil; start, _, err = it.NextInvalid() { end, _, _ := it.NextValid() if end == -1 { end = t.Size() } sliceList = append(sliceList, makeRS(start, end)) } return sliceList } // FlatNotMaskedEdges is used to find the indices of the first and last unmasked values // Applies to a flattened version of the array. // Returns: A pair of ints. -1 if all values are masked. func (t *Dense) FlatNotMaskedEdges() (int, int) { if !t.IsMasked() { return 0, t.Size() - 1 } var start, end int it := IteratorFromDense(t) it.SetForward() start, _, err := it.NextValid() if err != nil { return -1, -1 } it.SetReverse() end, _, _ = it.NextValid() return start, end } // FlatMaskedEdges is used to find the indices of the first and last masked values // Applies to a flattened version of the array. // Returns: A pair of ints. -1 if all values are unmasked.
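// For example (a sketch, assuming a length-100 row vector T whose mask is true
// only for indices 9 through 92, mirroring TestMaskedFindEdges below):
//
//	start, end := T.FlatMaskedEdges() // start == 9, end == 92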
func (t *Dense) FlatMaskedEdges() (int, int) { if !t.IsMasked() { return 0, t.Size() - 1 } var start, end int it := IteratorFromDense(t) it.SetForward() start, _, err := it.NextInvalid() if err != nil { return -1, -1 } it.SetReverse() end, _, _ = it.NextInvalid() return start, end } // ClumpMasked returns a list of slices corresponding to the masked clumps of a 1-D array // Added to match numpy function names func (t *Dense) ClumpMasked() []Slice { return t.FlatMaskedContiguous() } // ClumpUnmasked returns a list of slices corresponding to the unmasked clumps of a 1-D array // Added to match numpy function names func (t *Dense) ClumpUnmasked() []Slice { return t.FlatNotMaskedContiguous() } tensor-0.9.24/dense_mask_inspection_test.go000066400000000000000000000116571426512615100210670ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) func TestMaskedInspection(t *testing.T) { assert := assert.New(t) var retT *Dense //vector case T := New(Of(Bool), WithShape(1, 12)) T.ResetMask(false) assert.False(T.MaskedAny().(bool)) for i := 0; i < 12; i += 2 { T.mask[i] = true } assert.True(T.MaskedAny().(bool)) assert.True(T.MaskedAny(0).(bool)) assert.False(T.MaskedAll().(bool)) assert.False(T.MaskedAll(0).(bool)) assert.Equal(6, T.MaskedCount()) assert.Equal(6, T.MaskedCount(0)) assert.Equal(6, T.NonMaskedCount()) assert.Equal(6, T.NonMaskedCount(0)) //contiguous mask case /*equivalent python code --------- import numpy.ma as ma a = ma.arange(12).reshape((2, 3, 2)) a[0,0,0]=ma.masked a[0,2,0]=ma.masked print(ma.getmask(a).all()) print(ma.getmask(a).any()) print(ma.count_masked(a)) print(ma.count(a)) print(ma.getmask(a).all(0)) print(ma.getmask(a).any(0)) print(ma.count_masked(a,0)) print(ma.count(a,0)) print(ma.getmask(a).all(1)) print(ma.getmask(a).any(1)) print(ma.count_masked(a,1)) print(ma.count(a,1)) print(ma.getmask(a).all(2)) print(ma.getmask(a).any(2)) print(ma.count_masked(a,2)) print(ma.count(a,2)) ----------- */ T = New(Of(Bool), WithShape(2, 3, 2)) T.ResetMask(false) for i := 0; i < 2; i += 2 { for j := 0; j < 3; j += 2 { for k := 0; k < 2; k += 2 { a, b, c := T.strides[0], T.strides[1], T.strides[2] T.mask[i*a+b*j+c*k] = true } } } assert.Equal([]bool{true, false, false, false, true, false, false, false, false, false, false, false}, T.mask) assert.Equal(false, T.MaskedAll()) assert.Equal(true, T.MaskedAny()) assert.Equal(2, T.MaskedCount()) assert.Equal(10, T.NonMaskedCount()) retT = T.MaskedAll(0).(*Dense) assert.Equal([]int{3, 2}, []int(retT.shape)) assert.Equal([]bool{false, false, false, false, false, false}, retT.Bools()) retT = T.MaskedAny(0).(*Dense) assert.Equal([]int{3, 2}, []int(retT.shape)) assert.Equal([]bool{true, false, false, false, true, false}, retT.Bools()) retT = T.MaskedCount(0).(*Dense) assert.Equal([]int{3, 2}, []int(retT.shape)) assert.Equal([]int{1, 0, 0, 0, 1, 0}, retT.Ints()) retT = T.NonMaskedCount(0).(*Dense) assert.Equal([]int{1, 2, 2, 2, 1, 2}, retT.Ints()) retT = T.MaskedAll(1).(*Dense) assert.Equal([]int{2, 2}, []int(retT.shape)) assert.Equal([]bool{false, false, false, false}, retT.Bools()) retT = T.MaskedAny(1).(*Dense) assert.Equal([]int{2, 2}, []int(retT.shape)) assert.Equal([]bool{true, false, false, false}, retT.Bools()) retT = T.MaskedCount(1).(*Dense) assert.Equal([]int{2, 2}, []int(retT.shape)) assert.Equal([]int{2, 0, 0, 0}, retT.Ints()) retT = T.NonMaskedCount(1).(*Dense) assert.Equal([]int{1, 3, 3, 3}, retT.Ints()) retT = T.MaskedAll(2).(*Dense) assert.Equal([]int{2, 3}, []int(retT.shape)) 
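// Reducing the (2, 3, 2) tensor along axis 2 collapses that axis, so the result has shape (2, 3); no (i, j) pair has both of its axis-2 entries masked, hence all six entries below are false.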
assert.Equal([]bool{false, false, false, false, false, false}, retT.Bools()) retT = T.MaskedAny(2).(*Dense) assert.Equal([]int{2, 3}, []int(retT.shape)) assert.Equal([]bool{true, false, true, false, false, false}, retT.Bools()) retT = T.MaskedCount(2).(*Dense) assert.Equal([]int{2, 3}, []int(retT.shape)) assert.Equal([]int{1, 0, 1, 0, 0, 0}, retT.Ints()) retT = T.NonMaskedCount(2).(*Dense) assert.Equal([]int{1, 2, 1, 2, 2, 2}, retT.Ints()) } func TestMaskedFindContiguous(t *testing.T) { assert := assert.New(t) T := NewDense(Int, []int{1, 100}) T.ResetMask(false) retSL := T.FlatNotMaskedContiguous() assert.Equal(1, len(retSL)) assert.Equal(rs{0, 100, 1}, retSL[0].(rs)) // test ability to find unmasked regions sliceList := make([]Slice, 0, 4) sliceList = append(sliceList, makeRS(3, 9), makeRS(14, 27), makeRS(51, 72), makeRS(93, 100)) T.ResetMask(true) for i := range sliceList { tt, _ := T.Slice(nil, sliceList[i]) ts := tt.(*Dense) ts.ResetMask(false) } retSL = T.FlatNotMaskedContiguous() assert.Equal(sliceList, retSL) retSL = T.ClumpUnmasked() assert.Equal(sliceList, retSL) // test ability to find masked regions T.ResetMask(false) for i := range sliceList { tt, _ := T.Slice(nil, sliceList[i]) ts := tt.(*Dense) ts.ResetMask(true) } retSL = T.FlatMaskedContiguous() assert.Equal(sliceList, retSL) retSL = T.ClumpMasked() assert.Equal(sliceList, retSL) } func TestMaskedFindEdges(t *testing.T) { assert := assert.New(t) T := NewDense(Int, []int{1, 100}) sliceList := make([]Slice, 0, 4) sliceList = append(sliceList, makeRS(0, 9), makeRS(14, 27), makeRS(51, 72), makeRS(93, 100)) // test ability to find unmasked edges T.ResetMask(false) for i := range sliceList { tt, _ := T.Slice(nil, sliceList[i]) ts := tt.(*Dense) ts.ResetMask(true) } start, end := T.FlatNotMaskedEdges() assert.Equal(9, start) assert.Equal(92, end) // test ability to find masked edges T.ResetMask(true) for i := range sliceList { tt, _ := T.Slice(nil, sliceList[i]) ts := tt.(*Dense) ts.ResetMask(false) } start, end = T.FlatMaskedEdges() assert.Equal(9, start) assert.Equal(92, end) } tensor-0.9.24/dense_maskcmp_methods.go000066400000000000000000001003511426512615100200060ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
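// Every Masked* comparison method in this file follows one pattern: with a soft
// mask (t.maskIsSoft), each call overwrites the mask with the result of its own
// comparison; with a hard mask, each call ORs its comparison into the existing
// mask, so entries masked by an earlier call stay masked. A minimal sketch of
// the accumulating, hard-mask behaviour (assuming a fresh Float64 tensor):
//
//	T := New(Of(Float64), WithBacking([]float64{0, 1, 2, 3}))
//	T.MaskedEqual(0.0) // mask: [true, false, false, false]
//	T.MaskedEqual(1.0) // mask: [true, true, false, false]; the earlier mask is kept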
package tensor import ( "math" "reflect" "github.com/pkg/errors" ) /* MaskedEqual */ // MaskedEqual sets the mask to true where the corresponding data is equal to val // Any values must be the same type as the tensor func (t *Dense) MaskedEqual(val1 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a == x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a == x) } } } return nil } /* MaskedNotEqual */ // MaskedNotEqual sets the mask to true where the corresponding data is not equal to val // Any values must be the same type as the tensor func (t *Dense) MaskedNotEqual(val1 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case 
reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a != x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a != x) } } } return nil } /* MaskedValues */ // MaskedValues sets the mask to true where the corresponding data is equal to val // Any values must be the same type as the tensor func (t *Dense) MaskedValues(val1 interface{}, val2 interface{}, val3 ...interface{}) (err error) { if !isFloat(t.t) { err = errors.Errorf("Can only do MaskedValues with floating point types") return } if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) y := val2.(float32) delta := float64(1.0e-8) if len(val3) > 0 { delta = float64(val3[0].(float32)) + float64(y)*math.Abs(float64(x)) } 
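// delta here plays the same role as the tolerance in numpy's ma.masked_values: val2 (y) acts as a relative tolerance scaled by |val1|, and the optional val3[0] as an absolute tolerance, so values within delta of x are treated as equal and masked below. Note that when val3 is omitted, delta stays at the 1.0e-8 default and y goes unused.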
if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (math.Abs(float64(a-x)) <= delta) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (math.Abs(float64(a-x)) <= delta) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) y := val2.(float64) delta := float64(1.0e-8) if len(val3) > 0 { delta = float64(val3[0].(float64)) + float64(y)*math.Abs(float64(x)) } if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (math.Abs(float64(a-x)) <= delta) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (math.Abs(float64(a-x)) <= delta) } } } return nil } /* MaskedGreater */ // MaskedGreater sets the mask to true where the corresponding data is greater than val // Any values must be the same type as the tensor func (t *Dense) MaskedGreater(val1 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] 
= mask[i] || (a > x) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a > x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a > x) } } } return nil } /* MaskedGreaterEqual */ // MaskedGreaterEqual sets the mask to true where the corresponding data is greater than or equal to val // Any values must be the same type as the tensor func (t *Dense) MaskedGreaterEqual(val1 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a >= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a >= x) } } } return nil } /* MaskedLess */ // MaskedLess sets the mask to 
true where the corresponding data is less than val // Any values must be the same type as the tensor func (t *Dense) MaskedLess(val1 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a < x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a < x) } } } return nil } /* MaskedLessEqual */ // MaskedLessEqual sets the mask to true where the corresponding data is less than or equal to val // Any values must be the same type as the tensor func (t *Dense) MaskedLessEqual(val1 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) 
} } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = (a <= x) } } else { for i := range data { a := data[i] mask[i] = mask[i] || (a <= x) } } } return nil } /* MaskedInside */ // MaskedInside sets the mask to true where the corresponding data is inside range of val // Any values must be the same type as the tensor func (t *Dense) MaskedInside(val1 interface{}, val2 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) y := val2.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) y := val2.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range 
data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) y := val2.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) y := val2.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) y := val2.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) y := val2.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) y := val2.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) y := val2.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) y := val2.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) y := val2.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) y := val2.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) y := val2.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) y := val2.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a >= x) && (a <= y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a >= x) && (a <= y)) } } } return nil } /* MaskedOutside */ // MaskedOutside sets the mask to true where the corresponding data is outside range of val // Any values must be the same type as the tensor func (t *Dense) MaskedOutside(val1 interface{}, val2 interface{}) (err error) { if !t.IsMasked() { t.makeMask() } switch t.t.Kind() { case reflect.Int: data := t.Ints() mask := t.mask x := val1.(int) y := val2.(int) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { 
for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Int8: data := t.Int8s() mask := t.mask x := val1.(int8) y := val2.(int8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Int16: data := t.Int16s() mask := t.mask x := val1.(int16) y := val2.(int16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Int32: data := t.Int32s() mask := t.mask x := val1.(int32) y := val2.(int32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Int64: data := t.Int64s() mask := t.mask x := val1.(int64) y := val2.(int64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Uint: data := t.Uints() mask := t.mask x := val1.(uint) y := val2.(uint) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Uint8: data := t.Uint8s() mask := t.mask x := val1.(uint8) y := val2.(uint8) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Uint16: data := t.Uint16s() mask := t.mask x := val1.(uint16) y := val2.(uint16) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Uint32: data := t.Uint32s() mask := t.mask x := val1.(uint32) y := val2.(uint32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Uint64: data := t.Uint64s() mask := t.mask x := val1.(uint64) y := val2.(uint64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Float32: data := t.Float32s() mask := t.mask x := val1.(float32) y := val2.(float32) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.Float64: data := t.Float64s() mask := t.mask x := val1.(float64) y := val2.(float64) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } case reflect.String: data := t.Strings() mask := t.mask x := val1.(string) y := val2.(string) if t.maskIsSoft { for i := range data { a := data[i] mask[i] = ((a < x) || (a > y)) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ((a < x) || (a > y)) } } } return nil } tensor-0.9.24/dense_maskcmp_methods_test.go000066400000000000000000003470711426512615100210610ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
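// A note on the arithmetic the generated tests below rely on: a 2x3x4x5 tensor
// has 120 elements, and masking the five values 0, 10, 20, 30 and 40 leaves 115
// valid entries, so Next, NextValid and NextInvalid should visit 120, 115 and 5
// elements respectively. A minimal sketch of the counting loop they all use
// (assuming a masked *Dense T):
//
//	it := IteratorFromDense(T)
//	n := 0
//	for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
//		n++ // visits only masked elements
//	}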
package tensor import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) /* MaskedEqual */ func TestDense_MaskedEqual_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) 
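// The mask starts out hard, so the second MaskedEqual call ORs into the existing mask instead of overwriting it; both element 0 and element 1 stay masked.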
T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for 
_, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func 
TestDense_MaskedEqual_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedEqual_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedNotEqual */ func TestDense_MaskedNotEqual_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) 
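	// The 2×3×4×5 fixture holds 2*3*4*5 = 120 elements, filled with 0..119
	// in flat (row-major) order, so mask index i lines up one-to-one with
	// data value i in all of the index-based assertions below.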
assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it 
:= IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func 
TestDense_MaskedNotEqual_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) 
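	// After MaskedNotEqual(2), the one element equal to 2 should stay
	// unmasked while the previously set mask[0] stays set; the combined
	// assertion below guards against both going wrong at once (2 masked
	// while 0 is not).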
assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedNotEqual_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedValues */ func TestDense_MaskedValues_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && 
!T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedValues_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedGreater */ func TestDense_MaskedGreater_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == 
nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) 
T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it 
:= IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } 
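	// NextInvalid visits only masked positions — the five multiples of 10
	// (0, 10, 20, 30, 40) masked above — so j must land on exactly 5.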
it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreater_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedGreaterEqual */ func TestDense_MaskedGreaterEqual_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && 
T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := 
IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() 
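	// The valid and invalid passes partition the tensor between them:
	// 115 unmasked + 5 masked == 120 == 2*3*4*5 elements in total.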
assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedGreaterEqual_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) 
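	// The String variant presumably relies on Go's native string ordering,
	// so the range masks below compare lexicographically ("10" < "2", for
	// instance); the probed indices "0", "1", "22" and "23" are ones where
	// lexicographic and numeric ordering happen to agree.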
T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedLess */ func TestDense_MaskedLess_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 
0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, 
j) } func TestDense_MaskedLess_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && 
!(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLess_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { 
T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedLessEqual */ func TestDense_MaskedLessEqual_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() 
assert.Equal(5, j) } func TestDense_MaskedLessEqual_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && 
!(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 
for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedLessEqual_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = 
it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedInside */ func TestDense_MaskedInside_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) 
assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err 
== nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_F32(t *testing.T) { 
assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedInside_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } /* MaskedOutside */ func TestDense_MaskedOutside_I(t *testing.T) { assert := assert.New(t) T := New(Of(Int), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Ints() for i := range data { data[i] = int(i) } T.MaskedEqual(int(0)) assert.True(T.IsMasked()) T.MaskedEqual(int(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int(2)) 
assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int(1), int(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int(1), int(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_I8(t *testing.T) { assert := assert.New(t) T := New(Of(Int8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int8s() for i := range data { data[i] = int8(i) } T.MaskedEqual(int8(0)) assert.True(T.IsMasked()) T.MaskedEqual(int8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int8(1), int8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int8(1), int8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_I16(t *testing.T) { assert := assert.New(t) T := New(Of(Int16), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int16s() for i := range data { data[i] = int16(i) } T.MaskedEqual(int16(0)) assert.True(T.IsMasked()) T.MaskedEqual(int16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int16(1), int16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int16(1), int16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_I32(t *testing.T) { assert := assert.New(t) T := New(Of(Int32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int32s() for i := range data { data[i] = int32(i) } T.MaskedEqual(int32(0)) assert.True(T.IsMasked()) T.MaskedEqual(int32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int32(1), int32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int32(1), int32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == 
nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_I64(t *testing.T) { assert := assert.New(t) T := New(Of(Int64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Int64s() for i := range data { data[i] = int64(i) } T.MaskedEqual(int64(0)) assert.True(T.IsMasked()) T.MaskedEqual(int64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(int64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(int64(1), int64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(int64(1), int64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(int64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_U(t *testing.T) { assert := assert.New(t) T := New(Of(Uint), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uints() for i := range data { data[i] = uint(i) } T.MaskedEqual(uint(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint(1), uint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint(1), uint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_U8(t *testing.T) { assert := assert.New(t) T := New(Of(Uint8), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint8s() for i := range data { data[i] = uint8(i) } T.MaskedEqual(uint8(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint8(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint8(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint8(1), uint8(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint8(1), uint8(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint8(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_U16(t *testing.T) { assert := assert.New(t) T := New(Of(Uint16), 
WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint16s() for i := range data { data[i] = uint16(i) } T.MaskedEqual(uint16(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint16(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint16(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint16(1), uint16(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint16(1), uint16(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint16(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_U32(t *testing.T) { assert := assert.New(t) T := New(Of(Uint32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint32s() for i := range data { data[i] = uint32(i) } T.MaskedEqual(uint32(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint32(1), uint32(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint32(1), uint32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_U64(t *testing.T) { assert := assert.New(t) T := New(Of(Uint64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Uint64s() for i := range data { data[i] = uint64(i) } T.MaskedEqual(uint64(0)) assert.True(T.IsMasked()) T.MaskedEqual(uint64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(uint64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(uint64(1), uint64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(uint64(1), uint64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(uint64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_F32(t *testing.T) { assert := assert.New(t) T := New(Of(Float32), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float32s() for i := range data { data[i] = float32(i) } T.MaskedEqual(float32(0)) assert.True(T.IsMasked()) T.MaskedEqual(float32(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float32(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float32(1), float32(22)) 
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float32(1), float32(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float32(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_F64(t *testing.T) { assert := assert.New(t) T := New(Of(Float64), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Float64s() for i := range data { data[i] = float64(i) } T.MaskedEqual(float64(0)) assert.True(T.IsMasked()) T.MaskedEqual(float64(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(float64(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(float64(1), float64(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(float64(1), float64(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(float64(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } func TestDense_MaskedOutside_Str(t *testing.T) { assert := assert.New(t) T := New(Of(String), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.Strings() for i := range data { data[i] = fmt.Sprint(i) } T.MaskedEqual(fmt.Sprint(0)) assert.True(T.IsMasked()) T.MaskedEqual(fmt.Sprint(1)) assert.True(T.mask[0] && T.mask[1]) T.MaskedNotEqual(fmt.Sprint(2)) assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { T.MaskedEqual(fmt.Sprint(i * 10)) } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5, j) } tensor-0.9.24/dense_matop.go000066400000000000000000000224101426512615100157470ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) // T performs a thunked transpose. It doesn't actually do anything, except store extra information about the post-transposed shapes and strides // Usually this is more than enough, as BLAS will handle the rest of the transpose func (t *Dense) T(axes ...int) (err error) { var transform AP if transform, axes, err = t.AP.T(axes...); err != nil { return handleNoOp(err) } // is there any old transposes that need to be done first? 
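// (t.old holds the AP from before the previous thunked transpose; a non-zero t.old means a transpose is still pending.)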
// this is important, because any old transposes for dim >=3 are merely permutations of the strides if !t.old.IsZero() { if t.IsVector() { // the transform that was calculated was a waste of time - return it to the pool, then untranspose t.UT() return } // check if the current axes are just a reverse of the previous transpose's isReversed := true for i, s := range t.oshape() { if transform.Shape()[i] != s { isReversed = false break } } // if it is reversed, we just restore the backed-up one if isReversed { t.UT() return } // cool beans. No funny reversals, so we'd have to actually do the pending transpose first t.Transpose() } // swap out the old and the new t.old = t.AP t.transposeWith = axes t.AP = transform return nil } // UT is a quick way to untranspose a currently transposed *Dense. // The reason for having this is quite simply illustrated by this problem: // T = NewTensor(WithShape(2,3,4)) // T.T(1,2,0) // // To untranspose that, we'd need to apply a transpose of (2,0,1). // This means having to keep track of and calculate the transposes. // Instead, here's a helpful convenience function to instantly untranspose any previous transposes. // // Nothing will happen if there was no previous transpose. func (t *Dense) UT() { if !t.old.IsZero() { ReturnInts(t.transposeWith) t.AP = t.old t.old.zeroOnly() t.transposeWith = nil } } // SafeT is exactly like T(), except that it returns a new *Dense. The data is also copied over, unmoved. func (t *Dense) SafeT(axes ...int) (retVal *Dense, err error) { var transform AP if transform, axes, err = t.AP.T(axes...); err != nil { if err = handleNoOp(err); err != nil { return } } retVal = recycledDense(t.t, Shape{t.len()}, WithEngine(t.e)) copyDense(retVal, t) retVal.e = t.e retVal.oe = t.oe retVal.AP = transform t.AP.CloneTo(&retVal.old) retVal.transposeWith = axes return } // At returns the value at the given coordinate. func (t *Dense) At(coords ...int) (interface{}, error) { if !t.IsNativelyAccessible() { return nil, errors.Errorf(inaccessibleData, t) } if len(coords) != t.Dims() { return nil, errors.Errorf(dimMismatch, t.Dims(), len(coords)) } at, err := t.at(coords...) if err != nil { return nil, errors.Wrap(err, "At()") } return t.Get(at), nil } // MaskAt returns the value of the mask at a given coordinate. // It returns false (valid) if the tensor is not masked. func (t *Dense) MaskAt(coords ...int) (bool, error) { if !t.IsMasked() { return false, nil } if !t.IsNativelyAccessible() { return false, errors.Errorf(inaccessibleData, t) } if len(coords) != t.Dims() { return true, errors.Errorf(dimMismatch, t.Dims(), len(coords)) } at, err := t.maskAt(coords...) if err != nil { return true, errors.Wrap(err, "MaskAt()") } return t.mask[at], nil } // SetAt sets the value at the given coordinate. func (t *Dense) SetAt(v interface{}, coords ...int) error { if !t.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, t) } if len(coords) != t.Dims() { return errors.Errorf(dimMismatch, t.Dims(), len(coords)) } at, err := t.at(coords...) if err != nil { return errors.Wrap(err, "SetAt()") } t.Set(at, v) return nil }
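// An illustrative sketch of how At/SetAt address elements (values assumed for illustration, patterned after the atTests table in dense_matop_test.go):
//	T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
//	v, _ := T.At(1, 2)             // v == float64(5): the flat index is strides[0]*1 + strides[1]*2 = 3*1 + 1*2 = 5
//	_ = T.SetAt(float64(10), 1, 2) // overwrites that same element in place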
// SetMaskAtIndex sets the value of the mask at a given index. func (t *Dense) SetMaskAtIndex(v bool, i int) error { if !t.IsMasked() { return nil } t.mask[i] = v return nil } // SetMaskAt sets the mask value at the given coordinate. func (t *Dense) SetMaskAt(v bool, coords ...int) error { if !t.IsMasked() { return nil } if !t.IsNativelyAccessible() { return errors.Errorf(inaccessibleData, t) } if len(coords) != t.Dims() { return errors.Errorf(dimMismatch, t.Dims(), len(coords)) } at, err := t.maskAt(coords...) if err != nil { return errors.Wrap(err, "SetMaskAt()") } t.mask[at] = v return nil } // CopyTo copies the underlying data to the destination *Dense. The original data is untouched. // Note: CopyTo doesn't care about the metadata of the destination *Dense. Take for example: // T = NewTensor(WithShape(6)) // T2 = NewTensor(WithShape(2,3)) // err = T.CopyTo(T2) // err == nil // // The only time that this will fail is if the underlying sizes are different. func (t *Dense) CopyTo(other *Dense) error { if other == t { return nil // nothing to copy to. Maybe return NoOpErr? } if other.Size() != t.Size() { return errors.Errorf(sizeMismatch, t.Size(), other.Size()) } // easy peasy lemon squeezy if t.viewOf == 0 && other.viewOf == 0 { copyDense(other, t) return nil } // TODO: use copyDenseIter return errors.Errorf(methodNYI, "CopyTo", "views") } // Narrow narrows the tensor along the given dimension, returning a view covering the half-open range [start, start+length) of that dimension. func (t *Dense) Narrow(dim, start, length int) (View, error) { dim = resolveAxis(dim, t.Dims()) slices := make([]Slice, MinInt(dim+1, t.Dims())) slices[dim] = S(start, start+length, 1) return t.Slice(slices...) } // Slice performs slicing on the *Dense Tensor. It returns a view which shares the same underlying memory as the original *Dense. // // Given: // T = NewTensor(WithShape(2,2), WithBacking(RangeFloat64(0,4))) // V, _ := T.Slice(nil, singleSlice(1)) // T[:, 1] // // Any modification to the values in V will be reflected in T as well. // // The method treats a nil Slice as equivalent to a colon slice. T.Slice(nil) is equivalent to T[:] in Numpy syntax. func (t *Dense) Slice(slices ...Slice) (retVal View, err error) { var newAP AP var ndStart, ndEnd int if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil { return } view := borrowDense() view.t = t.t view.e = t.e view.oe = t.oe view.flag = t.flag view.AP = newAP view.setParentTensor(t) t.sliceInto(ndStart, ndEnd, &view.array) if t.IsMasked() { view.mask = t.mask[ndStart:ndEnd] } return view, err } // SliceInto is a convenience method. It does NOT copy the values - it simply updates the AP of the view. // The underlying data is the same. // This method will override ALL the metadata in view. func (t *Dense) SliceInto(view *Dense, slices ...Slice) (retVal View, err error) { var newAP AP var ndStart, ndEnd int if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil { return } view.AP.zero() view.t = t.t view.e = t.e view.oe = t.oe view.flag = t.flag view.AP = newAP view.setParentTensor(t) t.sliceInto(ndStart, ndEnd, &view.array) if t.IsMasked() { view.mask = t.mask[ndStart:ndEnd] } return view, err } // RollAxis rolls the axis backwards until it lies in the given position. // // This method was adapted from Numpy's Rollaxis. The licence for Numpy is a BSD-like licence and can be found here: https://github.com/numpy/numpy/blob/master/LICENSE.txt // // As a result of being adapted from Numpy, the quirks are also adapted.
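For example (an illustrative case): on a *Dense of shape (2, 3, 4), RollAxis(2, 0, false) is equivalent to calling T(2, 0, 1) on t, yielding shape (4, 2, 3).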
A good guide to reducing the confusion around rollaxis can be found here: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing (see answer by hpaulj) func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) { dims := t.Dims() if !(axis >= 0 && axis < dims) { err = errors.Errorf(invalidAxis, axis, dims) return } if !(start >= 0 && start <= dims) { err = errors.Wrap(errors.Errorf(invalidAxis, start, dims), "Start axis is wrong") return } if axis < start { start-- } if axis == start { retVal = t return } axes := BorrowInts(dims) defer ReturnInts(axes) for i := 0; i < dims; i++ { axes[i] = i } copy(axes[axis:], axes[axis+1:]) copy(axes[start+1:], axes[start:]) axes[start] = axis if safe { return t.SafeT(axes...) } err = t.T(axes...) retVal = t return } /* Private Methods */ // transposeIndex returns the new index given the old index. func (t *Dense) transposeIndex(i int, transposePat, strides []int) int { oldCoord, err := Itol(i, t.oshape(), t.ostrides()) if err != nil { err = errors.Wrapf(err, "transposeIndex ItoL failure. i %d original shape %v. original strides %v", i, t.oshape(), t.ostrides()) panic(err) } /* coordss, _ := Permute(transposePat, oldCoord) coords := coordss[0] expShape := t.Shape() index, _ := Ltoi(expShape, strides, coords...) */ // The above is the "conceptual" algorithm. // Too many checks above slow things down, so the below is the "optimized" edition. var index int for i, axis := range transposePat { index += oldCoord[axis] * strides[i] } return index } // at returns the flat index to which the given coordinate refers. // This function encapsulates the addressing of elements in a contiguous block. // For a 2D ndarray, ndarray.at(i,j) is // at = ndarray.strides[0]*i + ndarray.strides[1]*j // This is, of course, extensible to any number of dimensions. func (t *Dense) at(coords ...int) (at int, err error) { return Ltoi(t.Shape(), t.Strides(), coords...) } // maskAt returns the mask index to which the given coordinate refers. func (t *Dense) maskAt(coords ...int) (at int, err error) { // TODO: add check for non-masked tensor return t.at(coords...) } tensor-0.9.24/dense_matop_memmove.go000066400000000000000000000073401426512615100175010ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" // This file contains code pertaining to tensor operations that actually move memory. // Transpose() actually transposes the data. // This is a generalized version of the in-place matrix transposition algorithm from Wikipedia: // https://en.wikipedia.org/wiki/In-place_matrix_transposition func (t *Dense) Transpose() error { // if there is no old info, that means the current info is the latest, and not the transpose if t.old.IsZero() { return nil } if t.IsScalar() { return nil // cannot transpose scalars - no data movement } defer func() { t.old.zero() t.transposeWith = nil }() expShape := t.Shape() // important! because the strides would have changed once the underlying data changed var expStrides []int if t.AP.o.IsColMajor() { expStrides = expShape.CalcStridesColMajor() } else { expStrides = expShape.CalcStrides() } defer ReturnInts(expStrides) defer func() { copy(t.AP.strides, expStrides) // dimensions do not change, so it's actually safe to do this t.sanity() }() if t.IsVector() { // no data movement return nil } // actually move data var e Engine = t.e transposer, ok := e.(Transposer) if !ok { return errors.Errorf("Engine does not support Transpose()") } return transposer.Transpose(t, expStrides) }
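// An illustrative sketch contrasting the thunked T() with the memory-moving Transpose(); the values below are taken from the transpose tests in dense_matop_test.go:
//	T := New(WithShape(2, 3), WithBacking([]int64{0, 1, 2, 3, 4, 5}))
//	T.T()         // thunked: shape becomes (3, 2) and strides (1, 3), but the data is still [0 1 2 3 4 5]
//	T.Transpose() // moves memory: the data is rearranged to [0 3 1 4 2 5] and the strides become (2, 1)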
// Repeat is like Numpy's repeat. It repeats the elements of an array. // The repeats param defines how many times each element in the axis is repeated. // Just like Numpy, the repeats param is broadcast to fit the size of the given axis. func (t *Dense) Repeat(axis int, repeats ...int) (retVal Tensor, err error) { e := t.Engine() if rp, ok := e.(Repeater); ok { return rp.Repeat(t, axis, repeats...) } return nil, errors.New("Engine does not support Repeat") } // Concat concatenates the other tensors along the given axis. It is like Numpy's concatenate() function. func (t *Dense) Concat(axis int, Ts ...*Dense) (retVal *Dense, err error) { e := t.Engine() if c, ok := e.(Concater); ok { var ret Tensor others := densesToTensors(Ts) if ret, err = c.Concat(t, axis, others...); err != nil { return nil, errors.Wrapf(err, opFail, "Concat") } return ret.(*Dense), nil } return nil, errors.New("Engine does not support Concat") } // Hstack stacks other tensors columnwise (horizontal stacking). func (t *Dense) Hstack(others ...*Dense) (*Dense, error) { // check that everything is at least 1D if t.Dims() == 0 { return nil, errors.Errorf(atleastDims, 1) } for _, d := range others { if d.Dims() < 1 { return nil, errors.Errorf(atleastDims, 1) } } if t.Dims() == 1 { return t.Concat(0, others...) } return t.Concat(1, others...) } // Vstack stacks other tensors rowwise (vertical stacking). Vertical stacking requires all the involved Tensors to have at least 2 dimensions. func (t *Dense) Vstack(others ...*Dense) (*Dense, error) { // check that everything is at least 2D if t.Dims() < 2 { return nil, errors.Errorf(atleastDims, 2) } for _, d := range others { if d.Dims() < 2 { return nil, errors.Errorf(atleastDims, 2) } } return t.Concat(0, others...) } // Stack stacks the other tensors along the axis specified, joining them along a new axis. It is like Numpy's stack function. func (t *Dense) Stack(axis int, others ...*Dense) (retVal *Dense, err error) { var ret DenseTensor var ok bool if ret, err = t.stackDense(axis, densesToDenseTensors(others)...); err != nil { return nil, err } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf("Return not *Dense") } return } func (t *Dense) stackDense(axis int, others ...DenseTensor) (retVal DenseTensor, err error) { if ds, ok := t.Engine().(DenseStacker); ok { return ds.StackDense(t, axis, others...)
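// (Engines that do not implement the DenseStacker interface fall through to the error below.)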
} return nil, errors.Errorf("Engine does not support DenseStacker") } tensor-0.9.24/dense_matop_test.go000066400000000000000000001221511426512615100170110ustar00rootroot00000000000000package tensor import ( "fmt" "testing" "github.com/stretchr/testify/assert" "gorgonia.org/vecf64" ) func cloneArray(a interface{}) interface{} { switch at := a.(type) { case []float64: retVal := make([]float64, len(at)) copy(retVal, at) return retVal case []float32: retVal := make([]float32, len(at)) copy(retVal, at) return retVal case []int: retVal := make([]int, len(at)) copy(retVal, at) return retVal case []int64: retVal := make([]int64, len(at)) copy(retVal, at) return retVal case []int32: retVal := make([]int32, len(at)) copy(retVal, at) return retVal case []byte: retVal := make([]byte, len(at)) copy(retVal, at) return retVal case []bool: retVal := make([]bool, len(at)) copy(retVal, at) return retVal } return nil } func castToDt(val float64, dt Dtype) interface{} { switch dt { case Bool: return false case Int: return int(val) case Int8: return int8(val) case Int16: return int16(val) case Int32: return int32(val) case Int64: return int64(val) case Uint: return uint(val) case Uint8: return uint8(val) case Uint16: return uint16(val) case Uint32: return uint32(val) case Uint64: return uint64(val) case Float32: return float32(val) case Float64: return float64(val) default: return 0 } } var atTests = []struct { data interface{} shape Shape coord []int correct interface{} err bool }{ // matrix {[]float64{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{0, 1}, float64(1), false}, {[]float32{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{1, 1}, float32(4), false}, {[]float64{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{1, 2, 3}, nil, true}, // 3-tensor {[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{2, 3, 4}, []int{1, 1, 1}, 17, false}, {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{2, 3, 4}, []int{1, 2, 3}, int64(23), false}, {[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{2, 3, 4}, []int{0, 3, 2}, 23, true}, } func TestDense_At(t *testing.T) { for i, ats := range atTests { T := New(WithShape(ats.shape...), WithBacking(ats.data)) got, err := T.At(ats.coord...) if checkErr(t, ats.err, err, "At", i) { continue } if got != ats.correct { t.Errorf("Expected %v. Got %v", ats.correct, got) } } } func Test_transposeIndex(t *testing.T) { a := []byte{0, 1, 2, 3} T := New(WithShape(2, 2), WithBacking(a)) correct := []int{0, 2, 1, 3} for i, v := range correct { got := T.transposeIndex(i, []int{1, 0}, []int{2, 1}) if v != got { t.Errorf("transposeIndex error. Expected %v. 
Got %v", v, got) } } } var transposeTests = []struct { name string shape Shape transposeWith []int data interface{} correctShape Shape correctStrides []int // after .T() correctStrides2 []int // after .Transpose() correctData interface{} }{ {"c.T()", Shape{4, 1}, nil, []float64{0, 1, 2, 3}, Shape{1, 4}, []int{1, 1}, []int{4, 1}, []float64{0, 1, 2, 3}}, {"r.T()", Shape{1, 4}, nil, []float32{0, 1, 2, 3}, Shape{4, 1}, []int{1, 1}, []int{1, 1}, []float32{0, 1, 2, 3}}, {"v.T()", Shape{4}, nil, []int{0, 1, 2, 3}, Shape{4}, []int{1}, []int{1}, []int{0, 1, 2, 3}}, {"M.T()", Shape{2, 3}, nil, []int64{0, 1, 2, 3, 4, 5}, Shape{3, 2}, []int{1, 3}, []int{2, 1}, []int64{0, 3, 1, 4, 2, 5}}, {"M.T(0,1) (NOOP)", Shape{2, 3}, []int{0, 1}, []int32{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{3, 1}, []int{3, 1}, []int32{0, 1, 2, 3, 4, 5}}, {"3T.T()", Shape{2, 3, 4}, nil, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{4, 3, 2}, []int{1, 4, 12}, []int{6, 2, 1}, []byte{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}}, {"3T.T(2, 1, 0) (Same as .T())", Shape{2, 3, 4}, []int{2, 1, 0}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{4, 3, 2}, []int{1, 4, 12}, []int{6, 2, 1}, []int{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}}, {"3T.T(2, 1, 0) (Same as .T())", Shape{2, 3, 4}, []int{2, 1, 0}, []int16{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{4, 3, 2}, []int{1, 4, 12}, []int{6, 2, 1}, []int16{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}}, {"3T.T(0, 2, 1)", Shape{2, 3, 4}, []int{0, 2, 1}, []int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{2, 4, 3}, []int{12, 1, 4}, []int{12, 3, 1}, []int32{0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}}, {"3T.T{1, 0, 2)", Shape{2, 3, 4}, []int{1, 0, 2}, []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{3, 2, 4}, []int{4, 12, 1}, []int{8, 4, 1}, []float64{0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}}, {"3T.T{1, 2, 0)", Shape{2, 3, 4}, []int{1, 2, 0}, []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{3, 4, 2}, []int{4, 1, 12}, []int{8, 2, 1}, []float64{0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23}}, {"3T.T{2, 0, 1)", Shape{2, 3, 4}, []int{2, 0, 1}, []float32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, Shape{4, 2, 3}, []int{1, 12, 4}, []int{6, 3, 1}, []float32{0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}}, {"3T.T{0, 1, 2} (NOOP)", Shape{2, 3, 4}, []int{0, 1, 2}, []bool{true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false}, Shape{2, 3, 4}, []int{12, 4, 1}, []int{12, 4, 1}, []bool{true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false}}, {"M[2,2].T for bools, just for completeness sake", Shape{2, 2}, nil, []bool{true, true, false, false}, Shape{2, 2}, []int{1, 2}, []int{2, 1}, []bool{true, false, true, false}, }, {"M[2,2].T for strings, just for completeness sake", Shape{2, 2}, 
nil, []string{"hello", "world", "今日は", "世界"}, Shape{2, 2}, []int{1, 2}, []int{2, 1}, []string{"hello", "今日は", "world", "世界"}, }, } func TestDense_Transpose(t *testing.T) { assert := assert.New(t) var err error // standard transposes for _, tts := range transposeTests { T := New(WithShape(tts.shape...), WithBacking(tts.data)) if err = T.T(tts.transposeWith...); err != nil { t.Errorf("%v - %v", tts.name, err) continue } assert.True(tts.correctShape.Eq(T.Shape()), "Transpose %v Expected shape: %v. Got %v", tts.name, tts.correctShape, T.Shape()) assert.Equal(tts.correctStrides, T.Strides(), "Transpose %v. Expected stride: %v. Got %v", tts.name, tts.correctStrides, T.Strides()) T.Transpose() assert.True(tts.correctShape.Eq(T.Shape()), "Transpose %v Expected shape: %v. Got %v", tts.name, tts.correctShape, T.Shape()) assert.Equal(tts.correctStrides2, T.Strides(), "Transpose2 %v - Expected stride %v. Got %v", tts.name, tts.correctStrides2, T.Strides()) assert.Equal(tts.correctData, T.Data(), "Transpose %v", tts.name) } // test stacked .T() calls var T *Dense // column vector T = New(WithShape(4, 1), WithBacking(Range(Int, 0, 4))) if err = T.T(); err != nil { t.Errorf("Stacked .T() #1 for vector. Error: %v", err) goto matrev } if err = T.T(); err != nil { t.Errorf("Stacked .T() #1 for vector. Error: %v", err) goto matrev } assert.True(T.old.IsZero()) assert.Nil(T.transposeWith) assert.True(T.IsColVec()) matrev: // matrix, reversed T = New(WithShape(2, 3), WithBacking(Range(Byte, 0, 6))) if err = T.T(); err != nil { t.Errorf("Stacked .T() #1 for matrix reverse. Error: %v", err) goto matnorev } if err = T.T(); err != nil { t.Errorf("Stacked .T() #2 for matrix reverse. Error: %v", err) goto matnorev } assert.True(T.old.IsZero()) assert.Nil(T.transposeWith) assert.True(Shape{2, 3}.Eq(T.Shape())) matnorev: // 3-tensor, non reversed T = New(WithShape(2, 3, 4), WithBacking(Range(Int64, 0, 24))) if err = T.T(); err != nil { t.Fatalf("Stacked .T() #1 for tensor with no reverse. Error: %v", err) } if err = T.T(2, 0, 1); err != nil { t.Fatalf("Stacked .T() #2 for tensor with no reverse. 
Error: %v", err) } correctData := []int64{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23} assert.Equal(correctData, T.Data()) assert.Equal([]int{2, 0, 1}, T.transposeWith) assert.NotNil(T.old) } func TestTUT(t *testing.T) { assert := assert.New(t) var T *Dense T = New(Of(Float64), WithShape(2, 3, 4)) T.T() T.UT() assert.True(T.old.IsZero()) assert.Nil(T.transposeWith) T.T(2, 0, 1) T.UT() assert.True(T.old.IsZero()) assert.Nil(T.transposeWith) } type repeatTest struct { name string tensor *Dense ne bool // should assert tensor not equal axis int repeats []int correct interface{} shape Shape err bool } var repeatTests = []repeatTest{ {"Scalar Repeat on axis 0", New(FromScalar(true)), true, 0, []int{3}, []bool{true, true, true}, Shape{3}, false, }, {"Scalar Repeat on axis 1", New(FromScalar(byte(255))), false, 1, []int{3}, []byte{255, 255, 255}, Shape{1, 3}, false, }, {"Vector Repeat on axis 0", New(WithShape(2), WithBacking([]int32{1, 2})), false, 0, []int{3}, []int32{1, 1, 1, 2, 2, 2}, Shape{6}, false, }, {"ColVec Repeat on axis 0", New(WithShape(2, 1), WithBacking([]int64{1, 2})), false, 0, []int{3}, []int64{1, 1, 1, 2, 2, 2}, Shape{6, 1}, false, }, {"RowVec Repeat on axis 0", New(WithShape(1, 2), WithBacking([]int{1, 2})), false, 0, []int{3}, []int{1, 2, 1, 2, 1, 2}, Shape{3, 2}, false, }, {"ColVec Repeat on axis 1", New(WithShape(2, 1), WithBacking([]float32{1, 2})), false, 1, []int{3}, []float32{1, 1, 1, 2, 2, 2}, Shape{2, 3}, false, }, {"RowVec Repeat on axis 1", New(WithShape(1, 2), WithBacking([]float64{1, 2})), false, 1, []int{3}, []float64{1, 1, 1, 2, 2, 2}, Shape{1, 6}, false, }, {"Vector Repeat on all axes", New(WithShape(2), WithBacking([]byte{1, 2})), false, AllAxes, []int{3}, []byte{1, 1, 1, 2, 2, 2}, Shape{6}, false, }, {"ColVec Repeat on all axes", New(WithShape(2, 1), WithBacking([]int32{1, 2})), false, AllAxes, []int{3}, []int32{1, 1, 1, 2, 2, 2}, Shape{6}, false, }, {"RowVec Repeat on all axes", New(WithShape(1, 2), WithBacking([]int64{1, 2})), false, AllAxes, []int{3}, []int64{1, 1, 1, 2, 2, 2}, Shape{6}, false, }, {"M[2,2] Repeat on all axes with repeats = (1,2,1,1)", New(WithShape(2, 2), WithBacking([]int{1, 2, 3, 4})), false, AllAxes, []int{1, 2, 1, 1}, []int{1, 2, 2, 3, 4}, Shape{5}, false, }, {"M[2,2] Repeat on axis 1 with repeats = (2, 1)", New(WithShape(2, 2), WithBacking([]float32{1, 2, 3, 4})), false, 1, []int{2, 1}, []float32{1, 1, 2, 3, 3, 4}, Shape{2, 3}, false, }, {"M[2,2] Repeat on axis 1 with repeats = (1, 2)", New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4})), false, 1, []int{1, 2}, []float64{1, 2, 2, 3, 4, 4}, Shape{2, 3}, false, }, {"M[2,2] Repeat on axis 0 with repeats = (1, 2)", New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4})), false, 0, []int{1, 2}, []float64{1, 2, 3, 4, 3, 4}, Shape{3, 2}, false, }, {"M[2,2] Repeat on axis 0 with repeats = (2, 1)", New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4})), false, 0, []int{2, 1}, []float64{1, 2, 1, 2, 3, 4}, Shape{3, 2}, false, }, {"3T[2,3,2] Repeat on axis 1 with repeats = (1,2,1)", New(WithShape(2, 3, 2), WithBacking(vecf64.Range(1, 2*3*2+1))), false, 1, []int{1, 2, 1}, []float64{1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 9, 10, 9, 10, 11, 12}, Shape{2, 4, 2}, false, }, {"3T[2,3,2] Generic Repeat by 2", New(WithShape(2, 3, 2), WithBacking(vecf64.Range(1, 2*3*2+1))), false, AllAxes, []int{2}, []float64{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12}, Shape{24}, false, }, {"3T[2,3,2] repeat with broadcast errors", New(WithShape(2, 
3, 2), WithBacking(vecf64.Range(1, 2*3*2+1))), false, 0, []int{1, 2, 1}, nil, nil, true, }, // idiots {"Nonexistent axis", New(WithShape(2, 1), WithBacking([]bool{true, false})), false, 2, []int{3}, nil, nil, true, }, } func TestDense_Repeat(t *testing.T) { assert := assert.New(t) for i, test := range repeatTests { T, err := test.tensor.Repeat(test.axis, test.repeats...) if checkErr(t, test.err, err, "Repeat", i) { continue } var D DenseTensor if D, err = getDenseTensor(T); err != nil { t.Errorf("Expected Repeat to return a *Dense. got %v of %T instead", T, T) continue } if test.ne { assert.NotEqual(test.tensor, D, test.name) } assert.Equal(test.correct, D.Data(), test.name) assert.Equal(test.shape, D.Shape(), test.name) } } func TestDense_Repeat_Slow(t *testing.T) { rt2 := make([]repeatTest, len(repeatTests)) for i, rt := range repeatTests { rt2[i] = repeatTest{ name: rt.name, ne: rt.ne, axis: rt.axis, repeats: rt.repeats, correct: rt.correct, shape: rt.shape, err: rt.err, tensor: rt.tensor.Clone().(*Dense), } } for i := range rt2 { maskLen := rt2[i].tensor.len() mask := make([]bool, maskLen) rt2[i].tensor.mask = mask } assert := assert.New(t) for i, test := range rt2 { T, err := test.tensor.Repeat(test.axis, test.repeats...) if checkErr(t, test.err, err, "Repeat", i) { continue } var D DenseTensor if D, err = getDenseTensor(T); err != nil { t.Errorf("Expected Repeat to return a *Dense. got %v of %T instead", T, T) continue } if test.ne { assert.NotEqual(test.tensor, D, test.name) } assert.Equal(test.correct, D.Data(), test.name) assert.Equal(test.shape, D.Shape(), test.name) } } func TestDense_CopyTo(t *testing.T) { assert := assert.New(t) var T, T2 *Dense var T3 Tensor var err error T = New(WithShape(2), WithBacking([]float64{1, 2})) T2 = New(Of(Float64), WithShape(1, 2)) err = T.CopyTo(T2) if err != nil { t.Fatal(err) } assert.Equal(T2.Data(), T.Data()) // now, modify T1's data T.Set(0, float64(5000)) assert.NotEqual(T2.Data(), T.Data()) // test views T = New(Of(Byte), WithShape(3, 3)) T2 = New(Of(Byte), WithShape(2, 2)) T3, _ = T.Slice(makeRS(0, 2), makeRS(0, 2)) // T[0:2, 0:2], shape == (2,2) if err = T2.CopyTo(T3.(*Dense)); err != nil { t.Log(err) // for now it's a not yet implemented error. TODO: FIX THIS } // dumbass time T = New(Of(Float32), WithShape(3, 3)) T2 = New(Of(Float32), WithShape(2, 2)) if err = T.CopyTo(T2); err == nil { t.Error("Expected an error") } if err = T.CopyTo(T); err != nil { t.Error("Copying a *Tensor to itself should yield no error. 
") } } var denseSliceTests = []struct { name string data interface{} shape Shape slices []Slice correctShape Shape correctStride []int correctData interface{} }{ // scalar-equiv vector (issue 102) {"a[0], a is scalar-equiv", []float64{2}, Shape{1}, []Slice{ss(0)}, ScalarShape(), nil, 2.0}, // vector {"a[0]", []bool{true, true, false, false, false}, Shape{5}, []Slice{ss(0)}, ScalarShape(), nil, true}, {"a[0:2]", Range(Byte, 0, 5), Shape{5}, []Slice{makeRS(0, 2)}, Shape{2}, []int{1}, []byte{0, 1}}, {"a[1:5:2]", Range(Int32, 0, 5), Shape{5}, []Slice{makeRS(1, 5, 2)}, Shape{2}, []int{2}, []int32{1, 2, 3, 4}}, // colvec {"c[0]", Range(Int64, 0, 5), Shape{5, 1}, []Slice{ss(0)}, ScalarShape(), nil, int64(0)}, {"c[0:2]", Range(Float32, 0, 5), Shape{5, 1}, []Slice{makeRS(0, 2)}, Shape{2, 1}, []int{1, 1}, []float32{0, 1}}, {"c[1:5:2]", Range(Float64, 0, 5), Shape{5, 1}, []Slice{makeRS(0, 5, 2)}, Shape{2, 1}, []int{2, 1}, []float64{0, 1, 2, 3, 4}}, // // rowvec {"r[0]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{ss(0)}, Shape{1, 5}, []int{1}, []float64{0, 1, 2, 3, 4}}, {"r[0:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{makeRS(0, 2)}, Shape{1, 5}, []int{1}, []float64{0, 1, 2, 3, 4}}, {"r[0:5:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{makeRS(0, 5, 2)}, Shape{1, 5}, []int{1}, []float64{0, 1, 2, 3, 4}}, {"r[:, 0]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{nil, ss(0)}, ScalarShape(), nil, float64(0)}, {"r[:, 0:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{nil, makeRS(0, 2)}, Shape{1, 2}, []int{5, 1}, []float64{0, 1}}, {"r[:, 1:5:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{nil, makeRS(1, 5, 2)}, Shape{1, 2}, []int{5, 2}, []float64{1, 2, 3, 4}}, // // matrix {"A[0]", Range(Float64, 0, 6), Shape{2, 3}, []Slice{ss(0)}, Shape{1, 3}, []int{1}, Range(Float64, 0, 3)}, {"A[0:2]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{makeRS(0, 2)}, Shape{2, 5}, []int{5, 1}, Range(Float64, 0, 10)}, {"A[0, 0]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{ss(0), ss(0)}, ScalarShape(), nil, float64(0)}, {"A[0, 1:5]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{ss(0), makeRS(1, 5)}, Shape{4}, []int{1}, Range(Float64, 1, 5)}, {"A[0, 1:5:2]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{ss(0), makeRS(1, 5, 2)}, Shape{1, 2}, []int{2}, Range(Float64, 1, 5)}, {"A[:, 0]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{nil, ss(0)}, Shape{4, 1}, []int{5}, Range(Float64, 0, 16)}, {"A[:, 1:5]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{nil, makeRS(1, 5)}, Shape{4, 4}, []int{5, 1}, Range(Float64, 1, 20)}, {"A[:, 1:5:2]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{nil, makeRS(1, 5, 2)}, Shape{4, 2}, []int{5, 2}, Range(Float64, 1, 20)}, // 3tensor with leading and trailing 1s {"3T1[0]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{ss(0)}, Shape{9, 1}, []int{1, 1}, Range(Float64, 0, 9)}, {"3T1[nil, 0:2]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(0, 2)}, Shape{1, 2, 1}, []int{9, 1, 1}, Range(Float64, 0, 2)}, {"3T1[nil, 0:5:3]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(0, 5, 3)}, Shape{1, 2, 1}, []int{9, 3, 1}, Range(Float64, 0, 5)}, {"3T1[nil, 1:5:3]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(1, 5, 3)}, Shape{1, 2, 1}, []int{9, 3, 1}, Range(Float64, 1, 5)}, {"3T1[nil, 1:9:3]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(1, 9, 3)}, Shape{1, 3, 1}, []int{9, 3, 1}, Range(Float64, 1, 9)}, // 3tensor {"3T[0]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(0)}, Shape{9, 2}, []int{2, 1}, Range(Float64, 0, 18)}, {"3T[1]", Range(Float64, 0, 36), Shape{2, 9, 2}, 
[]Slice{ss(1)}, Shape{9, 2}, []int{2, 1}, Range(Float64, 18, 36)}, {"3T[1, 2]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), ss(2)}, Shape{2}, []int{1}, Range(Float64, 22, 24)}, {"3T[1, 2:4]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), makeRS(2, 4)}, Shape{2, 2}, []int{2, 1}, Range(Float64, 22, 26)}, {"3T[1, 2:8:2]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), makeRS(2, 8, 2)}, Shape{3, 2}, []int{4, 1}, Range(Float64, 22, 34)}, {"3T[1, 2:8:3]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), makeRS(2, 8, 3)}, Shape{2, 2}, []int{6, 1}, Range(Float64, 22, 34)}, {"3T[1, 2:9:2]", Range(Float64, 0, 126), Shape{2, 9, 7}, []Slice{ss(1), makeRS(2, 9, 2)}, Shape{4, 7}, []int{14, 1}, Range(Float64, 77, 126)}, {"3T[1, 2:9:2, 1]", Range(Float64, 0, 126), Shape{2, 9, 7}, []Slice{ss(1), makeRS(2, 9, 2), ss(1)}, Shape{4}, []int{14}, Range(Float64, 78, 121)}, // should this be a colvec? {"3T[1, 2:9:2, 1:4:2]", Range(Float64, 0, 126), Shape{2, 9, 7}, []Slice{ss(1), makeRS(2, 9, 2), makeRS(1, 4, 2)}, Shape{4, 2}, []int{14, 2}, Range(Float64, 78, 123)}, } func TestDense_Slice(t *testing.T) { assert := assert.New(t) var T *Dense var V Tensor var err error for _, sts := range denseSliceTests { T = New(WithShape(sts.shape...), WithBacking(sts.data)) t.Log(sts.name) if V, err = T.Slice(sts.slices...); err != nil { t.Error(err) continue } assert.True(sts.correctShape.Eq(V.Shape()), "Test: %v - Incorrect Shape. Correct: %v. Got %v", sts.name, sts.correctShape, V.Shape()) assert.Equal(sts.correctStride, V.Strides(), "Test: %v - Incorrect Stride", sts.name) assert.Equal(sts.correctData, V.Data(), "Test: %v - Incorrect Data", sts.name) } // Transposed slice T = New(WithShape(2, 3), WithBacking(Range(Float32, 0, 6))) T.T() V, err = T.Slice(ss(0)) assert.True(Shape{2}.Eq(V.Shape())) assert.Equal([]int{3}, V.Strides()) assert.Equal([]float32{0, 1, 2, 3}, V.Data()) assert.True(V.(*Dense).old.IsZero()) // slice a sliced t.Logf("%v", V) V, err = V.Slice(makeRS(1, 2)) t.Logf("%v", V) assert.True(ScalarShape().Eq(V.Shape())) assert.Equal(float32(3), V.Data()) // And now, ladies and gentlemen, the idiots! // too many slices _, err = T.Slice(ss(1), ss(2), ss(3), ss(4)) if err == nil { t.Error("Expected a DimMismatchError") } // out of range sliced _, err = T.Slice(makeRS(20, 5)) if err == nil { t.Error("Expected an IndexError") } // surely nobody can be this dumb?
Having a start of negatives _, err = T.Slice(makeRS(-1, 1)) if err == nil { t.Error("Expected an IndexError") } } func TestDense_Narrow(t *testing.T) { testCases := []struct { x *Dense dim, start, length int expected *Dense }{ { x: New( WithShape(3), WithBacking([]int{1, 2, 3}), ), dim: 0, start: 1, length: 1, expected: New( WithShape(), WithBacking([]int{2}), ), }, { x: New( WithShape(3, 3), WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}), ), dim: 0, start: 0, length: 2, expected: New( WithShape(2, 3), WithBacking([]int{1, 2, 3, 4, 5, 6}), ), }, { x: New( WithShape(3, 3), WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}), ), dim: 1, start: 1, length: 2, expected: New( WithShape(3, 2), WithBacking([]int{2, 3, 5, 6, 8, 9}), ), }, { x: New( WithShape(3, 3), WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}), ), dim: 1, start: 0, length: 1, expected: New( WithShape(3), WithBacking([]int{1, 4, 7}), ), }, } for i, tC := range testCases { t.Run(fmt.Sprintf("Example #%d narrow(%v,%d,%d,%v)", i+1, tC.x.Shape(), tC.dim, tC.start, tC.length), func(t *testing.T) { c := assert.New(t) // t.Logf("X:\n%v", tC.x) y, err := tC.x.Narrow(tC.dim, tC.start, tC.length) c.NoError(err) // t.Logf("y:\n%v", y) yMat := y.Materialize() c.Equal(tC.expected.Shape(), yMat.Shape()) c.Equal(tC.expected.Data(), yMat.Data()) // err = y.Memset(1024) // c.NoError(err) // t.Logf("After Memset\nY: %v\nX:\n%v", y, tC.x) }) } } func TestDense_SliceInto(t *testing.T) { V := New(WithShape(100), Of(Byte)) T := New(WithBacking([]float64{1, 2, 3, 4, 5, 6}), WithShape(2, 3)) T.SliceInto(V, ss(0)) assert.True(t, Shape{3}.Eq(V.Shape()), "Got %v", V.Shape()) assert.Equal(t, []float64{1, 2, 3}, V.Data()) } var rollaxisTests = []struct { axis, start int correctShape Shape }{ {0, 0, Shape{1, 2, 3, 4}}, {0, 1, Shape{1, 2, 3, 4}}, {0, 2, Shape{2, 1, 3, 4}}, {0, 3, Shape{2, 3, 1, 4}}, {0, 4, Shape{2, 3, 4, 1}}, {1, 0, Shape{2, 1, 3, 4}}, {1, 1, Shape{1, 2, 3, 4}}, {1, 2, Shape{1, 2, 3, 4}}, {1, 3, Shape{1, 3, 2, 4}}, {1, 4, Shape{1, 3, 4, 2}}, {2, 0, Shape{3, 1, 2, 4}}, {2, 1, Shape{1, 3, 2, 4}}, {2, 2, Shape{1, 2, 3, 4}}, {2, 3, Shape{1, 2, 3, 4}}, {2, 4, Shape{1, 2, 4, 3}}, {3, 0, Shape{4, 1, 2, 3}}, {3, 1, Shape{1, 4, 2, 3}}, {3, 2, Shape{1, 2, 4, 3}}, {3, 3, Shape{1, 2, 3, 4}}, {3, 4, Shape{1, 2, 3, 4}}, } // The RollAxis tests are directly adapted from Numpy's test cases.
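// As a quick orientation (a sketch that follows the table above; with
// safe=false the receiver itself is mutated): given a fresh
// T := New(Of(Byte), WithShape(1, 2, 3, 4)),
//	T.RollAxis(3, 0, false) // axis 3 rolled to the front: shape becomes (4, 1, 2, 3)
//	T.RollAxis(0, 4, false) // on another fresh tensor: axis 0 rolled to the back: shape becomes (2, 3, 4, 1)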
func TestDense_RollAxis(t *testing.T) { assert := assert.New(t) var T *Dense var err error for _, rats := range rollaxisTests { T = New(Of(Byte), WithShape(1, 2, 3, 4)) if _, err = T.RollAxis(rats.axis, rats.start, false); assert.NoError(err) { assert.True(rats.correctShape.Eq(T.Shape()), "%d %d Expected %v, got %v", rats.axis, rats.start, rats.correctShape, T.Shape()) } } } var concatTests = []struct { name string dt Dtype a interface{} b interface{} shape Shape shapeB Shape axis int correctShape Shape correctData interface{} }{ // Float64 {"vector", Float64, nil, nil, Shape{2}, nil, 0, Shape{4}, []float64{0, 1, 0, 1}}, {"matrix; axis 0 ", Float64, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []float64{0, 1, 2, 3, 0, 1, 2, 3}}, {"matrix; axis 1 ", Float64, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []float64{0, 1, 0, 1, 2, 3, 2, 3}}, // Float32 {"vector", Float32, nil, nil, Shape{2}, nil, 0, Shape{4}, []float32{0, 1, 0, 1}}, {"matrix; axis 0 ", Float32, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []float32{0, 1, 2, 3, 0, 1, 2, 3}}, {"matrix; axis 1 ", Float32, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []float32{0, 1, 0, 1, 2, 3, 2, 3}}, // Int {"vector", Int, nil, nil, Shape{2}, nil, 0, Shape{4}, []int{0, 1, 0, 1}}, {"matrix; axis 0 ", Int, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []int{0, 1, 2, 3, 0, 1, 2, 3}}, {"matrix; axis 1 ", Int, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []int{0, 1, 0, 1, 2, 3, 2, 3}}, // Int64 {"vector", Int64, nil, nil, Shape{2}, nil, 0, Shape{4}, []int64{0, 1, 0, 1}}, {"matrix; axis 0 ", Int64, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []int64{0, 1, 2, 3, 0, 1, 2, 3}}, {"matrix; axis 1 ", Int64, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []int64{0, 1, 0, 1, 2, 3, 2, 3}}, // Int32 {"vector", Int32, nil, nil, Shape{2}, nil, 0, Shape{4}, []int32{0, 1, 0, 1}}, {"matrix; axis 0 ", Int32, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []int32{0, 1, 2, 3, 0, 1, 2, 3}}, {"matrix; axis 1 ", Int32, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []int32{0, 1, 0, 1, 2, 3, 2, 3}}, // Byte {"vector", Byte, nil, nil, Shape{2}, nil, 0, Shape{4}, []byte{0, 1, 0, 1}}, {"matrix; axis 0 ", Byte, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []byte{0, 1, 2, 3, 0, 1, 2, 3}}, {"matrix; axis 1 ", Byte, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []byte{0, 1, 0, 1, 2, 3, 2, 3}}, // Bool {"vector", Bool, []bool{true, false}, nil, Shape{2}, nil, 0, Shape{4}, []bool{true, false, true, false}}, {"matrix; axis 0 ", Bool, []bool{true, false, true, false}, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []bool{true, false, true, false, true, false, true, false}}, {"matrix; axis 1 ", Bool, []bool{true, false, true, false}, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []bool{true, false, true, false, true, false, true, false}}, // gorgonia/gorgonia#218 related {"matrix; axis 0", Float64, nil, nil, Shape{2, 2}, Shape{1, 2}, 0, Shape{3, 2}, []float64{0, 1, 2, 3, 0, 1}}, {"matrix; axis 1", Float64, nil, nil, Shape{2, 2}, Shape{2, 1}, 1, Shape{2, 3}, []float64{0, 1, 0, 2, 3, 1}}, {"colvec matrix, axis 0", Float64, nil, nil, Shape{2, 1}, Shape{1, 1}, 0, Shape{3, 1}, []float64{0, 1, 0}}, {"rowvec matrix, axis 1", Float64, nil, nil, Shape{1, 2}, Shape{1, 1}, 1, Shape{1, 3}, []float64{0, 1, 0}}, {"3tensor; axis 0", Float64, nil, nil, Shape{2, 3, 2}, Shape{1, 3, 2}, 0, Shape{3, 3, 2}, []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5}}, {"3tensor; axis 2", Float64, nil, nil, Shape{2, 3, 2}, Shape{2, 3, 1}, 2, Shape{2, 3, 3}, []float64{0, 1, 0, 2, 3, 1, 4, 5, 2, 6, 7, 3, 8, 9, 4, 10, 11, 5}}, {"3tensor; 
axis 1", Float64, nil, nil, Shape{2, 3, 2}, Shape{2, 1, 2}, 1, Shape{2, 4, 2}, []float64{0, 1, 2, 3, 4, 5, 0, 1, 6, 7, 8, 9, 10, 11, 2, 3}}, } func TestDense_Concat(t *testing.T) { assert := assert.New(t) for _, cts := range concatTests { var T0, T1 *Dense if cts.a == nil { T0 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize()))) } else { T0 = New(WithShape(cts.shape...), WithBacking(cts.a)) } switch { case cts.shapeB == nil && cts.a == nil: T1 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize()))) case cts.shapeB == nil && cts.a != nil: T1 = New(WithShape(cts.shape...), WithBacking(cloneArray(cts.a))) case cts.shapeB != nil && cts.b == nil: T1 = New(WithShape(cts.shapeB...), WithBacking(Range(cts.dt, 0, cts.shapeB.TotalSize()))) case cts.shapeB != nil && cts.b != nil: T1 = New(WithShape(cts.shapeB...), WithBacking(cts.b)) } T2, err := T0.Concat(cts.axis, T1) if err != nil { t.Errorf("Test %v failed: %v", cts.name, err) continue } assert.True(cts.correctShape.Eq(T2.Shape())) assert.Equal(cts.correctData, T2.Data()) } //Masked case for _, cts := range concatTests { var T0, T1 *Dense if cts.a == nil { T0 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize()))) T0.MaskedEqual(castToDt(0.0, cts.dt)) } else { T0 = New(WithShape(cts.shape...), WithBacking(cts.a)) T0.MaskedEqual(castToDt(0.0, cts.dt)) } switch { case cts.shapeB == nil && cts.a == nil: T1 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize()))) case cts.shapeB == nil && cts.a != nil: T1 = New(WithShape(cts.shape...), WithBacking(cloneArray(cts.a))) case cts.shapeB != nil && cts.b == nil: T1 = New(WithShape(cts.shapeB...), WithBacking(Range(cts.dt, 0, cts.shapeB.TotalSize()))) case cts.shapeB != nil && cts.b != nil: T1 = New(WithShape(cts.shapeB...), WithBacking(cts.b)) } T1.MaskedEqual(castToDt(0.0, cts.dt)) T2, err := T0.Concat(cts.axis, T1) if err != nil { t.Errorf("Test %v failed: %v", cts.name, err) continue } T3 := New(WithShape(cts.correctShape...), WithBacking(cts.correctData)) T3.MaskedEqual(castToDt(0.0, cts.dt)) assert.True(cts.correctShape.Eq(T2.Shape())) assert.Equal(cts.correctData, T2.Data()) assert.Equal(T3.mask, T2.mask) } } func TestDense_Concat_sliced(t *testing.T) { v := New( WithShape(1, 5), WithBacking([]float64{0, 1, 2, 3, 4}), ) cols := make([]Tensor, v.Shape().TotalSize()) for i := 0; i < v.Shape().TotalSize(); i++ { sliced, err := v.Slice(nil, ss(i)) if err != nil { t.Fatalf("Failed to slice %d. Error: %v", i, err) } if err = sliced.Reshape(sliced.Shape().TotalSize(), 1); err != nil { t.Fatalf("Failed to reshape %d. Error %v", i, err) } cols[i] = sliced } result, err := Concat(1, cols[0], cols[1:]...) 
if err != nil { t.Error(err) } assert.Equal(t, v.Data(), result.Data()) if v.Uintptr() == result.Uintptr() { t.Error("They should not share the same backing data!") } } var simpleStackTests = []struct { name string dt Dtype shape Shape axis int stackCount int correctShape Shape correctData interface{} }{ // Size 8 {"vector, axis 0, stack 2", Float64, Shape{2}, 0, 2, Shape{2, 2}, []float64{0, 1, 100, 101}}, {"vector, axis 1, stack 2", Float64, Shape{2}, 1, 2, Shape{2, 2}, []float64{0, 100, 1, 101}}, {"matrix, axis 0, stack 2", Float64, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []float64{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}}, {"matrix, axis 1, stack 2", Float64, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []float64{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}}, {"matrix, axis 2, stack 2", Float64, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []float64{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}}, {"matrix, axis 0, stack 3", Float64, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []float64{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}}, {"matrix, axis 1, stack 3", Float64, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []float64{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}}, {"matrix, axis 2, stack 3", Float64, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []float64{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}}, // Size 4 {"vector, axis 0, stack 2 (f32)", Float32, Shape{2}, 0, 2, Shape{2, 2}, []float32{0, 1, 100, 101}}, {"vector, axis 1, stack 2 (f32)", Float32, Shape{2}, 1, 2, Shape{2, 2}, []float32{0, 100, 1, 101}}, {"matrix, axis 0, stack 2 (f32)", Float32, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []float32{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}}, {"matrix, axis 1, stack 2 (f32)", Float32, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []float32{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}}, {"matrix, axis 2, stack 2 (f32)", Float32, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []float32{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}}, {"matrix, axis 0, stack 3 (f32)", Float32, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []float32{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}}, {"matrix, axis 1, stack 3 (f32)", Float32, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []float32{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}}, {"matrix, axis 2, stack 3 (f32)", Float32, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []float32{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}}, // Size 2 {"vector, axis 0, stack 2 (i16)", Int16, Shape{2}, 0, 2, Shape{2, 2}, []int16{0, 1, 100, 101}}, {"vector, axis 1, stack 2 (i16)", Int16, Shape{2}, 1, 2, Shape{2, 2}, []int16{0, 100, 1, 101}}, {"matrix, axis 0, stack 2 (i16)", Int16, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []int16{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}}, {"matrix, axis 1, stack 2 (i16)", Int16, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []int16{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}}, {"matrix, axis 2, stack 2 (i16)", Int16, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []int16{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}}, {"matrix, axis 0, stack 3 (i16)", Int16, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []int16{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}}, {"matrix, axis 1, stack 3 (i16)", Int16, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []int16{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}}, {"matrix, axis 2, stack 3 (i16)", Int16, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []int16{0, 100, 200, 1, 101, 
201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}}, // Size 1 {"vector, axis 0, stack 2 (u8)", Byte, Shape{2}, 0, 2, Shape{2, 2}, []byte{0, 1, 100, 101}}, {"vector, axis 1, stack 2 (u8)", Byte, Shape{2}, 1, 2, Shape{2, 2}, []byte{0, 100, 1, 101}}, {"matrix, axis 0, stack 2 (u8)", Byte, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []byte{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}}, {"matrix, axis 1, stack 2 (u8)", Byte, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []byte{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}}, {"matrix, axis 2, stack 2 (u8)", Byte, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []byte{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}}, {"matrix, axis 0, stack 3 (u8)", Byte, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []byte{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}}, {"matrix, axis 1, stack 3 (u8)", Byte, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []byte{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}}, {"matrix, axis 2, stack 3 (u8)", Byte, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []byte{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}}, } var viewStackTests = []struct { name string dt Dtype shape Shape transform []int slices []Slice axis int stackCount int correctShape Shape correctData interface{} }{ // Size 8 {"matrix(4x4)[1:3, 1:3] axis 0", Float64, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []float64{5, 6, 9, 10, 105, 106, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 1", Float64, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []float64{5, 6, 105, 106, 9, 10, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 2", Float64, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []float64{5, 105, 6, 106, 9, 109, 10, 110}}, // Size 4 {"matrix(4x4)[1:3, 1:3] axis 0 (u32)", Uint32, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []uint32{5, 6, 9, 10, 105, 106, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 1 (u32)", Uint32, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []uint32{5, 6, 105, 106, 9, 10, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 2 (u32)", Uint32, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []uint32{5, 105, 6, 106, 9, 109, 10, 110}}, // Size 2 {"matrix(4x4)[1:3, 1:3] axis 0 (u16)", Uint16, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []uint16{5, 6, 9, 10, 105, 106, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 1 (u16)", Uint16, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []uint16{5, 6, 105, 106, 9, 10, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 2 (u16)", Uint16, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []uint16{5, 105, 6, 106, 9, 109, 10, 110}}, // Size 1 {"matrix(4x4)[1:3, 1:3] axis 0 (u8)", Byte, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []byte{5, 6, 9, 10, 105, 106, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 1 (u8)", Byte, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []byte{5, 6, 105, 106, 9, 10, 109, 110}}, {"matrix(4x4)[1:3, 1:3] axis 2 (u8)", Byte, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []byte{5, 105, 6, 106, 9, 109, 10, 110}}, } func TestDense_Stack(t *testing.T) { assert := assert.New(t) var err error for _, sts := range simpleStackTests { T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize()))) var stacked []*Dense 
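// Each extra tensor stacked alongside T gets a backing offset by (i+1)*100,
// so the interleaved values in the expected data (e.g. 0..5, 100..105,
// 200..205) can be traced back to the tensor they came from.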
for i := 0; i < sts.stackCount-1; i++ { offset := (i + 1) * 100 T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset))) stacked = append(stacked, T1) } T2, err := T.Stack(sts.axis, stacked...) if err != nil { t.Error(err) continue } assert.True(sts.correctShape.Eq(T2.Shape())) assert.Equal(sts.correctData, T2.Data()) } for _, sts := range viewStackTests { T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize()))) switch { case sts.slices != nil && sts.transform == nil: var sliced Tensor if sliced, err = T.Slice(sts.slices...); err != nil { t.Error(err) continue } T = sliced.(*Dense) case sts.transform != nil && sts.slices == nil: T.T(sts.transform...) } var stacked []*Dense for i := 0; i < sts.stackCount-1; i++ { offset := (i + 1) * 100 T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset))) switch { case sts.slices != nil && sts.transform == nil: var sliced Tensor if sliced, err = T1.Slice(sts.slices...); err != nil { t.Error(err) continue } T1 = sliced.(*Dense) case sts.transform != nil && sts.slices == nil: T1.T(sts.transform...) } stacked = append(stacked, T1) } T2, err := T.Stack(sts.axis, stacked...) if err != nil { t.Error(err) continue } assert.True(sts.correctShape.Eq(T2.Shape())) assert.Equal(sts.correctData, T2.Data(), "%q failed", sts.name) } // Repeat tests with masks for _, sts := range simpleStackTests { T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize()))) var stacked []*Dense for i := 0; i < sts.stackCount-1; i++ { offset := (i + 1) * 100 T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset))) T1.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt)) stacked = append(stacked, T1) } T2, err := T.Stack(sts.axis, stacked...) if err != nil { t.Error(err) continue } T3 := New(WithShape(sts.correctShape...), WithBacking(sts.correctData)) T3.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt)) assert.True(sts.correctShape.Eq(T2.Shape())) assert.Equal(sts.correctData, T2.Data()) assert.Equal(T3.mask, T2.mask) } for _, sts := range viewStackTests { T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize()))) switch { case sts.slices != nil && sts.transform == nil: var sliced Tensor if sliced, err = T.Slice(sts.slices...); err != nil { t.Error(err) continue } T = sliced.(*Dense) case sts.transform != nil && sts.slices == nil: T.T(sts.transform...) } var stacked []*Dense for i := 0; i < sts.stackCount-1; i++ { offset := (i + 1) * 100 T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset))) T1.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt)) switch { case sts.slices != nil && sts.transform == nil: var sliced Tensor if sliced, err = T1.Slice(sts.slices...); err != nil { t.Error(err) continue } T1 = sliced.(*Dense) case sts.transform != nil && sts.slices == nil: T1.T(sts.transform...) } stacked = append(stacked, T1) } T2, err := T.Stack(sts.axis, stacked...) 
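// The masked variants exercise mask propagation through Stack: T3 below is
// rebuilt from the expected data with the same MaskedInside range, and its
// mask must match the mask of the stacked result T2.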
if err != nil { t.Error(err) continue } T3 := New(WithShape(sts.correctShape...), WithBacking(sts.correctData)) T3.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt)) assert.True(sts.correctShape.Eq(T2.Shape())) assert.Equal(sts.correctData, T2.Data()) assert.Equal(T3.mask, T2.mask) } // arbitrary view slices T := New(WithShape(2, 2), WithBacking([]string{"hello", "world", "nihao", "sekai"})) var stacked []*Dense for i := 0; i < 1; i++ { T1 := New(WithShape(2, 2), WithBacking([]string{"blah1", "blah2", "blah3", "blah4"})) var sliced Tensor if sliced, err = T1.Slice(nil, nil); err != nil { t.Error(err) break } T1 = sliced.(*Dense) stacked = append(stacked, T1) } T2, err := T.Stack(0, stacked...) if err != nil { t.Error(err) return } correctShape := Shape{2, 2, 2} correctData := []string{"hello", "world", "nihao", "sekai", "blah1", "blah2", "blah3", "blah4"} assert.True(correctShape.Eq(T2.Shape())) assert.Equal(correctData, T2.Data(), "%q failed", "arbitrary view slice") } tensor-0.9.24/dense_norms.go000066400000000000000000000174361426512615100160010ustar00rootroot00000000000000package tensor import ( "math" "github.com/chewxy/math32" "github.com/pkg/errors" ) func (t *Dense) multiSVDNorm(rowAxis, colAxis int) (retVal *Dense, err error) { if rowAxis > colAxis { rowAxis-- } dims := t.Dims() if retVal, err = t.RollAxis(colAxis, dims, true); err != nil { return } if retVal, err = retVal.RollAxis(rowAxis, dims, true); err != nil { return } // manual, since SVD only works on matrices. In the future, this needs to be fixed when gonum's lapack works for float32 // TODO: SVDFuture switch dims { case 2: retVal, _, _, err = retVal.SVD(false, false) case 3: toStack := make([]*Dense, retVal.Shape()[0]) for i := 0; i < retVal.Shape()[0]; i++ { var sliced, ithS *Dense if sliced, err = sliceDense(retVal, ss(i)); err != nil { return } if ithS, _, _, err = sliced.SVD(false, false); err != nil { return } toStack[i] = ithS } retVal, err = toStack[0].Stack(0, toStack[1:]...) return default: err = errors.Errorf("multiSVDNorm for dimensions greater than 3") } return } // Norm returns the p-ordered norm of the *Dense, given the axes. 
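//
// A minimal usage sketch (the values are taken from this package's tests; the
// Frobenius norm of this matrix is math.Sqrt(84)):
//	T := New(WithShape(2, 2), WithBacking([]float64{1, 3, 5, 7}))
//	fro, err := T.Norm(FrobeniusNorm()) // scalar *Dense holding the norm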
// // This implementation is directly adapted from Numpy, which is licenced under a BSD-like licence, and can be found here: https://docs.scipy.org/doc/numpy-1.9.1/license.html func (t *Dense) Norm(ord NormOrder, axes ...int) (retVal *Dense, err error) { var ret Tensor var ok bool var abs, norm0, normN interface{} var oneOverOrd interface{} switch t.t { case Float64: abs = math.Abs norm0 = func(x float64) float64 { if x != 0 { return 1 } return 0 } normN = func(x float64) float64 { return math.Pow(math.Abs(x), float64(ord)) } oneOverOrd = float64(1) / float64(ord) case Float32: abs = math32.Abs norm0 = func(x float32) float32 { if x != 0 { return 1 } return 0 } normN = func(x float32) float32 { return math32.Pow(math32.Abs(x), float32(ord)) } oneOverOrd = float32(1) / float32(ord) default: err = errors.Errorf("Norm only works on float types") return } dims := t.Dims() // simple case if len(axes) == 0 { if ord.IsUnordered() || (ord.IsFrobenius() && dims == 2) || (ord == Norm(2) && dims == 1) { backup := t.AP ap := makeAP(1) defer ap.zero() ap.unlock() ap.SetShape(t.Size()) ap.lock() t.AP = ap if ret, err = Dot(t, t); err != nil { // returns a scalar err = errors.Wrapf(err, opFail, "Norm-0") return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "Norm-0") } switch t.t { case Float64: retVal.SetF64(0, math.Sqrt(retVal.GetF64(0))) case Float32: retVal.SetF32(0, math32.Sqrt(retVal.GetF32(0))) } t.AP = backup return } axes = make([]int, dims) for i := range axes { axes[i] = i } } switch len(axes) { case 1: cloned := t.Clone().(*Dense) switch { case ord.IsUnordered() || ord == Norm(2): if ret, err = Square(cloned); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "UnorderedNorm-1") } if retVal, err = retVal.Sum(axes...); err != nil { return } if ret, err = Sqrt(retVal); err != nil { return } return assertDense(ret) case ord.IsInf(1): if ret, err = cloned.Apply(abs); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "InfNorm-1") } return retVal.Max(axes...) case ord.IsInf(-1): if ret, err = cloned.Apply(abs); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "-InfNorm-1") } return retVal.Min(axes...) case ord == Norm(0): if ret, err = cloned.Apply(norm0); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "Norm-0") } return retVal.Sum(axes...) case ord == Norm(1): if ret, err = cloned.Apply(abs); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "Norm-1") } return retVal.Sum(axes...) default: if ret, err = cloned.Apply(normN); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "Norm-N") } if retVal, err = retVal.Sum(axes...); err != nil { return } return retVal.PowScalar(oneOverOrd, true) } case 2: rowAxis := axes[0] colAxis := axes[1] // checks if rowAxis < 0 { return nil, errors.Errorf("Row Axis %d is < 0", rowAxis) } if colAxis < 0 { return nil, errors.Errorf("Col Axis %d is < 0", colAxis) } if rowAxis == colAxis { return nil, errors.Errorf("Duplicate axes found.
Row Axis: %d, Col Axis %d", rowAxis, colAxis) } cloned := t.Clone().(*Dense) switch { case ord == Norm(2): // svd norm if retVal, err = t.multiSVDNorm(rowAxis, colAxis); err != nil { return nil, errors.Wrapf(err, opFail, "MultiSVDNorm, case 2 with Ord == Norm(2)") } dims := retVal.Dims() return retVal.Max(dims - 1) case ord == Norm(-2): // svd norm if retVal, err = t.multiSVDNorm(rowAxis, colAxis); err != nil { return nil, errors.Wrapf(err, opFail, "MultiSVDNorm, case 2 with Ord == Norm(-2)") } dims := retVal.Dims() return retVal.Min(dims - 1) case ord == Norm(1): if colAxis > rowAxis { colAxis-- } if ret, err = cloned.Apply(abs); err != nil { return nil, errors.Wrapf(err, opFail, "Apply abs in Norm. ord == Norm(1)") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Norm-1, axis=2") } if retVal, err = retVal.Sum(rowAxis); err != nil { return } return retVal.Max(colAxis) case ord == Norm(-1): if colAxis > rowAxis { colAxis-- } if ret, err = cloned.Apply(abs); err != nil { return } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Norm-(-1), axis=2") } if retVal, err = retVal.Sum(rowAxis); err != nil { return } return retVal.Min(colAxis) case ord == Norm(0): return nil, errors.Errorf("Norm of order 0 undefined for matrices") case ord.IsInf(1): if rowAxis > colAxis { rowAxis-- } if ret, err = cloned.Apply(abs); err != nil { return } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "InfNorm, axis=2") } if retVal, err = retVal.Sum(colAxis); err != nil { return nil, errors.Wrapf(err, "Sum in infNorm") } return retVal.Max(rowAxis) case ord.IsInf(-1): if rowAxis > colAxis { rowAxis-- } if ret, err = cloned.Apply(abs); err != nil { return } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "-InfNorm, axis=2") } if retVal, err = retVal.Sum(colAxis); err != nil { return nil, errors.Wrapf(err, opFail, "Sum with InfNorm") } return retVal.Min(rowAxis) case ord.IsUnordered() || ord.IsFrobenius(): if ret, err = cloned.Apply(abs); err != nil { return } if retVal, ok = ret.(*Dense); !ok { return nil, errors.Errorf(opFail, "Frobenius Norm, axis = 2") } if ret, err = Square(retVal); err != nil { return } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "Norm-0, axis=2") } if retVal, err = retVal.Sum(axes...); err != nil { return } if ret, err = Sqrt(retVal); err != nil { return } return assertDense(ret) case ord.IsNuclear(): // svd norm if retVal, err = t.multiSVDNorm(rowAxis, colAxis); err != nil { return } return retVal.Sum(len(t.Shape()) - 1) default: return nil, errors.Errorf("Not yet implemented: Norm for Axes %v, ord %v", axes, ord) } default: err = errors.Errorf(dimMismatch, 2, len(axes)) return } panic("Unreachable") } tensor-0.9.24/dense_norms_test.go000066400000000000000000000150721426512615100170320ustar00rootroot00000000000000package tensor import ( "fmt" "math" "testing" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) func testNormVal(T *Dense, ord NormOrder, want float64) error { retVal, err := T.Norm(ord) if err != nil { err = errors.Wrap(err, "testNormVal") return err } if !retVal.IsScalar() { return errors.New("Expected Scalar") } got := retVal.ScalarValue().(float64) if !closef64(want, got) && !(math.IsNaN(want) && alikef64(want, got)) { return errors.New(fmt.Sprintf("Norm %v, Backing %v: Want %f, got %f
instead", ord, T.Data(), want, got)) } return nil } func TestTensor_Norm(t *testing.T) { var T *Dense var err error var backing, backing1, backing2 []float64 var corrects map[NormOrder]float64 var wrongs []NormOrder // empty backing = make([]float64, 0) T = New(WithBacking(backing)) //TODO // vecktor backing = []float64{1, 2, 3, 4} backing1 = []float64{-1, -2, -3, -4} backing2 = []float64{-1, 2, -3, 4} corrects = map[NormOrder]float64{ UnorderedNorm(): math.Pow(30, 0.5), // Unordered FrobeniusNorm(): math.NaN(), // Frobenius NuclearNorm(): math.NaN(), // Nuclear InfNorm(): 4, // Inf NegInfNorm(): 1, // -Inf Norm(0): 4, // 0 Norm(1): 10, // 1 Norm(-1): 12.0 / 25.0, // -1 Norm(2): math.Pow(30, 0.5), // 2 Norm(-2): math.Pow((205.0 / 144.0), -0.5), // -2 } backings := [][]float64{backing, backing1, backing2} for ord, want := range corrects { for _, b := range backings { T = New(WithShape(len(backing)), WithBacking(b)) if err = testNormVal(T, ord, want); err != nil { t.Error(errors.Cause(err)) } } } // 2x2 mat backing = []float64{1, 3, 5, 7} corrects = map[NormOrder]float64{ UnorderedNorm(): math.Pow(84, 0.5), // Unordered FrobeniusNorm(): math.Pow(84, 0.5), // Frobenius NuclearNorm(): 10, // Nuclear InfNorm(): 12, // Inf NegInfNorm(): 4, // -Inf Norm(1): 10, // 1 Norm(-1): 6, // -1 Norm(2): 9.1231056256176615, // 2 Norm(-2): 0.87689437438234041, // -2 } T = New(WithShape(2, 2), WithBacking(backing)) for ord, want := range corrects { if err = testNormVal(T, ord, want); err != nil { t.Errorf("ORD %v: %v", ord, err) } } // impossible values wrongs = []NormOrder{ Norm(-3), Norm(0), } for _, ord := range wrongs { if err = testNormVal(T, ord, math.NaN()); err == nil { t.Errorf("Expected an error when finding norm of order %v", ord) } } // 3x3 mat // this test is added because the 2x2 example happens to have equal nuclear norm and induced 1-norm. // the 1/10 scaling factor accommodates the absolute tolerance used. backing = []float64{0.1, 0.2, 0.3, 0.6, 0, 0.5, 0.3, 0.2, 0.1} corrects = map[NormOrder]float64{ FrobeniusNorm(): (1.0 / 10.0) * math.Pow(89, 0.5), NuclearNorm(): 1.3366836911774836, InfNorm(): 1.1, NegInfNorm(): 0.6, Norm(1): 1, Norm(-1): 0.4, Norm(2): 0.88722940323461277, Norm(-2): 0.19456584790481812, } T = New(WithShape(3, 3), WithBacking(backing)) for ord, want := range corrects { if err = testNormVal(T, ord, want); err != nil { t.Error(err) } } } func TestTensor_Norm_Axis(t *testing.T) { assert := assert.New(t) var T, s, expected, retVal *Dense var sliced Tensor var err error var backing []float64 var ords []NormOrder t.Log("Vector Norm Tests: compare the use of axis with computing of each row or column separately") ords = []NormOrder{ UnorderedNorm(), InfNorm(), NegInfNorm(), Norm(-1), Norm(0), Norm(1), Norm(2), Norm(3), } backing = []float64{1, 2, 3, 4, 5, 6} T = New(WithShape(2, 3), WithBacking(backing)) for _, ord := range ords { var expecteds []*Dense for k := 0; k < T.Shape()[1]; k++ { sliced, _ = T.Slice(nil, ss(k)) s = sliced.(View).Materialize().(*Dense) expected, _ = s.Norm(ord) expecteds = append(expecteds, expected) } if retVal, err = T.Norm(ord, 0); err != nil { t.Error(err) continue } assert.Equal(len(expecteds), retVal.Shape()[0]) for i, e := range expecteds { sliced, _ = retVal.Slice(ss(i)) sliced = sliced.(View).Materialize() if !allClose(e.Data(), sliced.Data()) { t.Errorf("Axis = 0; Ord = %v; Expected %v. Got %v instead. 
ret %v, i: %d", ord, e.Data(), sliced.Data(), retVal, i) } } // reset and do axis = 1 expecteds = expecteds[:0] for k := 0; k < T.Shape()[0]; k++ { sliced, _ = T.Slice(ss(k)) s = sliced.(*Dense) expected, _ = s.Norm(ord) expecteds = append(expecteds, expected) } if retVal, err = T.Norm(ord, 1); err != nil { t.Error(err) continue } assert.Equal(len(expecteds), retVal.Shape()[0]) for i, e := range expecteds { sliced, _ = retVal.Slice(ss(i)) sliced = sliced.(View).Materialize().(*Dense) if !allClose(e.Data(), sliced.Data()) { t.Errorf("Axis = 1; Ord = %v; Expected %v. Got %v instead", ord, e.Data(), sliced.Data()) } } } t.Log("Matrix Norms") ords = []NormOrder{ UnorderedNorm(), FrobeniusNorm(), InfNorm(), NegInfNorm(), Norm(-2), Norm(-1), Norm(1), Norm(2), } axeses := [][]int{ {0, 0}, {0, 1}, {0, 2}, {1, 0}, {1, 1}, {1, 2}, {2, 0}, {2, 1}, {2, 2}, } backing = Range(Float64, 1, 25).([]float64) T = New(WithShape(2, 3, 4), WithBacking(backing)) dims := T.Dims() for _, ord := range ords { for _, axes := range axeses { rowAxis := axes[0] colAxis := axes[1] if rowAxis < 0 { rowAxis += dims } if colAxis < 0 { colAxis += dims } if rowAxis == colAxis { } else { kthIndex := dims - (rowAxis + colAxis) var expecteds []*Dense for k := 0; k < T.Shape()[kthIndex]; k++ { var slices []Slice for s := 0; s < kthIndex; s++ { slices = append(slices, nil) } slices = append(slices, ss(k)) sliced, _ = T.Slice(slices...) if rowAxis > colAxis { sliced.T() } sliced = sliced.(View).Materialize().(*Dense) s = sliced.(*Dense) expected, _ = s.Norm(ord) expecteds = append(expecteds, expected) } if retVal, err = T.Norm(ord, rowAxis, colAxis); err != nil { t.Error(err) continue } for i, e := range expecteds { sliced, _ = retVal.Slice(ss(i)) assert.Equal(e.Data(), sliced.Data(), "ord %v, rowAxis: %v, colAxis %v", ord, rowAxis, colAxis) } } } } } tensor-0.9.24/dense_reduction_methods.go000066400000000000000000000016301426512615100203470ustar00rootroot00000000000000package tensor import "github.com/pkg/errors" func (t *Dense) Sum(along ...int) (retVal *Dense, err error) { var e Engine = t.e if sumer, ok := e.(Sumer); ok { var ret Tensor if ret, err = sumer.Sum(t, along...); err != nil { return } return ret.(*Dense), nil } return nil, errors.Errorf("Engine does not support Sum") } func (t *Dense) Max(along ...int) (retVal *Dense, err error) { var e Engine = t.e if maxer, ok := e.(Maxer); ok { var ret Tensor if ret, err = maxer.Max(t, along...); err != nil { return } return ret.(*Dense), nil } return nil, errors.Errorf("Engine does not support Max") } func (t *Dense) Min(along ...int) (retVal *Dense, err error) { var e Engine = t.e if miner, ok := e.(Miner); ok { var ret Tensor if ret, err = miner.Min(t, along...); err != nil { return } return ret.(*Dense), nil } return nil, errors.Errorf("Engine does not support Min") } tensor-0.9.24/dense_reduction_test.go000066400000000000000000001057211426512615100176710ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
package tensor import ( "testing" "github.com/stretchr/testify/assert" "gorgonia.org/tensor/internal/execution" ) var denseReductionTests = []struct { of Dtype fn interface{} def interface{} axis int correct interface{} correctShape Shape }{ // int {Int, execution.AddI, int(0), 0, []int{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Int, execution.AddI, int(0), 1, []int{6, 9, 24, 27}, Shape{2, 2}}, {Int, execution.AddI, int(0), 2, []int{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // int8 {Int8, execution.AddI8, int8(0), 0, []int8{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Int8, execution.AddI8, int8(0), 1, []int8{6, 9, 24, 27}, Shape{2, 2}}, {Int8, execution.AddI8, int8(0), 2, []int8{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // int16 {Int16, execution.AddI16, int16(0), 0, []int16{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Int16, execution.AddI16, int16(0), 1, []int16{6, 9, 24, 27}, Shape{2, 2}}, {Int16, execution.AddI16, int16(0), 2, []int16{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // int32 {Int32, execution.AddI32, int32(0), 0, []int32{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Int32, execution.AddI32, int32(0), 1, []int32{6, 9, 24, 27}, Shape{2, 2}}, {Int32, execution.AddI32, int32(0), 2, []int32{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // int64 {Int64, execution.AddI64, int64(0), 0, []int64{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Int64, execution.AddI64, int64(0), 1, []int64{6, 9, 24, 27}, Shape{2, 2}}, {Int64, execution.AddI64, int64(0), 2, []int64{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // uint {Uint, execution.AddU, uint(0), 0, []uint{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Uint, execution.AddU, uint(0), 1, []uint{6, 9, 24, 27}, Shape{2, 2}}, {Uint, execution.AddU, uint(0), 2, []uint{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // uint8 {Uint8, execution.AddU8, uint8(0), 0, []uint8{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Uint8, execution.AddU8, uint8(0), 1, []uint8{6, 9, 24, 27}, Shape{2, 2}}, {Uint8, execution.AddU8, uint8(0), 2, []uint8{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // uint16 {Uint16, execution.AddU16, uint16(0), 0, []uint16{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Uint16, execution.AddU16, uint16(0), 1, []uint16{6, 9, 24, 27}, Shape{2, 2}}, {Uint16, execution.AddU16, uint16(0), 2, []uint16{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // uint32 {Uint32, execution.AddU32, uint32(0), 0, []uint32{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Uint32, execution.AddU32, uint32(0), 1, []uint32{6, 9, 24, 27}, Shape{2, 2}}, {Uint32, execution.AddU32, uint32(0), 2, []uint32{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // uint64 {Uint64, execution.AddU64, uint64(0), 0, []uint64{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Uint64, execution.AddU64, uint64(0), 1, []uint64{6, 9, 24, 27}, Shape{2, 2}}, {Uint64, execution.AddU64, uint64(0), 2, []uint64{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // float32 {Float32, execution.AddF32, float32(0), 0, []float32{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Float32, execution.AddF32, float32(0), 1, []float32{6, 9, 24, 27}, Shape{2, 2}}, {Float32, execution.AddF32, float32(0), 2, []float32{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // float64 {Float64, execution.AddF64, float64(0), 0, []float64{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Float64, execution.AddF64, float64(0), 1, []float64{6, 9, 24, 27}, Shape{2, 2}}, {Float64, execution.AddF64, float64(0), 2, []float64{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, // complex64 {Complex64, execution.AddC64, complex64(0), 0, []complex64{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Complex64, execution.AddC64, complex64(0), 1, []complex64{6, 9, 24, 27}, Shape{2, 2}}, {Complex64, execution.AddC64, complex64(0), 2, []complex64{1, 5, 9, 13, 17, 
21}, Shape{2, 3}}, // complex128 {Complex128, execution.AddC128, complex128(0), 0, []complex128{6, 8, 10, 12, 14, 16}, Shape{3, 2}}, {Complex128, execution.AddC128, complex128(0), 1, []complex128{6, 9, 24, 27}, Shape{2, 2}}, {Complex128, execution.AddC128, complex128(0), 2, []complex128{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, } func TestDense_Reduce(t *testing.T) { assert := assert.New(t) for _, drt := range denseReductionTests { T := New(WithShape(2, 3, 2), WithBacking(Range(drt.of, 0, 2*3*2))) T2, err := T.Reduce(drt.fn, drt.axis, drt.def) if err != nil { t.Error(err) continue } assert.True(drt.correctShape.Eq(T2.Shape())) assert.Equal(drt.correct, T2.Data()) // stupids: _, err = T.Reduce(drt.fn, 1000, drt.def) assert.NotNil(err) // wrong function type var f interface{} f = func(a, b float64) float64 { return 0 } if drt.of == Float64 { f = func(a, b int) int { return 0 } } _, err = T.Reduce(f, 0, drt.correct) assert.NotNil(err) // wrong default value type var def2 interface{} def2 = 3.14 if drt.of == Float64 { def2 = int(1) } _, err = T.Reduce(drt.fn, 3, def2) // only last axis requires a default value assert.NotNil(err) } } var sumTests = []struct { name string of Dtype shape Shape along []int correctShape Shape correct interface{} }{ {"common case: T.Sum() for int", Int, Shape{2, 3}, []int{}, ScalarShape(), int(15)}, {"A.Sum(0) for int", Int, Shape{2, 3}, []int{0}, Shape{3}, []int{3, 5, 7}}, {"A.Sum(1) for int", Int, Shape{2, 3}, []int{1}, Shape{2}, []int{3, 12}}, {"A.Sum(0,1) for int", Int, Shape{2, 3}, []int{0, 1}, ScalarShape(), int(15)}, {"A.Sum(1,0) for int", Int, Shape{2, 3}, []int{1, 0}, ScalarShape(), int(15)}, {"3T.Sum(1,2) for int", Int, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int{66, 210}}, {"4T.Sum() for int", Int, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int(120)}, {"4T.Sum(1,3) for int", Int, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for int", Int, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int{44, 76}}, {"common case: T.Sum() for int8", Int8, Shape{2, 3}, []int{}, ScalarShape(), int8(15)}, {"A.Sum(0) for int8", Int8, Shape{2, 3}, []int{0}, Shape{3}, []int8{3, 5, 7}}, {"A.Sum(1) for int8", Int8, Shape{2, 3}, []int{1}, Shape{2}, []int8{3, 12}}, {"A.Sum(0,1) for int8", Int8, Shape{2, 3}, []int{0, 1}, ScalarShape(), int8(15)}, {"A.Sum(1,0) for int8", Int8, Shape{2, 3}, []int{1, 0}, ScalarShape(), int8(15)}, {"3T.Sum(1,2) for int8", Int8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int8{66, -46}}, {"4T.Sum() for int8", Int8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int8(120)}, {"4T.Sum(1,3) for int8", Int8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int8{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for int8", Int8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int8{44, 76}}, {"common case: T.Sum() for int16", Int16, Shape{2, 3}, []int{}, ScalarShape(), int16(15)}, {"A.Sum(0) for int16", Int16, Shape{2, 3}, []int{0}, Shape{3}, []int16{3, 5, 7}}, {"A.Sum(1) for int16", Int16, Shape{2, 3}, []int{1}, Shape{2}, []int16{3, 12}}, {"A.Sum(0,1) for int16", Int16, Shape{2, 3}, []int{0, 1}, ScalarShape(), int16(15)}, {"A.Sum(1,0) for int16", Int16, Shape{2, 3}, []int{1, 0}, ScalarShape(), int16(15)}, {"3T.Sum(1,2) for int16", Int16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int16{66, 210}}, {"4T.Sum() for int16", Int16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int16(120)}, {"4T.Sum(1,3) for int16", Int16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int16{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for int16", Int16, Shape{2, 2, 2, 2}, []int{0, 2, 
3}, Shape{2}, []int16{44, 76}}, {"common case: T.Sum() for int32", Int32, Shape{2, 3}, []int{}, ScalarShape(), int32(15)}, {"A.Sum(0) for int32", Int32, Shape{2, 3}, []int{0}, Shape{3}, []int32{3, 5, 7}}, {"A.Sum(1) for int32", Int32, Shape{2, 3}, []int{1}, Shape{2}, []int32{3, 12}}, {"A.Sum(0,1) for int32", Int32, Shape{2, 3}, []int{0, 1}, ScalarShape(), int32(15)}, {"A.Sum(1,0) for int32", Int32, Shape{2, 3}, []int{1, 0}, ScalarShape(), int32(15)}, {"3T.Sum(1,2) for int32", Int32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int32{66, 210}}, {"4T.Sum() for int32", Int32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int32(120)}, {"4T.Sum(1,3) for int32", Int32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int32{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for int32", Int32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int32{44, 76}}, {"common case: T.Sum() for int64", Int64, Shape{2, 3}, []int{}, ScalarShape(), int64(15)}, {"A.Sum(0) for int64", Int64, Shape{2, 3}, []int{0}, Shape{3}, []int64{3, 5, 7}}, {"A.Sum(1) for int64", Int64, Shape{2, 3}, []int{1}, Shape{2}, []int64{3, 12}}, {"A.Sum(0,1) for int64", Int64, Shape{2, 3}, []int{0, 1}, ScalarShape(), int64(15)}, {"A.Sum(1,0) for int64", Int64, Shape{2, 3}, []int{1, 0}, ScalarShape(), int64(15)}, {"3T.Sum(1,2) for int64", Int64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int64{66, 210}}, {"4T.Sum() for int64", Int64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int64(120)}, {"4T.Sum(1,3) for int64", Int64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int64{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for int64", Int64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int64{44, 76}}, {"common case: T.Sum() for uint", Uint, Shape{2, 3}, []int{}, ScalarShape(), uint(15)}, {"A.Sum(0) for uint", Uint, Shape{2, 3}, []int{0}, Shape{3}, []uint{3, 5, 7}}, {"A.Sum(1) for uint", Uint, Shape{2, 3}, []int{1}, Shape{2}, []uint{3, 12}}, {"A.Sum(0,1) for uint", Uint, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint(15)}, {"A.Sum(1,0) for uint", Uint, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint(15)}, {"3T.Sum(1,2) for uint", Uint, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint{66, 210}}, {"4T.Sum() for uint", Uint, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint(120)}, {"4T.Sum(1,3) for uint", Uint, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for uint", Uint, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint{44, 76}}, {"common case: T.Sum() for uint8", Uint8, Shape{2, 3}, []int{}, ScalarShape(), uint8(15)}, {"A.Sum(0) for uint8", Uint8, Shape{2, 3}, []int{0}, Shape{3}, []uint8{3, 5, 7}}, {"A.Sum(1) for uint8", Uint8, Shape{2, 3}, []int{1}, Shape{2}, []uint8{3, 12}}, {"A.Sum(0,1) for uint8", Uint8, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint8(15)}, {"A.Sum(1,0) for uint8", Uint8, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint8(15)}, {"3T.Sum(1,2) for uint8", Uint8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint8{66, 210}}, {"4T.Sum() for uint8", Uint8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint8(120)}, {"4T.Sum(1,3) for uint8", Uint8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint8{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for uint8", Uint8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint8{44, 76}}, {"common case: T.Sum() for uint16", Uint16, Shape{2, 3}, []int{}, ScalarShape(), uint16(15)}, {"A.Sum(0) for uint16", Uint16, Shape{2, 3}, []int{0}, Shape{3}, []uint16{3, 5, 7}}, {"A.Sum(1) for uint16", Uint16, Shape{2, 3}, []int{1}, Shape{2}, []uint16{3, 12}}, {"A.Sum(0,1) for uint16", Uint16, Shape{2, 3}, []int{0, 1}, 
ScalarShape(), uint16(15)}, {"A.Sum(1,0) for uint16", Uint16, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint16(15)}, {"3T.Sum(1,2) for uint16", Uint16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint16{66, 210}}, {"4T.Sum() for uint16", Uint16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint16(120)}, {"4T.Sum(1,3) for uint16", Uint16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint16{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for uint16", Uint16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint16{44, 76}}, {"common case: T.Sum() for uint32", Uint32, Shape{2, 3}, []int{}, ScalarShape(), uint32(15)}, {"A.Sum(0) for uint32", Uint32, Shape{2, 3}, []int{0}, Shape{3}, []uint32{3, 5, 7}}, {"A.Sum(1) for uint32", Uint32, Shape{2, 3}, []int{1}, Shape{2}, []uint32{3, 12}}, {"A.Sum(0,1) for uint32", Uint32, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint32(15)}, {"A.Sum(1,0) for uint32", Uint32, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint32(15)}, {"3T.Sum(1,2) for uint32", Uint32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint32{66, 210}}, {"4T.Sum() for uint32", Uint32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint32(120)}, {"4T.Sum(1,3) for uint32", Uint32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint32{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for uint32", Uint32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint32{44, 76}}, {"common case: T.Sum() for uint64", Uint64, Shape{2, 3}, []int{}, ScalarShape(), uint64(15)}, {"A.Sum(0) for uint64", Uint64, Shape{2, 3}, []int{0}, Shape{3}, []uint64{3, 5, 7}}, {"A.Sum(1) for uint64", Uint64, Shape{2, 3}, []int{1}, Shape{2}, []uint64{3, 12}}, {"A.Sum(0,1) for uint64", Uint64, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint64(15)}, {"A.Sum(1,0) for uint64", Uint64, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint64(15)}, {"3T.Sum(1,2) for uint64", Uint64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint64{66, 210}}, {"4T.Sum() for uint64", Uint64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint64(120)}, {"4T.Sum(1,3) for uint64", Uint64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint64{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for uint64", Uint64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint64{44, 76}}, {"common case: T.Sum() for float32", Float32, Shape{2, 3}, []int{}, ScalarShape(), float32(15)}, {"A.Sum(0) for float32", Float32, Shape{2, 3}, []int{0}, Shape{3}, []float32{3, 5, 7}}, {"A.Sum(1) for float32", Float32, Shape{2, 3}, []int{1}, Shape{2}, []float32{3, 12}}, {"A.Sum(0,1) for float32", Float32, Shape{2, 3}, []int{0, 1}, ScalarShape(), float32(15)}, {"A.Sum(1,0) for float32", Float32, Shape{2, 3}, []int{1, 0}, ScalarShape(), float32(15)}, {"3T.Sum(1,2) for float32", Float32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float32{66, 210}}, {"4T.Sum() for float32", Float32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float32(120)}, {"4T.Sum(1,3) for float32", Float32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float32{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for float32", Float32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float32{44, 76}}, {"common case: T.Sum() for float64", Float64, Shape{2, 3}, []int{}, ScalarShape(), float64(15)}, {"A.Sum(0) for float64", Float64, Shape{2, 3}, []int{0}, Shape{3}, []float64{3, 5, 7}}, {"A.Sum(1) for float64", Float64, Shape{2, 3}, []int{1}, Shape{2}, []float64{3, 12}}, {"A.Sum(0,1) for float64", Float64, Shape{2, 3}, []int{0, 1}, ScalarShape(), float64(15)}, {"A.Sum(1,0) for float64", Float64, Shape{2, 3}, []int{1, 0}, ScalarShape(), float64(15)}, {"3T.Sum(1,2) for float64", Float64, Shape{2, 3, 4}, []int{1, 2}, 
Shape{2}, []float64{66, 210}}, {"4T.Sum() for float64", Float64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float64(120)}, {"4T.Sum(1,3) for float64", Float64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float64{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for float64", Float64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float64{44, 76}}, {"common case: T.Sum() for complex64", Complex64, Shape{2, 3}, []int{}, ScalarShape(), complex64(15)}, {"A.Sum(0) for complex64", Complex64, Shape{2, 3}, []int{0}, Shape{3}, []complex64{3, 5, 7}}, {"A.Sum(1) for complex64", Complex64, Shape{2, 3}, []int{1}, Shape{2}, []complex64{3, 12}}, {"A.Sum(0,1) for complex64", Complex64, Shape{2, 3}, []int{0, 1}, ScalarShape(), complex64(15)}, {"A.Sum(1,0) for complex64", Complex64, Shape{2, 3}, []int{1, 0}, ScalarShape(), complex64(15)}, {"3T.Sum(1,2) for complex64", Complex64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []complex64{66, 210}}, {"4T.Sum() for complex64", Complex64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), complex64(120)}, {"4T.Sum(1,3) for complex64", Complex64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []complex64{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for complex64", Complex64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []complex64{44, 76}}, {"common case: T.Sum() for complex128", Complex128, Shape{2, 3}, []int{}, ScalarShape(), complex128(15)}, {"A.Sum(0) for complex128", Complex128, Shape{2, 3}, []int{0}, Shape{3}, []complex128{3, 5, 7}}, {"A.Sum(1) for complex128", Complex128, Shape{2, 3}, []int{1}, Shape{2}, []complex128{3, 12}}, {"A.Sum(0,1) for complex128", Complex128, Shape{2, 3}, []int{0, 1}, ScalarShape(), complex128(15)}, {"A.Sum(1,0) for complex128", Complex128, Shape{2, 3}, []int{1, 0}, ScalarShape(), complex128(15)}, {"3T.Sum(1,2) for complex128", Complex128, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []complex128{66, 210}}, {"4T.Sum() for complex128", Complex128, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), complex128(120)}, {"4T.Sum(1,3) for complex128", Complex128, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []complex128{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for complex128", Complex128, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []complex128{44, 76}}, } func TestDense_Sum(t *testing.T) { assert := assert.New(t) var T, T2 *Dense var err error for _, sts := range sumTests { T = New(WithShape(sts.shape...), WithBacking(Range(sts.of, 0, sts.shape.TotalSize()))) if T2, err = T.Sum(sts.along...); err != nil { t.Error(err) continue } assert.True(sts.correctShape.Eq(T2.Shape())) assert.Equal(sts.correct, T2.Data()) } // idiots _, err = T.Sum(1000) assert.NotNil(err) } var maxTests = []struct { name string of Dtype shape Shape along []int correctShape Shape correct interface{} }{ {"common case: T.Max() for int", Int, Shape{2, 3}, []int{}, ScalarShape(), int(5)}, {"A.Max(0)", Int, Shape{2, 3}, []int{0}, Shape{3}, []int{3, 4, 5}}, {"A.Max(1)", Int, Shape{2, 3}, []int{1}, Shape{2}, []int{2, 5}}, {"A.Max(0,1)", Int, Shape{2, 3}, []int{0, 1}, ScalarShape(), int(5)}, {"A.Max(1,0)", Int, Shape{2, 3}, []int{1, 0}, ScalarShape(), int(5)}, {"3T.Max(1,2)", Int, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int{11, 23}}, {"4T.Max()", Int, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int(15)}, {"4T.Max(1,3)", Int, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Int, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int{11, 15}}, {"common case: T.Max() for int8", Int8, Shape{2, 3}, []int{}, ScalarShape(), int8(5)}, {"A.Max(0)", Int8, Shape{2, 3}, []int{0}, Shape{3}, []int8{3, 4, 5}}, 
{"A.Max(1)", Int8, Shape{2, 3}, []int{1}, Shape{2}, []int8{2, 5}}, {"A.Max(0,1)", Int8, Shape{2, 3}, []int{0, 1}, ScalarShape(), int8(5)}, {"A.Max(1,0)", Int8, Shape{2, 3}, []int{1, 0}, ScalarShape(), int8(5)}, {"3T.Max(1,2)", Int8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int8{11, 23}}, {"4T.Max()", Int8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int8(15)}, {"4T.Max(1,3)", Int8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int8{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Int8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int8{11, 15}}, {"common case: T.Max() for int16", Int16, Shape{2, 3}, []int{}, ScalarShape(), int16(5)}, {"A.Max(0)", Int16, Shape{2, 3}, []int{0}, Shape{3}, []int16{3, 4, 5}}, {"A.Max(1)", Int16, Shape{2, 3}, []int{1}, Shape{2}, []int16{2, 5}}, {"A.Max(0,1)", Int16, Shape{2, 3}, []int{0, 1}, ScalarShape(), int16(5)}, {"A.Max(1,0)", Int16, Shape{2, 3}, []int{1, 0}, ScalarShape(), int16(5)}, {"3T.Max(1,2)", Int16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int16{11, 23}}, {"4T.Max()", Int16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int16(15)}, {"4T.Max(1,3)", Int16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int16{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Int16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int16{11, 15}}, {"common case: T.Max() for int32", Int32, Shape{2, 3}, []int{}, ScalarShape(), int32(5)}, {"A.Max(0)", Int32, Shape{2, 3}, []int{0}, Shape{3}, []int32{3, 4, 5}}, {"A.Max(1)", Int32, Shape{2, 3}, []int{1}, Shape{2}, []int32{2, 5}}, {"A.Max(0,1)", Int32, Shape{2, 3}, []int{0, 1}, ScalarShape(), int32(5)}, {"A.Max(1,0)", Int32, Shape{2, 3}, []int{1, 0}, ScalarShape(), int32(5)}, {"3T.Max(1,2)", Int32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int32{11, 23}}, {"4T.Max()", Int32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int32(15)}, {"4T.Max(1,3)", Int32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int32{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Int32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int32{11, 15}}, {"common case: T.Max() for int64", Int64, Shape{2, 3}, []int{}, ScalarShape(), int64(5)}, {"A.Max(0)", Int64, Shape{2, 3}, []int{0}, Shape{3}, []int64{3, 4, 5}}, {"A.Max(1)", Int64, Shape{2, 3}, []int{1}, Shape{2}, []int64{2, 5}}, {"A.Max(0,1)", Int64, Shape{2, 3}, []int{0, 1}, ScalarShape(), int64(5)}, {"A.Max(1,0)", Int64, Shape{2, 3}, []int{1, 0}, ScalarShape(), int64(5)}, {"3T.Max(1,2)", Int64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int64{11, 23}}, {"4T.Max()", Int64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int64(15)}, {"4T.Max(1,3)", Int64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int64{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Int64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int64{11, 15}}, {"common case: T.Max() for uint", Uint, Shape{2, 3}, []int{}, ScalarShape(), uint(5)}, {"A.Max(0)", Uint, Shape{2, 3}, []int{0}, Shape{3}, []uint{3, 4, 5}}, {"A.Max(1)", Uint, Shape{2, 3}, []int{1}, Shape{2}, []uint{2, 5}}, {"A.Max(0,1)", Uint, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint(5)}, {"A.Max(1,0)", Uint, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint(5)}, {"3T.Max(1,2)", Uint, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint{11, 23}}, {"4T.Max()", Uint, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint(15)}, {"4T.Max(1,3)", Uint, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Uint, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint{11, 15}}, {"common case: T.Max() for uint8", Uint8, Shape{2, 3}, []int{}, ScalarShape(), uint8(5)}, {"A.Max(0)", Uint8, Shape{2, 3}, []int{0}, Shape{3}, []uint8{3, 4, 5}}, 
{"A.Max(1)", Uint8, Shape{2, 3}, []int{1}, Shape{2}, []uint8{2, 5}}, {"A.Max(0,1)", Uint8, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint8(5)}, {"A.Max(1,0)", Uint8, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint8(5)}, {"3T.Max(1,2)", Uint8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint8{11, 23}}, {"4T.Max()", Uint8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint8(15)}, {"4T.Max(1,3)", Uint8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint8{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Uint8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint8{11, 15}}, {"common case: T.Max() for uint16", Uint16, Shape{2, 3}, []int{}, ScalarShape(), uint16(5)}, {"A.Max(0)", Uint16, Shape{2, 3}, []int{0}, Shape{3}, []uint16{3, 4, 5}}, {"A.Max(1)", Uint16, Shape{2, 3}, []int{1}, Shape{2}, []uint16{2, 5}}, {"A.Max(0,1)", Uint16, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint16(5)}, {"A.Max(1,0)", Uint16, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint16(5)}, {"3T.Max(1,2)", Uint16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint16{11, 23}}, {"4T.Max()", Uint16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint16(15)}, {"4T.Max(1,3)", Uint16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint16{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Uint16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint16{11, 15}}, {"common case: T.Max() for uint32", Uint32, Shape{2, 3}, []int{}, ScalarShape(), uint32(5)}, {"A.Max(0)", Uint32, Shape{2, 3}, []int{0}, Shape{3}, []uint32{3, 4, 5}}, {"A.Max(1)", Uint32, Shape{2, 3}, []int{1}, Shape{2}, []uint32{2, 5}}, {"A.Max(0,1)", Uint32, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint32(5)}, {"A.Max(1,0)", Uint32, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint32(5)}, {"3T.Max(1,2)", Uint32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint32{11, 23}}, {"4T.Max()", Uint32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint32(15)}, {"4T.Max(1,3)", Uint32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint32{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Uint32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint32{11, 15}}, {"common case: T.Max() for uint64", Uint64, Shape{2, 3}, []int{}, ScalarShape(), uint64(5)}, {"A.Max(0)", Uint64, Shape{2, 3}, []int{0}, Shape{3}, []uint64{3, 4, 5}}, {"A.Max(1)", Uint64, Shape{2, 3}, []int{1}, Shape{2}, []uint64{2, 5}}, {"A.Max(0,1)", Uint64, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint64(5)}, {"A.Max(1,0)", Uint64, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint64(5)}, {"3T.Max(1,2)", Uint64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint64{11, 23}}, {"4T.Max()", Uint64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint64(15)}, {"4T.Max(1,3)", Uint64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint64{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Uint64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint64{11, 15}}, {"common case: T.Max() for float32", Float32, Shape{2, 3}, []int{}, ScalarShape(), float32(5)}, {"A.Max(0)", Float32, Shape{2, 3}, []int{0}, Shape{3}, []float32{3, 4, 5}}, {"A.Max(1)", Float32, Shape{2, 3}, []int{1}, Shape{2}, []float32{2, 5}}, {"A.Max(0,1)", Float32, Shape{2, 3}, []int{0, 1}, ScalarShape(), float32(5)}, {"A.Max(1,0)", Float32, Shape{2, 3}, []int{1, 0}, ScalarShape(), float32(5)}, {"3T.Max(1,2)", Float32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float32{11, 23}}, {"4T.Max()", Float32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float32(15)}, {"4T.Max(1,3)", Float32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float32{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Float32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float32{11, 15}}, {"common case: T.Max() for float64", 
Float64, Shape{2, 3}, []int{}, ScalarShape(), float64(5)}, {"A.Max(0)", Float64, Shape{2, 3}, []int{0}, Shape{3}, []float64{3, 4, 5}}, {"A.Max(1)", Float64, Shape{2, 3}, []int{1}, Shape{2}, []float64{2, 5}}, {"A.Max(0,1)", Float64, Shape{2, 3}, []int{0, 1}, ScalarShape(), float64(5)}, {"A.Max(1,0)", Float64, Shape{2, 3}, []int{1, 0}, ScalarShape(), float64(5)}, {"3T.Max(1,2)", Float64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float64{11, 23}}, {"4T.Max()", Float64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float64(15)}, {"4T.Max(1,3)", Float64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float64{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", Float64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float64{11, 15}}, } func TestDense_Max(t *testing.T) { assert := assert.New(t) var T, T2 *Dense var err error for _, mts := range maxTests { T = New(WithShape(mts.shape...), WithBacking(Range(mts.of, 0, mts.shape.TotalSize()))) if T2, err = T.Max(mts.along...); err != nil { t.Error(err) continue } assert.True(mts.correctShape.Eq(T2.Shape())) assert.Equal(mts.correct, T2.Data()) } /* IDIOT TESTING TIME */ _, err = T.Max(1000) assert.NotNil(err) } var minTests = []struct { name string of Dtype shape Shape along []int correctShape Shape correct interface{} }{ {"common case: T.Min() for int", Int, Shape{2, 3}, []int{}, ScalarShape(), int(0)}, {"A.Min(0)", Int, Shape{2, 3}, []int{0}, Shape{3}, []int{0, 1, 2}}, {"A.Min(1)", Int, Shape{2, 3}, []int{1}, Shape{2}, []int{0, 3}}, {"A.Min(0,1)", Int, Shape{2, 3}, []int{0, 1}, ScalarShape(), int(0)}, {"A.Min(1,0)", Int, Shape{2, 3}, []int{1, 0}, ScalarShape(), int(0)}, {"3T.Min(1,2)", Int, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int{0, 12}}, {"4T.Min()", Int, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int(0)}, {"4T.Min(1,3)", Int, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Int, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int{0, 4}}, {"common case: T.Min() for int8", Int8, Shape{2, 3}, []int{}, ScalarShape(), int8(0)}, {"A.Min(0)", Int8, Shape{2, 3}, []int{0}, Shape{3}, []int8{0, 1, 2}}, {"A.Min(1)", Int8, Shape{2, 3}, []int{1}, Shape{2}, []int8{0, 3}}, {"A.Min(0,1)", Int8, Shape{2, 3}, []int{0, 1}, ScalarShape(), int8(0)}, {"A.Min(1,0)", Int8, Shape{2, 3}, []int{1, 0}, ScalarShape(), int8(0)}, {"3T.Min(1,2)", Int8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int8{0, 12}}, {"4T.Min()", Int8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int8(0)}, {"4T.Min(1,3)", Int8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int8{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Int8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int8{0, 4}}, {"common case: T.Min() for int16", Int16, Shape{2, 3}, []int{}, ScalarShape(), int16(0)}, {"A.Min(0)", Int16, Shape{2, 3}, []int{0}, Shape{3}, []int16{0, 1, 2}}, {"A.Min(1)", Int16, Shape{2, 3}, []int{1}, Shape{2}, []int16{0, 3}}, {"A.Min(0,1)", Int16, Shape{2, 3}, []int{0, 1}, ScalarShape(), int16(0)}, {"A.Min(1,0)", Int16, Shape{2, 3}, []int{1, 0}, ScalarShape(), int16(0)}, {"3T.Min(1,2)", Int16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int16{0, 12}}, {"4T.Min()", Int16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int16(0)}, {"4T.Min(1,3)", Int16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int16{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Int16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int16{0, 4}}, {"common case: T.Min() for int32", Int32, Shape{2, 3}, []int{}, ScalarShape(), int32(0)}, {"A.Min(0)", Int32, Shape{2, 3}, []int{0}, Shape{3}, []int32{0, 1, 2}}, {"A.Min(1)", Int32, Shape{2, 3}, []int{1}, Shape{2}, 
[]int32{0, 3}}, {"A.Min(0,1)", Int32, Shape{2, 3}, []int{0, 1}, ScalarShape(), int32(0)}, {"A.Min(1,0)", Int32, Shape{2, 3}, []int{1, 0}, ScalarShape(), int32(0)}, {"3T.Min(1,2)", Int32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int32{0, 12}}, {"4T.Min()", Int32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int32(0)}, {"4T.Min(1,3)", Int32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int32{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Int32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int32{0, 4}}, {"common case: T.Min() for int64", Int64, Shape{2, 3}, []int{}, ScalarShape(), int64(0)}, {"A.Min(0)", Int64, Shape{2, 3}, []int{0}, Shape{3}, []int64{0, 1, 2}}, {"A.Min(1)", Int64, Shape{2, 3}, []int{1}, Shape{2}, []int64{0, 3}}, {"A.Min(0,1)", Int64, Shape{2, 3}, []int{0, 1}, ScalarShape(), int64(0)}, {"A.Min(1,0)", Int64, Shape{2, 3}, []int{1, 0}, ScalarShape(), int64(0)}, {"3T.Min(1,2)", Int64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int64{0, 12}}, {"4T.Min()", Int64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int64(0)}, {"4T.Min(1,3)", Int64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int64{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Int64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int64{0, 4}}, {"common case: T.Min() for uint", Uint, Shape{2, 3}, []int{}, ScalarShape(), uint(0)}, {"A.Min(0)", Uint, Shape{2, 3}, []int{0}, Shape{3}, []uint{0, 1, 2}}, {"A.Min(1)", Uint, Shape{2, 3}, []int{1}, Shape{2}, []uint{0, 3}}, {"A.Min(0,1)", Uint, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint(0)}, {"A.Min(1,0)", Uint, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint(0)}, {"3T.Min(1,2)", Uint, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint{0, 12}}, {"4T.Min()", Uint, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint(0)}, {"4T.Min(1,3)", Uint, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Uint, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint{0, 4}}, {"common case: T.Min() for uint8", Uint8, Shape{2, 3}, []int{}, ScalarShape(), uint8(0)}, {"A.Min(0)", Uint8, Shape{2, 3}, []int{0}, Shape{3}, []uint8{0, 1, 2}}, {"A.Min(1)", Uint8, Shape{2, 3}, []int{1}, Shape{2}, []uint8{0, 3}}, {"A.Min(0,1)", Uint8, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint8(0)}, {"A.Min(1,0)", Uint8, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint8(0)}, {"3T.Min(1,2)", Uint8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint8{0, 12}}, {"4T.Min()", Uint8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint8(0)}, {"4T.Min(1,3)", Uint8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint8{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Uint8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint8{0, 4}}, {"common case: T.Min() for uint16", Uint16, Shape{2, 3}, []int{}, ScalarShape(), uint16(0)}, {"A.Min(0)", Uint16, Shape{2, 3}, []int{0}, Shape{3}, []uint16{0, 1, 2}}, {"A.Min(1)", Uint16, Shape{2, 3}, []int{1}, Shape{2}, []uint16{0, 3}}, {"A.Min(0,1)", Uint16, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint16(0)}, {"A.Min(1,0)", Uint16, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint16(0)}, {"3T.Min(1,2)", Uint16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint16{0, 12}}, {"4T.Min()", Uint16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint16(0)}, {"4T.Min(1,3)", Uint16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint16{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Uint16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint16{0, 4}}, {"common case: T.Min() for uint32", Uint32, Shape{2, 3}, []int{}, ScalarShape(), uint32(0)}, {"A.Min(0)", Uint32, Shape{2, 3}, []int{0}, Shape{3}, []uint32{0, 1, 2}}, {"A.Min(1)", Uint32, Shape{2, 3}, []int{1}, 
Shape{2}, []uint32{0, 3}}, {"A.Min(0,1)", Uint32, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint32(0)}, {"A.Min(1,0)", Uint32, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint32(0)}, {"3T.Min(1,2)", Uint32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint32{0, 12}}, {"4T.Min()", Uint32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint32(0)}, {"4T.Min(1,3)", Uint32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint32{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Uint32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint32{0, 4}}, {"common case: T.Min() for uint64", Uint64, Shape{2, 3}, []int{}, ScalarShape(), uint64(0)}, {"A.Min(0)", Uint64, Shape{2, 3}, []int{0}, Shape{3}, []uint64{0, 1, 2}}, {"A.Min(1)", Uint64, Shape{2, 3}, []int{1}, Shape{2}, []uint64{0, 3}}, {"A.Min(0,1)", Uint64, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint64(0)}, {"A.Min(1,0)", Uint64, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint64(0)}, {"3T.Min(1,2)", Uint64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint64{0, 12}}, {"4T.Min()", Uint64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint64(0)}, {"4T.Min(1,3)", Uint64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint64{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Uint64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint64{0, 4}}, {"common case: T.Min() for float32", Float32, Shape{2, 3}, []int{}, ScalarShape(), float32(0)}, {"A.Min(0)", Float32, Shape{2, 3}, []int{0}, Shape{3}, []float32{0, 1, 2}}, {"A.Min(1)", Float32, Shape{2, 3}, []int{1}, Shape{2}, []float32{0, 3}}, {"A.Min(0,1)", Float32, Shape{2, 3}, []int{0, 1}, ScalarShape(), float32(0)}, {"A.Min(1,0)", Float32, Shape{2, 3}, []int{1, 0}, ScalarShape(), float32(0)}, {"3T.Min(1,2)", Float32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float32{0, 12}}, {"4T.Min()", Float32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float32(0)}, {"4T.Min(1,3)", Float32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float32{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Float32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float32{0, 4}}, {"common case: T.Min() for float64", Float64, Shape{2, 3}, []int{}, ScalarShape(), float64(0)}, {"A.Min(0)", Float64, Shape{2, 3}, []int{0}, Shape{3}, []float64{0, 1, 2}}, {"A.Min(1)", Float64, Shape{2, 3}, []int{1}, Shape{2}, []float64{0, 3}}, {"A.Min(0,1)", Float64, Shape{2, 3}, []int{0, 1}, ScalarShape(), float64(0)}, {"A.Min(1,0)", Float64, Shape{2, 3}, []int{1, 0}, ScalarShape(), float64(0)}, {"3T.Min(1,2)", Float64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float64{0, 12}}, {"4T.Min()", Float64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float64(0)}, {"4T.Min(1,3)", Float64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float64{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", Float64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float64{0, 4}}, } func TestDense_Min(t *testing.T) { assert := assert.New(t) var T, T2 *Dense var err error for _, mts := range minTests { T = New(WithShape(mts.shape...), WithBacking(Range(mts.of, 0, mts.shape.TotalSize()))) if T2, err = T.Min(mts.along...); err != nil { t.Error(err) continue } assert.True(mts.correctShape.Eq(T2.Shape())) assert.Equal(mts.correct, T2.Data()) } /* IDIOT TESTING TIME */ _, err = T.Min(1000) assert.NotNil(err) } tensor-0.9.24/dense_selbyidx_test.go000066400000000000000000000105651426512615100175210ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) type selByIndicesTest struct { Name string Data interface{} Shape Shape Indices []int Axis int WillErr bool Correct interface{} CorrectShape Shape } var selByIndicesTests = 
[]selByIndicesTest{ {Name: "Basic", Data: Range(Float64, 0, 4), Shape: Shape{2, 2}, Indices: []int{0, 1}, Axis: 0, WillErr: false, Correct: []float64{0, 1, 2, 3}, CorrectShape: Shape{2, 2}, }, {Name: "3-tensor, axis 0", Data: Range(Float64, 0, 24), Shape: Shape{3, 2, 4}, Indices: []int{1, 1}, Axis: 0, WillErr: false, Correct: []float64{8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15}, CorrectShape: Shape{2, 2, 4}}, {Name: "3-tensor, axis 1", Data: Range(Float64, 0, 24), Shape: Shape{3, 2, 4}, Indices: []int{1, 1}, Axis: 1, WillErr: false, Correct: []float64{4, 5, 6, 7, 4, 5, 6, 7, 12, 13, 14, 15, 12, 13, 14, 15, 20, 21, 22, 23, 20, 21, 22, 23}, CorrectShape: Shape{3, 2, 4}}, {Name: "3-tensor, axis 2", Data: Range(Float64, 0, 24), Shape: Shape{3, 2, 4}, Indices: []int{1, 1}, Axis: 2, WillErr: false, Correct: []float64{1, 1, 5, 5, 9, 9, 13, 13, 17, 17, 21, 21}, CorrectShape: Shape{3, 2, 2}}, {Name: "Vector, axis 0", Data: Range(Int, 0, 5), Shape: Shape{5}, Indices: []int{1, 1}, Axis: 0, WillErr: false, Correct: []int{1, 1}, CorrectShape: Shape{2}}, {Name: "Vector, axis 1", Data: Range(Int, 0, 5), Shape: Shape{5}, Indices: []int{1, 1}, Axis: 1, WillErr: true, Correct: []int{1, 1}, CorrectShape: Shape{2}}, {Name: "(4,2) Matrix with (10) indices", Data: Range(Float32, 0, 8), Shape: Shape{4, 2}, Indices: []int{1, 1, 1, 1, 0, 2, 2, 2, 2, 0}, Axis: 0, WillErr: false, Correct: []float32{2, 3, 2, 3, 2, 3, 2, 3, 0, 1, 4, 5, 4, 5, 4, 5, 4, 5, 0, 1}, CorrectShape: Shape{10, 2}}, {Name: "(2,1) Matrix (colvec) with (10) indices", Data: Range(Float64, 0, 2), Shape: Shape{2, 1}, Indices: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, Axis: 0, WillErr: false, Correct: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, CorrectShape: Shape{10}, }, } func TestDense_SelectByIndices(t *testing.T) { assert := assert.New(t) for i, tc := range selByIndicesTests { T := New(WithShape(tc.Shape...), WithBacking(tc.Data)) indices := New(WithBacking(tc.Indices)) ret, err := ByIndices(T, indices, tc.Axis) if checkErr(t, tc.WillErr, err, tc.Name, i) { continue } assert.Equal(tc.Correct, ret.Data()) assert.True(tc.CorrectShape.Eq(ret.Shape())) } } var selByIndicesBTests = []struct { selByIndicesTest CorrectGrad interface{} CorrectGradShape Shape }{ // Basic { CorrectGrad: []float64{1, 1, 1, 1}, }, // 3-tensor, axis 0 { CorrectGrad: []float64{0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0}, }, // 3-tensor, axis 1 { CorrectGrad: []float64{0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2}, }, // 3-tensor, axis 2 { CorrectGrad: []float64{0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0}, }, // vector, axis 0 { CorrectGrad: []int{0, 2, 0, 0, 0}, }, // vector, axis 1 { CorrectGrad: []float32{4, 6, 8, 12, 8, 12, 0, 0}, }, // (4,2) Matrix with (10) indices { CorrectGrad: []float32{2, 2, 4, 4, 4, 4, 0, 0}, }, // (2, 1) Matrix (colvec) with (10) indices { CorrectGrad: []float64{0, 10}, }, } func init() { for i := range selByIndicesBTests { selByIndicesBTests[i].selByIndicesTest = selByIndicesTests[i] selByIndicesBTests[i].CorrectGradShape = selByIndicesTests[i].Shape } } func TestDense_SelectByIndicesB(t *testing.T) { assert := assert.New(t) for i, tc := range selByIndicesBTests { T := New(WithShape(tc.Shape...), WithBacking(tc.Data)) indices := New(WithBacking(tc.Indices)) ret, err := ByIndices(T, indices, tc.Axis) if checkErr(t, tc.WillErr, err, tc.Name, i) { continue } outGrad := ret.Clone().(*Dense) switch outGrad.Dtype() { case Float64: outGrad.Memset(1.0) case Float32:
outGrad.Memset(float32(1.0)) } grad, err := ByIndicesB(T, outGrad, indices, tc.Axis) if checkErr(t, tc.WillErr, err, tc.Name, i) { continue } assert.Equal(tc.CorrectGrad, grad.Data(), "%v - x:\n%v\nindices:\n%#v\ny:\n%#v\ngrad:\n%v", tc.Name, T, indices, ret, grad) assert.True(tc.CorrectGradShape.Eq(grad.Shape()), "%v - Grad shape should be %v. Got %v instead.\n\nx:\n%v\nindices:\n%#v\ny:\n%#v\ngrad:\n%v", tc.Name, tc.CorrectGradShape, grad.Shape(), T, indices, ret, grad) } } tensor-0.9.24/dense_softmax_test.go000066400000000000000000000313321426512615100173520ustar00rootroot00000000000000package tensor import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) func TestSoftMax(t *testing.T) { testCases := []struct { fn func(x Tensor, axis int, opts ...FuncOpt) (Tensor, error) x Tensor axis int expectedOutput interface{} }{ { fn: LogSoftMax, x: New( Of(Float64), WithShape(3, 4), WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: -1, expectedOutput: []float64{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628}, }, { fn: LogSoftMax, x: New( Of(Float32), WithShape(3, 4), WithBacking([]float32{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: -1, expectedOutput: []float32{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628}, }, { fn: LogSoftMax, x: New( Of(Float32), WithShape(3, 2, 2), WithBacking([]float32{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: -1, expectedOutput: []float32{-0.7443967, -0.64439666, -0.7443967, -0.64439666, -0.7443967, -0.64439666, -0.7443966, -0.64439666, -0.7443966, -0.64439666, -0.7443967, -0.64439666}, }, { fn: LogSoftMax, x: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: 1, expectedOutput: []float64{-0.7981388693815918, -0.7981388693815918, -0.5981388693815918, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815918}, }, { fn: SoftMax, x: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: 1, expectedOutput: []float64{0.4501660026875221, 0.45016600268752205, 0.549833997312478, 0.5498339973124778, 0.45016600268752205, 0.45016600268752205, 0.5498339973124778, 0.5498339973124778, 0.45016600268752205, 0.4501660026875221, 0.5498339973124778, 0.549833997312478}, }, { fn: SoftMax, x: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: -1, expectedOutput: []float64{0.47502081252106, 0.52497918747894, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.52497918747894}, }, { fn: SoftMax, x: New( Of(Float32), WithShape(3, 4), WithBacking([]float32{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: -1, expectedOutput: []float32{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 
0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514}, }, { fn: SoftMax, x: New( Of(Float64), WithShape(3, 4), WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}), ), axis: -1, expectedOutput: []float64{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514}, }, } for i, tC := range testCases { t.Run(fmt.Sprintf("Example #%d - %v %v", i+1, tC.x.Shape(), tC.x.Dtype()), func(t *testing.T) { c := assert.New(t) output, err := tC.fn(tC.x, tC.axis) t.Logf("output: %#v", output.Data()) c.NoError(err) c.NotNil(output) c.Equal(tC.x.Shape(), output.Shape()) c.InDeltaSlice(tC.expectedOutput, output.Data(), 1e-6) }) } } func TestSoftMaxB(t *testing.T) { testCases := []struct { fn func(output, grad Tensor, axis int, opts ...FuncOpt) (Tensor, error) output Tensor grad Tensor axis int expectedOutput interface{} }{ { fn: SoftMaxB, output: New( Of(Float64), WithShape(3, 4), WithBacking([]float64{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514}), ), grad: New( Of(Float64), WithShape(3, 4), WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: -1, expectedOutput: []float64{-0.003474116568224552, -0.0014762147035963322, 0.0009803563066858392, 0.00396997522759976, -0.003474116880376028, -0.001476214931490494, 0.0009803561238580223, 0.003969975025543781, -0.0034741159267098936, -0.0014762139946130218, 0.0009803570151630109, 0.003969976093553957}, }, { fn: LogSoftMaxB, output: New( Of(Float64), WithShape(3, 4), WithBacking([]float64{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628}), ), grad: New( Of(Float64), WithShape(3, 4), WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: -1, expectedOutput: []float64{-0.011383822036598441, -0.003632778232153768, 0.0038817407844924366, 0.01113485948425977, -0.005597937295155945, -0.001445223403599799, 0.0020925260396803457, 0.004950634659075405, 0.00018794744628654992, 0.0007423314249541871, 0.00030331129486827163, -0.0012335901661089598}, }, { fn: SoftMaxB, output: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.47502081252106, 0.52497918747894, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.52497918747894}), ), grad: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: -1, expectedOutput: []float64{-0.002493760401928919, 0.0024937604019289205, -0.0024937604019289183, 0.002493760401928922, -0.002493760401928915, 0.002493760401928922, -0.002493760401928912, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289183}, }, { fn: SoftMaxB, output: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.4501660026875221, 0.45016600268752205, 0.549833997312478, 0.5498339973124778, 0.45016600268752205, 0.45016600268752205, 0.5498339973124778, 0.5498339973124778, 0.45016600268752205, 0.4501660026875221, 0.5498339973124778, 0.549833997312478}), ), grad: New( 
Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: 1, expectedOutput: []float64{-0.004950331454237199, -0.004950331454237198, 0.004950331454237199, 0.0049503314542372, -0.004950331454237196, -0.004950331454237193, 0.004950331454237203, 0.0049503314542372065, -0.004950331454237193, -0.0049503314542372, 0.0049503314542372065, 0.004950331454237193}, }, { fn: LogSoftMaxB, output: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{-0.7981388693815918, -0.7981388693815918, -0.5981388693815918, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815918}), ), grad: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: 1, expectedOutput: []float64{-0.008006640107500884, -0.007009960161251325, 0.00800664010750088, 0.007009960161251332, -0.004019920322502654, -0.003023240376253103, 0.004019920322502661, 0.0030232403762530968, -3.32005375044292e-05, 0.0009634794087451421, 3.320053750442642e-05, -0.0009634794087451543}, }, { fn: LogSoftMaxB, output: New( Of(Float32), WithShape(3, 2, 2), WithBacking([]float32{-0.7981388693815918, -0.7981388693815918, -0.5981388693815918, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815918}), ), grad: New( Of(Float32), WithShape(3, 2, 2), WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: 1, expectedOutput: []float64{-0.008006640107500884, -0.007009960161251325, 0.00800664010750088, 0.007009960161251332, -0.004019920322502654, -0.003023240376253103, 0.004019920322502661, 0.0030232403762530968, -3.32005375044292e-05, 0.0009634794087451421, 3.320053750442642e-05, -0.0009634794087451543}, }, { fn: SoftMaxB, output: New( Of(Float32), WithShape(3, 2, 2), WithBacking([]float32{0.4501660026875221, 0.45016600268752205, 0.549833997312478, 0.5498339973124778, 0.45016600268752205, 0.45016600268752205, 0.5498339973124778, 0.5498339973124778, 0.45016600268752205, 0.4501660026875221, 0.5498339973124778, 0.549833997312478}), ), grad: New( Of(Float32), WithShape(3, 2, 2), WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: 1, expectedOutput: []float32{-0.004950331454237199, -0.004950331454237198, 0.004950331454237199, 0.0049503314542372, -0.004950331454237196, -0.004950331454237193, 0.004950331454237203, 0.0049503314542372065, -0.004950331454237193, -0.0049503314542372, 0.0049503314542372065, 0.004950331454237193}, }, { fn: SoftMaxB, output: New( Of(Float32), WithShape(3, 4), WithBacking([]float32{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514}), ), grad: New( Of(Float64), WithShape(3, 4), WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: -1, expectedOutput: []float32{-0.003474116568224552, -0.0014762147035963322, 0.0009803563066858392, 0.00396997522759976, -0.003474116880376028, -0.001476214931490494, 0.0009803561238580223, 0.003969975025543781, -0.0034741159267098936, -0.0014762139946130218, 0.0009803570151630109, 0.003969976093553957}, }, { fn: LogSoftMaxB, output: New( Of(Float64), 
WithShape(3, 4), WithBacking([]float32{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628}), ), grad: New( Of(Float64), WithShape(3, 4), WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: -1, expectedOutput: []float32{-0.011383822036598441, -0.003632778232153768, 0.0038817407844924366, 0.01113485948425977, -0.005597937295155945, -0.001445223403599799, 0.0020925260396803457, 0.004950634659075405, 0.00018794744628654992, 0.0007423314249541871, 0.00030331129486827163, -0.0012335901661089598}, }, { fn: SoftMaxB, output: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float32{0.47502081252106, 0.52497918747894, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.52497918747894}), ), grad: New( Of(Float64), WithShape(3, 2, 2), WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}), ), axis: -1, expectedOutput: []float32{-0.002493760401928919, 0.0024937604019289205, -0.0024937604019289183, 0.002493760401928922, -0.002493760401928915, 0.002493760401928922, -0.002493760401928912, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289183}, }, } for i, tC := range testCases { t.Run(fmt.Sprintf("Example #%d - %v %v", i+1, tC.output.Shape(), tC.output.Dtype()), func(t *testing.T) { c := assert.New(t) dx, err := tC.fn(tC.output, tC.grad, tC.axis) t.Logf("output: %#v", tC.output.Data()) c.NoError(err) c.NotNil(dx) c.Equal(tC.output.Shape(), dx.Shape()) c.InDeltaSlice(tC.expectedOutput, dx.Data(), 1e-6) }) } } tensor-0.9.24/dense_svd_test.go000066400000000000000000000134661426512615100164750ustar00rootroot00000000000000package tensor import ( "fmt" "testing" "github.com/pkg/errors" "gonum.org/v1/gonum/mat" ) // tests for SVD adapted from Gonum's SVD tests. 
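// Each decomposition is checked against the defining identity A = U * Sigma * V^T: testSVD (below) uses calcSigma to lift the singular-value vector s into a rectangular diagonal matrix, multiplies the factors back together, and requires the reconstruction to match the original data to within closeenoughf64.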
// Gonum's licence is listed at https://gonum.org/v1/gonum/license var svdtestsThin = []struct { data []float64 shape Shape correctSData []float64 correctSShape Shape correctUData []float64 correctUShape Shape correctVData []float64 correctVShape Shape }{ { []float64{2, 4, 1, 3, 0, 0, 0, 0}, Shape{4, 2}, []float64{5.464985704219041, 0.365966190626258}, Shape{2}, []float64{-0.8174155604703632, -0.5760484367663209, -0.5760484367663209, 0.8174155604703633, 0, 0, 0, 0}, Shape{4, 2}, []float64{-0.4045535848337571, -0.9145142956773044, -0.9145142956773044, 0.4045535848337571}, Shape{2, 2}, }, { []float64{1, 1, 0, 1, 0, 0, 0, 0, 0, 11, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 12, 2, 1, 1, 0, 0, 0, 0, 0, 0, 1, 13, 3}, Shape{3, 11}, []float64{21.259500881097434, 1.5415021616856566, 1.2873979074613628}, Shape{3}, []float64{-0.5224167862273765, 0.7864430360363114, 0.3295270133658976, -0.5739526766688285, -0.03852203026050301, -0.8179818935216693, -0.6306021141833781, -0.6164603833618163, 0.4715056408282468}, Shape{3, 3}, []float64{ -0.08123293141915189, 0.08528085505260324, -0.013165501690885152, -0.05423546426886932, 0.1102707844980355, 0.622210623111631, 0, 0, 0, -0.0245733326078166, 0.510179651760153, 0.25596360803140994, 0, 0, 0, 0, 0, 0, -0.026997467150282436, -0.024989929445430496, -0.6353761248025164, 0, 0, 0, -0.029662131661052707, -0.3999088672621176, 0.3662470150802212, -0.9798839760830571, 0.11328174160898856, -0.047702613241813366, -0.16755466189153964, -0.7395268089170608, 0.08395240366704032}, Shape{11, 3}, }, } var svdtestsFull = []Shape{ {5, 5}, {5, 3}, {3, 5}, {150, 150}, {200, 150}, {150, 200}, } // calculate corrects func calcSigma(s, T *Dense, shape Shape) (sigma *Dense, err error) { sigma = New(Of(Float64), WithShape(shape...)) for i := 0; i < MinInt(shape[0], shape[1]); i++ { var idx int if idx, err = Ltoi(sigma.Shape(), sigma.Strides(), i, i); err != nil { return } sigma.Float64s()[idx] = s.Float64s()[i] } return } // test svd by doing the SVD, then calculating the corrects func testSVD(T, T2, s, u, v *Dense, t string, i int) (err error) { var sigma, reconstructed *Dense if !allClose(T2.Data(), T.Data(), closeenoughf64) { return errors.Errorf("A call to SVD modified the underlying data! %s Test %d", t, i) } shape := T2.Shape() if t == "thin" { shape = Shape{MinInt(shape[0], shape[1]), MinInt(shape[0], shape[1])} } if sigma, err = calcSigma(s, T, shape); err != nil { return } v.T() if reconstructed, err = u.MatMul(sigma, UseSafe()); err != nil { return } if reconstructed, err = reconstructed.MatMul(v, UseSafe()); err != nil { return } if !allClose(T2.Data(), reconstructed.Data(), closeenoughf64) { return errors.Errorf("Expected reconstructed to be %v. Got %v instead", T2.Data(), reconstructed.Data()) } return nil } func ExampleDense_SVD() { T := New( WithShape(4, 5), WithBacking([]float64{1, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0}), ) _, u, _, _ := T.SVD(true, true) uT := u.Clone().(*Dense) uT.T() eye, err := u.MatMul(uT) fmt.Println(eye) fmt.Println(err) // Output: // ⎡1 0 0 0⎤ // ⎢0 1 0 0⎥ // ⎢0 0 1 0⎥ // ⎣0 0 0 1⎦ // // } func TestDense_SVD(t *testing.T) { var T, T2, s, u, v *Dense var err error // gonum specific thin special cases for i, stts := range svdtestsThin { T = New(WithShape(stts.shape...), WithBacking(stts.data)) T2 = T.Clone().(*Dense) if s, u, v, err = T.SVD(true, false); err != nil { t.Error(err) continue } if !allClose(T2.Data(), T.Data(), closeenoughf64) { t.Errorf("A call to SVD modified the underlying data! 
Thin Test %d", i) continue } if !allClose(stts.correctSData, s.Data(), closeenoughf64) { t.Errorf("Expected s = %v. Got %v instead", stts.correctSData, s.Data()) } if !allClose(stts.correctUData, u.Data(), closeenoughf64) { t.Errorf("Expected u = %v. Got %v instead", stts.correctUData, u.Data()) } if !allClose(stts.correctVData, v.Data(), closeenoughf64) { t.Errorf("Expected v = %v. Got %v instead", stts.correctVData, v.Data()) } } // standard tests for i, stfs := range svdtestsFull { T = New(WithShape(stfs...), WithBacking(Random(Float64, stfs.TotalSize()))) T2 = T.Clone().(*Dense) // full if s, u, v, err = T.SVD(true, true); err != nil { t.Error(err) fmt.Println(err) continue } if err = testSVD(T, T2, s, u, v, "full", i); err != nil { t.Error(err) fmt.Println(err) continue } // thin if s, u, v, err = T.SVD(true, false); err != nil { t.Error(err) continue } if err = testSVD(T, T2, s, u, v, "thin", i); err != nil { t.Error(err) continue } // none if s, u, v, err = T.SVD(false, false); err != nil { t.Error(err) continue } var svd mat.SVD var m *mat.Dense if m, err = ToMat64(T); err != nil { t.Error(err) continue } if !svd.Factorize(m, mat.SVDFull) { t.Errorf("Unable to factorise %v", m) continue } if !allClose(s.Data(), svd.Values(nil), closeenoughf64) { t.Errorf("Singular value mismatch between Full and None decomposition. Expected %v. Got %v instead", svd.Values(nil), s.Data()) } } // this is illogical T = New(Of(Float64), WithShape(2, 2)) if _, _, _, err = T.SVD(false, true); err == nil { t.Errorf("Expected an error!") } // if you do this, it is bad and you should feel bad T = New(Of(Float64), WithShape(2, 3, 4)) if _, _, _, err = T.SVD(true, true); err == nil { t.Errorf("Expecetd an error: cannot SVD() a Tensor > 2 dimensions") } T = New(Of(Float64), WithShape(2)) if _, _, _, err = T.SVD(true, true); err == nil { t.Errorf("Expecetd an error: cannot SVD() a Tensor < 2 dimensions") } } tensor-0.9.24/dense_test.go000066400000000000000000000064321426512615100156140ustar00rootroot00000000000000package tensor import ( "math/rand" "testing" "testing/quick" "time" "github.com/stretchr/testify/assert" ) func TestDense_ShallowClone(t *testing.T) { T := New(Of(Float64), WithBacking([]float64{1, 2, 3, 4})) T2 := T.ShallowClone() T2.slice(0, 2) T2.Float64s()[0] = 1000 assert.Equal(t, T.Data().([]float64)[0:2], T2.Data()) assert.Equal(t, T.Engine(), T2.Engine()) assert.Equal(t, T.oe, T2.oe) assert.Equal(t, T.flag, T2.flag) } func TestDense_Clone(t *testing.T) { assert := assert.New(t) cloneChk := func(q *Dense) bool { a := q.Clone().(*Dense) if !q.Shape().Eq(a.Shape()) { t.Errorf("Shape Difference: %v %v", q.Shape(), a.Shape()) return false } if len(q.Strides()) != len(a.Strides()) { t.Errorf("Stride Difference: %v %v", q.Strides(), a.Strides()) return false } for i, s := range q.Strides() { if a.Strides()[i] != s { t.Errorf("Stride Difference: %v %v", q.Strides(), a.Strides()) return false } } if q.o != a.o { t.Errorf("Data Order difference : %v %v", q.o, a.o) return false } if q.Δ != a.Δ { t.Errorf("Triangle Difference: %v %v", q.Δ, a.Δ) return false } if q.flag != a.flag { t.Errorf("Flag difference : %v %v", q.flag, a.flag) return false } if q.e != a.e { t.Errorf("Engine difference; %T %T", q.e, a.e) return false } if q.oe != a.oe { t.Errorf("Optimized Engine difference; %T %T", q.oe, a.oe) return false } if len(q.transposeWith) != len(a.transposeWith) { t.Errorf("TransposeWith difference: %v %v", q.transposeWith, a.transposeWith) return false } assert.Equal(q.mask, a.mask, "mask difference") 
assert.Equal(q.maskIsSoft, a.maskIsSoft, "mask is soft") return true } r := rand.New(rand.NewSource(time.Now().UnixNano())) if err := quick.Check(cloneChk, &quick.Config{Rand: r}); err != nil { t.Error(err) } } func TestDenseMasked(t *testing.T) { T := New(Of(Float64), WithShape(3, 2)) T.ResetMask() assert.Equal(t, []bool{false, false, false, false, false, false}, T.mask) } func TestFromScalar(t *testing.T) { T := New(FromScalar(3.14)) data := T.Float64s() assert.Equal(t, []float64{3.14}, data) } func Test_recycledDense(t *testing.T) { T := recycledDense(Float64, ScalarShape()) assert.Equal(t, float64(0), T.Data()) assert.Equal(t, StdEng{}, T.e) assert.Equal(t, StdEng{}, T.oe) } func TestDense_unsqueeze(t *testing.T) { assert := assert.New(t) T := New(WithShape(3, 3, 2), WithBacking([]float64{ 1, 2, 3, 4, 5, 6, 60, 50, 40, 30, 20, 10, 100, 200, 300, 400, 500, 600, })) if err := T.unsqueeze(0); err != nil { t.Fatal(err) } assert.True(T.Shape().Eq(Shape{1, 3, 3, 2})) assert.Equal([]int{6, 6, 2, 1}, T.Strides()) // if you do shapes.CalcStrides() it'd be {18,6,2,1} // reset T.Reshape(3, 3, 2) if err := T.unsqueeze(1); err != nil { t.Fatal(err) } assert.True(T.Shape().Eq(Shape{3, 1, 3, 2})) assert.Equal([]int{6, 2, 2, 1}, T.Strides()) // reset T.Reshape(3, 3, 2) if err := T.unsqueeze(2); err != nil { t.Fatal(err) } t.Logf("%v", T) assert.True(T.Shape().Eq(Shape{3, 3, 1, 2})) assert.Equal([]int{6, 2, 1, 1}, T.Strides()) // reset T.Reshape(3, 3, 2) if err := T.unsqueeze(3); err != nil { t.Fatal(err) } t.Logf("%v", T) assert.True(T.Shape().Eq(Shape{3, 3, 2, 1})) assert.Equal([]int{6, 2, 1, 1}, T.Strides()) } tensor-0.9.24/dense_views.go000066400000000000000000000007721426512615100157710ustar00rootroot00000000000000package tensor // a View is a *Tensor with customized strides. The reason for not splitting them up into different types is complicated. // This file contains all the methods that deal with Views. // Materialize takes a view, copies its data and puts it in a new *Tensor. func (t *Dense) Materialize() Tensor { if !t.IsMaterializable() { return t } retVal := recycledDense(t.t, t.shape.Clone(), WithEngine(t.e)) copyDenseIter(retVal, t, nil, nil) retVal.e = t.e retVal.oe = t.oe return retVal } tensor-0.9.24/divmod_amd64.s000066400000000000000000000005071426512615100155660ustar00rootroot00000000000000// +build !noasm #include "textflag.h" TEXT ·divmod(SB),NOSPLIT,$0 MOVQ a+0(FP), SI MOVQ b+8(FP), CX MOVQ SI, AX CMPQ CX, $-1 JEQ $1, denomIsOne // if the denominator is -1, jump to the special case: a / -1 = -a, remainder 0 CQO IDIVQ CX MOVQ AX, q+16(FP) MOVQ DX, r+24(FP) bye: RET denomIsOne: NEGQ AX MOVQ AX, q+16(FP) MOVQ $0, r+24(FP) JMP bye tensor-0.9.24/engine.go000066400000000000000000000355101426512615100147230ustar00rootroot00000000000000package tensor // Memory is a representation of the memory of a value. // // The main reason for requiring both Uintptr() and Pointer() methods is that, while Go currently does not have a compacting // garbage collector, the docs of `unsafe` warn: // Even if a uintptr holds the address of some object, the garbage collector will not update that uintptr's value if the object moves, // nor will that uintptr keep the object from being reclaimed. type Memory interface { Uintptr() uintptr MemSize() uintptr } // Engine is a representation of an execution engine.
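// Engines advertise optional capabilities through the narrower interfaces declared further down in this file; callers discover them at runtime by type assertion. For example, (*Dense).Sum in dense_reduction_methods.go checks if sumer, ok := e.(Sumer); ok before delegating to the engine.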
// Engine is a representation of an execution engine. // While different execution engines can have different capabilities, all execution engines must be able to allocate and free memory type Engine interface { AllocAccessible() bool // AllocAccessible returns true if the engine returns Go-accessible memory pointers Alloc(size int64) (Memory, error) // Alloc allocates memory Free(mem Memory, size int64) error // Free frees memory Memset(mem Memory, val interface{}) error // Memset - duh Memclr(mem Memory) // Memclr - duh Memcpy(dst, src Memory) error // Memcpy - duh Accessible(mem Memory) (Memory, error) // Accessible returns Go-accessible memory pointers, or errors if it cannot be done WorksWith(order DataOrder) bool // WorksWith returns true if the data order can be directly worked with } type standardEngine interface { Engine Adder Suber Muler Diver Power Moder FMAer MatMuler MatVecMuler OuterProder Dotter SVDer Lter Lteer Gter Gteer ElEqer MinBetweener MaxBetweener // Anything that returns interface{} cannot be added here because they will likely have additional // optimized versions of the functions for types. // For example: Tracer and InnerProder both have optimized interfaces for Float32 and Float64 which return those types specifically. } type arrayMaker interface { makeArray(arr *array, t Dtype, size int) } // NonStdEngine are any engines that do not allocate using the default built-in allocator type NonStdEngine interface { NonStdAlloc() // noop } /* Data Agnostic Execution Engine Methods */ // Transposer is any engine that can perform an unsafe transpose of a tensor. type Transposer interface { Transpose(t Tensor, expStrides []int) error } // Concater is any engine that can concatenate multiple Tensors together type Concater interface { Concat(t Tensor, axis int, others ...Tensor) (Tensor, error) } // Stacker is any engine that can stack multiple Tensors along an axis type Stacker interface { Stack(t Tensor, axis int, others ...Tensor) (Tensor, error) } // DenseStacker is any engine that can stack DenseTensors along an axis. This is a specialization of Stacker. type DenseStacker interface { StackDense(t DenseTensor, axis int, others ...DenseTensor) (retVal DenseTensor, err error) } // Repeater is any engine that can repeat values along the given axis. type Repeater interface { Repeat(t Tensor, axis int, repeats ...int) (Tensor, error) RepeatReuse(t Tensor, reuse Tensor, axis int, repeats ...int) (Tensor, error) } // Diager is any engine that can return a tensor that only contains the diagonal values of the input type Diager interface { Diag(a Tensor) (Tensor, error) } /* NUMBER INTERFACES All these are expected to be unsafe on the first tensor */ // Adder is any engine that can perform elementwise addition. type Adder interface { // Add performs a + b Add(a, b Tensor, opts ...FuncOpt) (Tensor, error) // AddScalar adds a scalar to the tensor. leftTensor indicates if the tensor is the left operand. // Whether or not the input tensor is clobbered is left to the implementation AddScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) }
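//
// As a sketch of how Adder is reached in practice, the package-level Add
// function dispatches to the engine of one of its operands (illustrative
// only; error handling elided):
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
//	sum, _ := Add(a, b) // uses the engine's Add if it implements Adder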
// Suber is any engine that can perform elementwise subtraction. type Suber interface { // Sub performs a - b Sub(a, b Tensor, opts ...FuncOpt) (Tensor, error) // SubScalar subtracts a scalar from/to the tensor. leftTensor indicates if the tensor is the left operand. // Whether or not the input tensor is clobbered is left to the implementation SubScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) } // Muler is any engine that can perform elementwise multiplication. // For matrix multiplication, an engine should implement MatMul() or MatVecMul() or Inner() type Muler interface { // Mul performs a * b Mul(a, b Tensor, opts ...FuncOpt) (Tensor, error) // MulScalar multiplies a scalar to the tensor. leftTensor indicates if the tensor is the left operand. // Whether or not the input tensor is clobbered is left to the implementation MulScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) } // Diver is any engine that can perform elementwise division. type Diver interface { // Div performs a / b Div(a, b Tensor, opts ...FuncOpt) (Tensor, error) // DivScalar divides a scalar from/to the tensor. leftTensor indicates if the tensor is the left operand. // Whether or not the input tensor is clobbered is left to the implementation DivScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) } // Power is any engine that can perform elementwise Pow() type Power interface { // Pow performs a ^ b Pow(a, b Tensor, opts ...FuncOpt) (Tensor, error) // PowScalar exponentiates a scalar from/to the tensor. leftTensor indicates if the tensor is the left operand. // Whether or not the input tensor is clobbered is left to the implementation PowScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) } // Moder is any engine that can perform elementwise Mod() type Moder interface { // Mod performs a % b Mod(a, b Tensor, opts ...FuncOpt) (Tensor, error) // ModScalar performs a % b where one of the operands is scalar. leftTensor indicates if the tensor is the left operand. // Whether or not the input tensor is clobbered is left to the implementation ModScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) }
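//
// The *Scalar variants above are likewise reachable through the package-level
// helpers, which accept a scalar on either side (a sketch; illustrative only):
//
//	a := New(WithShape(3), WithBacking([]float64{7, 8, 9}))
//	r, _ := Mod(a, 2.0) // elementwise a % 2; the tensor is the left operand here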
// MinBetweener is any engine that can perform an elementwise min-between. type MinBetweener interface { MinBetween(a, b Tensor, opts ...FuncOpt) (Tensor, error) MinBetweenScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) } // MaxBetweener is any engine that can perform an elementwise max-between. type MaxBetweener interface { MaxBetween(a, b Tensor, opts ...FuncOpt) (Tensor, error) MaxBetweenScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error) } /* GT */ // Comparison functions return a Tensor of bool by default. To return the same type, simply pass in the AsSameType function option func ExampleDense_Gt_basic() { var T1, T2, T3, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3, _ = T1.Gt(T2) fmt.Println("Basic operations are safe\n=========================\nT3 = T1 > T2") fmt.Printf("T3:\n%v\n", T3) // To return the same type, use the AsSameType function option T3, _ = T1.Gt(T2, AsSameType()) fmt.Println("Returning same type\n===================") fmt.Printf("T3 (Returns Same Type):\n%v\n", T3) // Sliced tensors are safe too T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Gt(T2) fmt.Printf("Safe slicing\n============\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Similarly for tensors that return the same type T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Gt(T2, AsSameType()) // AsSameType returns a tensor of the same type fmt.Printf("Safe slicing (Same type)\n========================\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Output: // Basic operations are safe // ========================= // T3 = T1 > T2 // T3: // ⎡false false false⎤ // ⎢false false false⎥ // ⎣false false false⎦ // // Returning same type // =================== // T3 (Returns Same Type): // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Safe slicing // ============ // T3: // ⎡false false⎤ // ⎣false false⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // Safe slicing (Same type) // ======================== // T3: // ⎡0 0⎤ // ⎣0 0⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ } // If the UseUnsafe function option is passed into the call, the assumption is made that it will be returning the same type func ExampleDense_Gt_unsafe() { var T1, T2, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T1.Gt(T2, UseUnsafe()) fmt.Printf("Unsafe operation\n================\nT1:\n%v\n", T1) T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) V.Gt(T2, UseUnsafe()) fmt.Printf("Unsafe operation, with a sliced Tensor\n======================================\nT1:\n%v", T1) // Output: // Unsafe operation // ================ // T1: // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Unsafe operation, with a sliced Tensor // ====================================== // T1: // ⎡0 0 2⎤ // ⎢0 0 5⎥ // ⎣6 7 8⎦ } // The WithReuse function option can be used to pass in reuse tensors. But be sure to also use the AsSameType() function option // or else funny results will happen func ExampleDense_Gt_reuse() { var T1, T2, T3, V *Dense var sliced Tensor // The reuse tensor is a Tensor of bools...
T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking([]bool{ true, false, true, false, true, false, true, false, true}), WithShape(3, 3)) T1.Gt(T2, WithReuse(T3)) // note that AsSameType is not used here fmt.Printf("Default behaviour: Reuse tensor is expected to be of Bools\n==========================================================\nT3:\n%v\n", T3) // If you want to use a Reuse tensor of the same type, then be sure to also pass in the AsSameType() flag T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking(Range(Float64, 100, 109)), WithShape(3, 3)) // The reuse tensor is a Tensor of Float64... T1.Gt(T2, WithReuse(T3), AsSameType()) // AsSameType is used to return float64s fmt.Printf("Reuse With Same Type\n=====================\nT3:\n%v\n", T3) // Slicing is similar: T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking([]bool{true, true, true, true}), WithShape(2, 2)) V.Gt(T2, WithReuse(T3)) fmt.Printf("Reuse on sliced tensors\n======================\nT3\n%v\n", T3) // Again, bear in mind same types T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking(Range(Float64, 100, 104)), WithShape(2, 2)) V.Gt(T2, WithReuse(T3), AsSameType()) fmt.Printf("Reuse on sliced tensors (same type)\n=================================\nT3\n%v\n", T3) // Output: // Default behaviour: Reuse tensor is expected to be of Bools // ========================================================== // T3: // ⎡false false false⎤ // ⎢false false false⎥ // ⎣false false false⎦ // // Reuse With Same Type // ===================== // T3: // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Reuse on sliced tensors // ====================== // T3 // ⎡false false⎤ // ⎣ true true⎦ // // Reuse on sliced tensors (same type) // ================================= // T3 // ⎡0 0⎤ // ⎣1 1⎦ } /* GTE */ // Comparison functions return a Tensor of bool by default.
To return the same type, simply pass in the AsSameType function option func ExampleDense_Gte_basic() { var T1, T2, T3, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3, _ = T1.Gte(T2) fmt.Println("Basic operations are safe\n=========================\nT3 = T1 >= T2") fmt.Printf("T3:\n%v\n", T3) // To return the same type, use the AsSameType function option T3, _ = T1.Gte(T2, AsSameType()) fmt.Println("Returning same type\n===================") fmt.Printf("T3 (Returns Same Type):\n%v\n", T3) // Sliced tensors are safe too T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Gte(T2) fmt.Printf("Safe slicing\n============\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Similarly for tensors that return the same type T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Gte(T2, AsSameType()) // AsSameType returns a tensor of the same type fmt.Printf("Safe slicing (Same type)\n========================\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Output: // Basic operations are safe // ========================= // T3 = T1 >= T2 // T3: // ⎡true true true⎤ // ⎢true true true⎥ // ⎣true true true⎦ // // Returning same type // =================== // T3 (Returns Same Type): // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Safe slicing // ============ // T3: // ⎡false false⎤ // ⎣ true true⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // Safe slicing (Same type) // ======================== // T3: // ⎡0 0⎤ // ⎣1 1⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ } // If the UseUnsafe function option is passed into the call, the assumption is made that it will be returning the same type func ExampleDense_Gte_unsafe() { var T1, T2, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T1.Gte(T2, UseUnsafe()) fmt.Printf("Unsafe operation\n================\nT1:\n%v\n", T1) T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) V.Gte(T2, UseUnsafe()) fmt.Printf("Unsafe operation, with a sliced Tensor\n======================================\nT1:\n%v", T1) // Output: // Unsafe operation // ================ // T1: // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Unsafe operation, with a sliced Tensor // ====================================== // T1: // ⎡0 0 2⎤ // ⎢1 1 5⎥ // ⎣6 7 8⎦ } // The WithReuse function option can be used to pass in reuse tensors. But be sure to also use the AsSameType() function option // or else funny results will happen func ExampleDense_Gte_reuse() { var T1, T2, T3, V *Dense var sliced Tensor // The reuse tensor is a Tensor of bools... 
T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking([]bool{ true, false, true, false, true, false, true, false, true}), WithShape(3, 3)) T1.Gte(T2, WithReuse(T3)) // note that AsSameType is not used here fmt.Printf("Default behaviour: Reuse tensor is expected to be of Bools\n==========================================================\nT3:\n%v\n", T3) // If you want to use a Reuse tensor of the same type, then be sure to also pass in the AsSameType() flag T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking(Range(Float64, 100, 109)), WithShape(3, 3)) // The reuse tensor is a Tensor of Float64... T1.Gte(T2, WithReuse(T3), AsSameType()) // AsSameType is used to return float64s fmt.Printf("Reuse With Same Type\n=====================\nT3:\n%v\n", T3) // Slicing is similar: T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking([]bool{true, true, true, true}), WithShape(2, 2)) V.Gte(T2, WithReuse(T3)) fmt.Printf("Reuse on sliced tensors\n======================\nT3\n%v\n", T3) // Again, bear in mind same types T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking(Range(Float64, 100, 104)), WithShape(2, 2)) V.Gte(T2, WithReuse(T3), AsSameType()) fmt.Printf("Reuse on sliced tensors (same type)\n=================================\nT3\n%v\n", T3) // Output: // Default behaviour: Reuse tensor is expected to be of Bools // ========================================================== // T3: // ⎡true true true⎤ // ⎢true true true⎥ // ⎣true true true⎦ // // Reuse With Same Type // ===================== // T3: // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Reuse on sliced tensors // ====================== // T3 // ⎡true true⎤ // ⎣true true⎦ // // Reuse on sliced tensors (same type) // ================================= // T3 // ⎡1 1⎤ // ⎣1 1⎦ } /* LT */ // Comparison functions return a Tensor of bool by default.
To return the same type, simply pass in the AsSameType function option func ExampleDense_Lt_basic() { var T1, T2, T3, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3, _ = T1.Lt(T2) fmt.Println("Basic operations are safe\n=========================\nT3 = T1 < T2") fmt.Printf("T3:\n%v\n", T3) // To return the same type, use the AsSameType function option T3, _ = T1.Lt(T2, AsSameType()) fmt.Println("Returning same type\n===================") fmt.Printf("T3 (Returns Same Type):\n%v\n", T3) // Sliced tensors are safe too T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Lt(T2) fmt.Printf("Safe slicing\n============\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Similarly for tensors that return the same type T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Lt(T2, AsSameType()) // AsSameType returns a tensor of the same type fmt.Printf("Safe slicing (Same type)\n========================\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Output: // Basic operations are safe // ========================= // T3 = T1 < T2 // T3: // ⎡false false false⎤ // ⎢false false false⎥ // ⎣false false false⎦ // // Returning same type // =================== // T3 (Returns Same Type): // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Safe slicing // ============ // T3: // ⎡ true true⎤ // ⎣false false⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // Safe slicing (Same type) // ======================== // T3: // ⎡1 1⎤ // ⎣0 0⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ } // If the UseUnsafe function option is passed into the call, the assumption is made that it will be returning the same type func ExampleDense_Lt_unsafe() { var T1, T2, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T1.Lt(T2, UseUnsafe()) fmt.Printf("Unsafe operation\n================\nT1:\n%v\n", T1) T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) V.Lt(T2, UseUnsafe()) fmt.Printf("Unsafe operation, with a sliced Tensor\n======================================\nT1:\n%v", T1) // Output: // Unsafe operation // ================ // T1: // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Unsafe operation, with a sliced Tensor // ====================================== // T1: // ⎡1 1 2⎤ // ⎢0 0 5⎥ // ⎣6 7 8⎦ } // The WithReuse function option can be used to pass in reuse tensors. But be sure to also use the AsSameType() function option // or else funny results will happen func ExampleDense_Lt_reuse() { var T1, T2, T3, V *Dense var sliced Tensor // The reuse tensor is a Tensor of bools... 
T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking([]bool{ true, false, true, false, true, false, true, false, true}), WithShape(3, 3)) T1.Lt(T2, WithReuse(T3)) // note that AsSameType is not used here fmt.Printf("Default behaviour: Reuse tensor is expected to be of Bools\n==========================================================\nT3:\n%v\n", T3) // If you want to use a Reuse tensor of the same type, then be sure to also pass in the AsSameType() flag T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking(Range(Float64, 100, 109)), WithShape(3, 3)) // The reuse tensor is a Tensor of Float64... T1.Lt(T2, WithReuse(T3), AsSameType()) // AsSameType is used to return float64s fmt.Printf("Reuse With Same Type\n=====================\nT3:\n%v\n", T3) // Slicing is similar: T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking([]bool{true, true, true, true}), WithShape(2, 2)) V.Lt(T2, WithReuse(T3)) fmt.Printf("Reuse on sliced tensors\n======================\nT3\n%v\n", T3) // Again, bear in mind same types T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking(Range(Float64, 100, 104)), WithShape(2, 2)) V.Lt(T2, WithReuse(T3), AsSameType()) fmt.Printf("Reuse on sliced tensors (same type)\n=================================\nT3\n%v\n", T3) // Output: // Default behaviour: Reuse tensor is expected to be of Bools // ========================================================== // T3: // ⎡false false false⎤ // ⎢false false false⎥ // ⎣false false false⎦ // // Reuse With Same Type // ===================== // T3: // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Reuse on sliced tensors // ====================== // T3 // ⎡false false⎤ // ⎣false false⎦ // // Reuse on sliced tensors (same type) // ================================= // T3 // ⎡0 0⎤ // ⎣0 0⎦ } /* LTE */ // Comparison functions return a Tensor of bool by default.
To return the same type, simply pass in the AsSameType function option func ExampleDense_Lte_basic() { var T1, T2, T3, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3, _ = T1.Lte(T2) fmt.Println("Basic operations are safe\n=========================\nT3 = T1 <= T2") fmt.Printf("T3:\n%v\n", T3) // To return the same type, use the AsSameType function option T3, _ = T1.Lte(T2, AsSameType()) fmt.Println("Returning same type\n===================") fmt.Printf("T3 (Returns Same Type):\n%v\n", T3) // Sliced tensors are safe too T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Lte(T2) fmt.Printf("Safe slicing\n============\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Similarly for tensors that return the same type T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.Lte(T2, AsSameType()) // AsSameType returns a tensor of the same type fmt.Printf("Safe slicing (Same type)\n========================\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Output: // Basic operations are safe // ========================= // T3 = T1 <= T2 // T3: // ⎡true true true⎤ // ⎢true true true⎥ // ⎣true true true⎦ // // Returning same type // =================== // T3 (Returns Same Type): // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Safe slicing // ============ // T3: // ⎡true true⎤ // ⎣true true⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // Safe slicing (Same type) // ======================== // T3: // ⎡1 1⎤ // ⎣1 1⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ } // If the UseUnsafe function option is passed into the call, the assumption is made that it will be returning the same type func ExampleDense_Lte_unsafe() { var T1, T2, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T1.Lte(T2, UseUnsafe()) fmt.Printf("Unsafe operation\n================\nT1:\n%v\n", T1) T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) V.Lte(T2, UseUnsafe()) fmt.Printf("Unsafe operation, with a sliced Tensor\n======================================\nT1:\n%v", T1) // Output: // Unsafe operation // ================ // T1: // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Unsafe operation, with a sliced Tensor // ====================================== // T1: // ⎡1 1 2⎤ // ⎢1 1 5⎥ // ⎣6 7 8⎦ } // The WithReuse function option can be used to pass in reuse tensors. But be sure to also use the AsSameType() function option // or else funny results will happen func ExampleDense_Lte_reuse() { var T1, T2, T3, V *Dense var sliced Tensor // The reuse tensor is a Tensor of bools... 
T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking([]bool{ true, false, true, false, true, false, true, false, true}), WithShape(3, 3)) T1.Lte(T2, WithReuse(T3)) // note that AsSameType is not used here fmt.Printf("Default behaviour: Reuse tensor is expected to be of Bools\n==========================================================\nT3:\n%v\n", T3) // If you want to use a Reuse tensor of the same type, then be sure to also pass in the AsSameType() flag T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking(Range(Float64, 100, 109)), WithShape(3, 3)) // The reuse tensor is a Tensor of Float64... T1.Lte(T2, WithReuse(T3), AsSameType()) // AsSameType is used to return float64s fmt.Printf("Reuse With Same Type\n=====================\nT3:\n%v\n", T3) // Slicing is similar: T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking([]bool{true, true, true, true}), WithShape(2, 2)) V.Lte(T2, WithReuse(T3)) fmt.Printf("Reuse on sliced tensors\n======================\nT3\n%v\n", T3) // Again, bear in mind same types T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking(Range(Float64, 100, 104)), WithShape(2, 2)) V.Lte(T2, WithReuse(T3), AsSameType()) fmt.Printf("Reuse on sliced tensors (same type)\n=================================\nT3\n%v\n", T3) // Output: // Default behaviour: Reuse tensor is expected to be of Bools // ========================================================== // T3: // ⎡true true true⎤ // ⎢true true true⎥ // ⎣true true true⎦ // // Reuse With Same Type // ===================== // T3: // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Reuse on sliced tensors // ====================== // T3 // ⎡ true true⎤ // ⎣false false⎦ // // Reuse on sliced tensors (same type) // ================================= // T3 // ⎡1 1⎤ // ⎣0 0⎦ } /* ELEQ */ // Comparison functions return a Tensor of bool by default.
To return the same type, simply pass in the AsSameType function option func ExampleDense_ElEq_basic() { var T1, T2, T3, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3, _ = T1.ElEq(T2) fmt.Println("Basic operations are safe\n=========================\nT3 = T1 == T2") fmt.Printf("T3:\n%v\n", T3) // To return the same type, use the AsSameType function option T3, _ = T1.ElEq(T2, AsSameType()) fmt.Println("Returning same type\n===================") fmt.Printf("T3 (Returns Same Type):\n%v\n", T3) // Sliced tensors are safe too T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.ElEq(T2) fmt.Printf("Safe slicing\n============\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Similarly for tensors that return the same type T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.ElEq(T2, AsSameType()) // AsSameType returns a tensor of the same type fmt.Printf("Safe slicing (Same type)\n========================\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Output: // Basic operations are safe // ========================= // T3 = T1 == T2 // T3: // ⎡true true true⎤ // ⎢true true true⎥ // ⎣true true true⎦ // // Returning same type // =================== // T3 (Returns Same Type): // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Safe slicing // ============ // T3: // ⎡false false⎤ // ⎣ true true⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // Safe slicing (Same type) // ======================== // T3: // ⎡0 0⎤ // ⎣1 1⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ } // If the UseUnsafe function option is passed into the call, the assumption is made that it will be returning the same type func ExampleDense_ElEq_unsafe() { var T1, T2, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T1.ElEq(T2, UseUnsafe()) fmt.Printf("Unsafe operation\n================\nT1:\n%v\n", T1) T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) V.ElEq(T2, UseUnsafe()) fmt.Printf("Unsafe operation, with a sliced Tensor\n======================================\nT1:\n%v", T1) // Output: // Unsafe operation // ================ // T1: // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Unsafe operation, with a sliced Tensor // ====================================== // T1: // ⎡0 0 2⎤ // ⎢1 1 5⎥ // ⎣6 7 8⎦ } // The WithReuse function option can be used to pass in reuse tensors. But be sure to also use the AsSameType() function option // or else funny results will happen func ExampleDense_ElEq_reuse() { var T1, T2, T3, V *Dense var sliced Tensor // The reuse tensor is a Tensor of bools... 
T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking([]bool{ true, false, true, false, true, false, true, false, true}), WithShape(3, 3)) T1.ElEq(T2, WithReuse(T3)) // note that AsSameType is not used here fmt.Printf("Default behaviour: Reuse tensor is expected to be of Bools\n==========================================================\nT3:\n%v\n", T3) // If you want to use a Reuse tensor of the same type, then be sure to also pass in the AsSameType() flag T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking(Range(Float64, 100, 109)), WithShape(3, 3)) // The reuse tensor is a Tensor of Float64... T1.ElEq(T2, WithReuse(T3), AsSameType()) // AsSameType is used to return float64s fmt.Printf("Reuse With Same Type\n=====================\nT3:\n%v\n", T3) // Slicing is similar: T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking([]bool{true, true, true, true}), WithShape(2, 2)) V.ElEq(T2, WithReuse(T3)) fmt.Printf("Reuse on sliced tensors\n======================\nT3\n%v\n", T3) // Again, bear in mind same types T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking(Range(Float64, 100, 104)), WithShape(2, 2)) V.ElEq(T2, WithReuse(T3), AsSameType()) fmt.Printf("Reuse on sliced tensors (same type)\n=================================\nT3\n%v\n", T3) // Output: // Default behaviour: Reuse tensor is expected to be of Bools // ========================================================== // T3: // ⎡true true true⎤ // ⎢true true true⎥ // ⎣true true true⎦ // // Reuse With Same Type // ===================== // T3: // ⎡1 1 1⎤ // ⎢1 1 1⎥ // ⎣1 1 1⎦ // // Reuse on sliced tensors // ====================== // T3 // ⎡ true true⎤ // ⎣false false⎦ // // Reuse on sliced tensors (same type) // ================================= // T3 // ⎡1 1⎤ // ⎣0 0⎦ } /* ELNE */ // Comparison functions return a Tensor of bool by default.
To return the same type, simply pass in the AsSameType function option func ExampleDense_ElNe_basic() { var T1, T2, T3, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3, _ = T1.ElNe(T2) fmt.Println("Basic operations are safe\n=========================\nT3 = T1 != T2") fmt.Printf("T3:\n%v\n", T3) // To return the same type, use the AsSameType function option T3, _ = T1.ElNe(T2, AsSameType()) fmt.Println("Returning same type\n===================") fmt.Printf("T3 (Returns Same Type):\n%v\n", T3) // Sliced tensors are safe too T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.ElNe(T2) fmt.Printf("Safe slicing\n============\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Similarly for tensors that return the same type T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) T3, _ = V.ElNe(T2, AsSameType()) // AsSameType returns a tensor of the same type fmt.Printf("Safe slicing (Same type)\n========================\nT3:\n%v\nT1 remains unchanged:\n%v\n", T3, T1) // Output: // Basic operations are safe // ========================= // T3 = T1 != T2 // T3: // ⎡false false false⎤ // ⎢false false false⎥ // ⎣false false false⎦ // // Returning same type // =================== // T3 (Returns Same Type): // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Safe slicing // ============ // T3: // ⎡ true true⎤ // ⎣false false⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // Safe slicing (Same type) // ======================== // T3: // ⎡1 1⎤ // ⎣0 0⎦ // // T1 remains unchanged: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ } // If the UseUnsafe function option is passed into the call, the assumption is made that it will be returning the same type func ExampleDense_ElNe_unsafe() { var T1, T2, V *Dense var sliced Tensor T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T1.ElNe(T2, UseUnsafe()) fmt.Printf("Unsafe operation\n================\nT1:\n%v\n", T1) T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 1, 5)), WithShape(2, 2)) V.ElNe(T2, UseUnsafe()) fmt.Printf("Unsafe operation, with a sliced Tensor\n======================================\nT1:\n%v", T1) // Output: // Unsafe operation // ================ // T1: // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Unsafe operation, with a sliced Tensor // ====================================== // T1: // ⎡1 1 2⎤ // ⎢0 0 5⎥ // ⎣6 7 8⎦ } // The WithReuse function option can be used to pass in reuse tensors. But be sure to also use the AsSameType() function option // or else funny results will happen func ExampleDense_ElNe_reuse() { var T1, T2, T3, V *Dense var sliced Tensor // The reuse tensor is a Tensor of bools... 
T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking([]bool{ true, false, true, false, true, false, true, false, true}), WithShape(3, 3)) T1.ElNe(T2, WithReuse(T3)) // note that AsSameType is not used here fmt.Printf("Default behaviour: Reuse tensor is expected to be of Bools\n==========================================================\nT3:\n%v\n", T3) // If you want to use a Reuse tensor of the same type, then be sure to also pass in the AsSameType() flag T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T2 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) T3 = New(WithBacking(Range(Float64, 100, 109)), WithShape(3, 3)) // The reuse tensor is a Tensor of Float64... T1.ElNe(T2, WithReuse(T3), AsSameType()) // AsSameType is used to return float64s fmt.Printf("Reuse With Same Type\n=====================\nT3:\n%v\n", T3) // Slicing is similar: T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking([]bool{true, true, true, true}), WithShape(2, 2)) V.ElNe(T2, WithReuse(T3)) fmt.Printf("Reuse on sliced tensors\n======================\nT3\n%v\n", T3) // Again, bear in mind same types T1 = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) sliced, _ = T1.Slice(makeRS(0, 2), makeRS(0, 2)) V = sliced.(*Dense) T2 = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T3 = New(WithBacking(Range(Float64, 100, 104)), WithShape(2, 2)) V.ElNe(T2, WithReuse(T3), AsSameType()) fmt.Printf("Reuse on sliced tensors (same type)\n=================================\nT3\n%v\n", T3) // Output: // Default behaviour: Reuse tensor is expected to be of Bools // ========================================================== // T3: // ⎡false false false⎤ // ⎢false false false⎥ // ⎣false false false⎦ // // Reuse With Same Type // ===================== // T3: // ⎡0 0 0⎤ // ⎢0 0 0⎥ // ⎣0 0 0⎦ // // Reuse on sliced tensors // ====================== // T3 // ⎡false false⎤ // ⎣ true true⎦ // // Reuse on sliced tensors (same type) // ================================= // T3 // ⎡0 0⎤ // ⎣1 1⎦ } tensor-0.9.24/example_dense_compat_test.go000066400000000000000000000011441426512615100206650ustar00rootroot00000000000000package tensor_test import ( "fmt" "github.com/apache/arrow/go/arrow/array" "github.com/apache/arrow/go/arrow/memory" "gorgonia.org/tensor" ) func ExampleDense_Arrow() { pool := memory.NewGoAllocator() b := array.NewFloat64Builder(pool) defer b.Release() b.AppendValues( []float64{1, 2, 3, -1, 4, 5}, []bool{true, true, true, false, true, true}, ) arr := b.NewFloat64Array() defer arr.Release() fmt.Printf("arrow array = %v\n", arr) a := tensor.FromArrowArray(arr) fmt.Printf("tensor = %v\n", a) // Output: // arrow array = [1 2 3 (null) 4 5] // tensor = C[ 1 2 3 -- 4 5] } tensor-0.9.24/example_dense_linalg_test.go000066400000000000000000000075311426512615100206560ustar00rootroot00000000000000package tensor import ( "fmt" ) func ExampleDense_MatMul() { handleErr := func(err error) { if err != nil { panic(err) } } T0 := New(WithShape(10, 15), WithBacking(Range(Float64, 0, 150))) T1 := New(WithShape(15, 10), WithBacking(Range(Float64, 150, 0))) T2, err := MatMul(T0, T1) handleErr(err) fmt.Printf("T2:\n%v", T2) // Output: // T2: // ⎡ 5600 5495 5390 5285 ...
4970 4865 4760 4655⎤ // ⎢ 23600 23270 22940 22610 ... 21620 21290 20960 20630⎥ // ⎢ 41600 41045 40490 39935 ... 38270 37715 37160 36605⎥ // ⎢ 59600 58820 58040 57260 ... 54920 54140 53360 52580⎥ // . // . // . // ⎢113600 112145 110690 109235 ... 104870 103415 101960 100505⎥ // ⎢131600 129920 128240 126560 ... 121520 119840 118160 116480⎥ // ⎢149600 147695 145790 143885 ... 138170 136265 134360 132455⎥ // ⎣167600 165470 163340 161210 ... 154820 152690 150560 148430⎦ } func ExampleDense_MatVecMul() { handleErr := func(err error) { if err != nil { panic(err) } } T0 := New(WithShape(2, 3), WithBacking(Range(Float64, 1, 7))) T1 := New(WithShape(3), WithBacking(Range(Float64, 0, 3))) T2, err := T0.MatVecMul(T1) handleErr(err) fmt.Printf("T2:\n%v\n", T2) // Output: // T2: // [ 8 17] } func ExampleDense_MatVecMul_rowMajorSliced() { // ASPIRATIONAL TODO: IncX and incY of differing values handleErr := func(err error) { if err != nil { panic(err) } } T0 := New(WithShape(10, 12), WithBacking(Range(Float64, 1, 121))) T1 := New(WithShape(3, 3), WithBacking(Range(Float64, 1, 10))) T2, err := T0.Slice(makeRS(1, 3), makeRS(3, 6)) handleErr(err) T3, err := T1.Slice(nil, makeRS(1, 2)) handleErr(err) // here the + formatting option is used because you should know that after this particular slice, the result will be a vector fmt.Printf("T2:\n%+v", T2) fmt.Printf("T3:\n%+v\n", T3) // here we print the underlying slice of T3 just to show that it's actually a much larger slice fmt.Printf("Underlying Slice: %v\n", T3.Data()) T4, err := T2.(*Dense).MatVecMul(T3) handleErr(err) fmt.Printf("T4:\n%v\n", T4) // Outputz: // T2: // Matrix (2, 3) [10 1] // ⎡14 15 16⎤ // ⎣24 25 26⎦ // T3: // Vector (3) [3] // [2 5 8] // Underlying Slice: [2 3 4 5 6 7 8] // T4: // [261 441] } func ExampleDense_MatMul_sliced() { //ASPIRATIONAL TODO: incX and incY of different sizes handleErr := func(err error) { if err != nil { panic(err) } } T0 := New(WithShape(10, 15), WithBacking(Range(Float64, 0, 150))) T1 := New(WithShape(15, 10), WithBacking(Range(Float64, 150, 0))) T2, err := MatMul(T0, T1) handleErr(err) fmt.Printf("T2:\n%v", T2) // Slice T0 to only take a (2, 3) on the upper quadrant // T3 := T0[0:3, 0:2] T3, err := T0.Slice(makeRS(0, 3), makeRS(0, 2)) handleErr(err) fmt.Printf("T3:\n%v", T3) T4, err := T1.Slice(makeRS(13, 15), makeRS(8, 10)) handleErr(err) fmt.Printf("T4:\n%v", T4) T5, err := T3.(*Dense).MatMul(T4) handleErr(err) fmt.Printf("T3xT4:\n%v", T5) // Outputz: // T2: // ⎡ 5600 5495 5390 5285 ... 4970 4865 4760 4655⎤ // ⎢ 23600 23270 22940 22610 ... 21620 21290 20960 20630⎥ // ⎢ 41600 41045 40490 39935 ... 38270 37715 37160 36605⎥ // ⎢ 59600 58820 58040 57260 ... 54920 54140 53360 52580⎥ // . // . // . // ⎢113600 112145 110690 109235 ... 104870 103415 101960 100505⎥ // ⎢131600 129920 128240 126560 ... 121520 119840 118160 116480⎥ // ⎢149600 147695 145790 143885 ... 138170 136265 134360 132455⎥ // ⎣167600 165470 163340 161210 ... 154820 152690 150560 148430⎦ // T3: // ⎡ 0 1⎤ // ⎢15 16⎥ // ⎣30 31⎦ // T4: // ⎡12 11⎤ // ⎣ 2 1⎤ // T3xT4: // ⎡ 2 1⎤ // ⎢212 181⎥ // ⎣422 361⎦ } tensor-0.9.24/example_dense_matop_test.go000066400000000000000000000266231426512615100205310ustar00rootroot00000000000000package tensor import ( "fmt" ) func ExampleDense_Slice() { var T Tensor T = New(WithBacking(Range(Float64, 0, 9)), WithShape(3, 3)) fmt.Printf("T:\n%v\n", T) // T[0:2, 0:2] T, _ = T.Slice(makeRS(0, 2), makeRS(0, 2)) // makeRS is an unexported function that creates a Slice.
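// Note that Slice returns a view: no data is copied, and the view shares the
// original backing array (see ExampleDense_Slice_viewMutation below).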
fmt.Printf("T[0:2, 0:2]:\n%v\n", T) // T[:, 1] T, _ = T.(Slicer).Slice(nil, ss(1)) // ss is unexported fmt.Printf("T[:, 1]:\n%v\n", T) // Output: // T: // ⎡0 1 2⎤ // ⎢3 4 5⎥ // ⎣6 7 8⎦ // // T[0:2, 0:2]: // ⎡0 1⎤ // ⎣3 4⎦ // // T[:, 1]: // [1 4] } // Slicing works on one dimensional arrays too: func ExampleDense_Slice_oneDimension() { var T Tensor T = New(WithBacking(Range(Float64, 0, 9))) fmt.Printf("T:\n%v\n\n", T) T, _ = T.Slice(makeRS(0, 5)) fmt.Printf("T[0:5]:\n%v\n", T) // Output: // T: // [0 1 2 3 ... 5 6 7 8] // // T[0:5]: // [0 1 2 3 4] } // Any modifications to the sliced value modifies the original slice as well func ExampleDense_Slice_viewMutation() { var T, V Tensor T = New(WithBacking(Range(Int, 0, 16)), WithShape(4, 4)) fmt.Printf("T:\n%v\n", T) V, _ = T.Slice(makeRS(1, 3), makeRS(1, 3)) fmt.Printf("V:\n%v\n", V) // Now we modify V's 0th value V.(*Dense).Set(0, 1000) fmt.Printf("V[0] = 1000:\n%v\n", V) fmt.Printf("T is also mutated:\n%v", T) // Output: // T: // ⎡ 0 1 2 3⎤ // ⎢ 4 5 6 7⎥ // ⎢ 8 9 10 11⎥ // ⎣12 13 14 15⎦ // // V: // ⎡ 5 6⎤ // ⎣ 9 10⎦ // // V[0] = 1000: // ⎡1000 6⎤ // ⎣ 9 10⎦ // // T is also mutated: // ⎡ 0 1 2 3⎤ // ⎢ 4 1000 6 7⎥ // ⎢ 8 9 10 11⎥ // ⎣ 12 13 14 15⎦ // } func ExampleView() { // Slicing creates a "view" on the original tensor T := New(WithBacking(Range(Int, 0, 16)), WithShape(4, 4)) fmt.Printf("T:\n%v\n", T) V, _ := T.Slice(makeRS(1, 3), makeRS(1, 3)) fmt.Printf("V:\n%v\n", V) // Now we modify V's 0th value V.(*Dense).Set(0, 1000) fmt.Printf("V[0] = 1000:\n%v\n", V) fmt.Printf("T is also mutated:\n%v\n", T) // Now we materialize the views fmt.Printf("V is Materializable: %v\n", V.IsMaterializable()) T2 := V.Materialize() fmt.Printf("T2 == V:\n%v\n", T2) // Once materialized, it is decoupled from the original tensor T2.(*Dense).Set(0, 999) fmt.Printf("T2 is mutated:\n%v\nBut T is not mutated:\n%v\nNeither is V:\n%v", T2, T, V) // Output: // T: // ⎡ 0 1 2 3⎤ // ⎢ 4 5 6 7⎥ // ⎢ 8 9 10 11⎥ // ⎣12 13 14 15⎦ // // V: // ⎡ 5 6⎤ // ⎣ 9 10⎦ // // V[0] = 1000: // ⎡1000 6⎤ // ⎣ 9 10⎦ // // T is also mutated: // ⎡ 0 1 2 3⎤ // ⎢ 4 1000 6 7⎥ // ⎢ 8 9 10 11⎥ // ⎣ 12 13 14 15⎦ // // V is Materializable: true // T2 == V: // ⎡1000 6⎤ // ⎣ 9 10⎦ // // T2 is mutated: // ⎡999 6⎤ // ⎣ 9 10⎦ // // But T is not mutated: // ⎡ 0 1 2 3⎤ // ⎢ 4 1000 6 7⎥ // ⎢ 8 9 10 11⎥ // ⎣ 12 13 14 15⎦ // // Neither is V: // ⎡1000 6⎤ // ⎣ 9 10⎦ } func ExampleDense_Hstack() { var T, T1, T2, T3 *Dense var err error T = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T1 = New(WithBacking([]float64{1000, 2000}), WithShape(2, 1)) // Simple example if T2, err = T.Hstack(T1); err == nil { fmt.Printf("T.Hstack(T1):\n%v\n", T2) } // This fails, because they are not the same shape T1.Reshape(2) if _, err = T.Hstack(T1); err != nil { fmt.Printf("Error: %v\n\n", err) } // You can stack more than one, as long as all the tensors have the same shape T1.Reshape(2, 1) T3 = T1.Clone().(*Dense) if T2, err = T.Hstack(T1, T3); err == nil { fmt.Printf("T.Hstack(T1, T3):\n%v\n", T2) } // Compatible shapes can be stacked T1 = New(Of(Float64), WithShape(2, 3)) if T2, err = T.Hstack(T1); err == nil { fmt.Printf("Hstacking (2,2) with (2,3):\n%v\n", T2) } // Special attention to vectors - vectors can only be stacked with vectors T = New(WithBacking([]float64{1000, 2000})) T1 = New(WithBacking([]float64{0, 1}), WithShape(1, 2)) if _, err = T.Hstack(T1); err != nil { fmt.Printf("Hstacking (2) with (1,2): %v\n", err) } // Now let's look at failure conditions, or unhandled situations // Incompatible shapes cannot 
be stacked T1.Reshape(3, 2) if _, err = T.Hstack(T1); err != nil { fmt.Printf("Hstacking (2,2) with (3,2): %v\n", err) } // Obviously you can't stack a scalar onto tensors (or the other way around) T1 = New(FromScalar(1.0)) if _, err = T.Hstack(T1); err != nil { fmt.Printf("Hstacking a scalar onto a tensor: %v\n", err) } if _, err = T1.Hstack(T); err != nil { fmt.Printf("Hstacking a tensor onto a scalar: %v\n", err) } // Output: // T.Hstack(T1): // ⎡ 0 1 1000⎤ // ⎣ 2 3 2000⎦ // // Error: Failed to perform Concat: Unable to find new shape that results from concatenation: Dimension mismatch. Expected 2, got 1 // // T.Hstack(T1, T3): // ⎡ 0 1 1000 1000⎤ // ⎣ 2 3 2000 2000⎦ // // Hstacking (2,2) with (2,3): // ⎡0 1 0 0 0⎤ // ⎣2 3 0 0 0⎦ // // Hstacking (2) with (1,2): Failed to perform Concat: Unable to find new shape that results from concatenation: Dimension mismatch. Expected 1, got 2 // Hstacking (2,2) with (3,2): Failed to perform Concat: Unable to find new shape that results from concatenation: Dimension mismatch. Expected 1, got 2 // Hstacking a scalar onto a tensor: Tensor has to be at least 1 dimensions // Hstacking a tensor onto a scalar: Tensor has to be at least 1 dimensions } func ExampleDense_Vstack() { var T, T1, T2, T3 *Dense var err error T = New(WithBacking(Range(Float64, 0, 4)), WithShape(2, 2)) T1 = New(WithBacking([]float64{1000, 2000}), WithShape(1, 2)) // Simple example if T2, err = T.Vstack(T1); err == nil { fmt.Printf("T.Vstack(T1):\n%v\n", T2) } else { fmt.Printf("%+v", err) } // You can stack more than one, as long as all the tensors have the same shape T3 = T1.Clone().(*Dense) if T2, err = T.Vstack(T1, T3); err == nil { fmt.Printf("T.Vstack(T1, T3):\n%v\n", T2) } else { fmt.Printf("====\nerr %v\n%v\n===\n", err, T3.Shape()) } // Let's look at failure conditions // All tensors must be at least 2D T.Reshape(4) if _, err = T.Vstack(T1); err != nil { fmt.Printf("Vstacking (4) with (1, 2): %v\n", err) } if _, err = T1.Vstack(T); err != nil { fmt.Printf("Vstacking (1, 2) with (4): %v\n", err) } // Output: // T.Vstack(T1): // ⎡ 0 1⎤ // ⎢ 2 3⎥ // ⎣1000 2000⎦ // // T.Vstack(T1, T3): // ⎡ 0 1⎤ // ⎢ 2 3⎥ // ⎢1000 2000⎥ // ⎣1000 2000⎦ // // Vstacking (4) with (1, 2): Tensor has to be at least 2 dimensions // Vstacking (1, 2) with (4): Tensor has to be at least 2 dimensions } func ExampleRepeatReuse() { var T, T1 *Dense T = New(WithBacking([]float64{1, 2, 3, 4}), WithShape(1, 4)) T1 = New(Of(Float64), WithShape(3, 4)) var T2 Tensor var err error if T2, err = RepeatReuse(T, T1, 0, 3); err != nil { fmt.Printf("Err %v", err) } fmt.Printf("RepeatReuse(T, T1):\n%v", T2) fmt.Printf("T1 == T2: %t\n", T1 == T2) // But if your reuse is wrongly shaped, an error occurs T1 = New(Of(Float64), WithShape(1, 4)) // too small if _, err = RepeatReuse(T, T1, 0, 3); err != nil { fmt.Printf("Expected Error: %v\n", err) } // Output: // RepeatReuse(T, T1): // ⎡1 2 3 4⎤ // ⎢1 2 3 4⎥ // ⎣1 2 3 4⎦ // T1 == T2: true // Expected Error: Reuse shape is (1, 4). Expected shape is (3, 4) } func ExampleRepeat_uncommonUses() { T := New(WithBacking([]int{1, 2, 3, 4, 5, 6}), WithShape(2, 3)) fmt.Printf("T:\n%v", T) fmt.Println("Axis 0 has 2 elements. 
So we will need to write the number of times each element is to be repeated") fmt.Println("Here, Repeat(T, 0, 3, 2) results in this:") T1, err := Repeat(T, 0, 3, 2) if err != nil { fmt.Printf("Err %v", err) } fmt.Printf("%v", T1) fmt.Println("Observe the 0th element ([1 2 3]) has been repeated 3 times, and the 1st element ([4 5 6]) has been repeated twice") fmt.Println("") fmt.Println("We can also repeat on Axis 1. Now along Axis 1 there are 3 elements: ([1 4], [2 5], [3 6])") fmt.Println("So we have to specify how many times to repeat each element.") fmt.Println("Repeat(T, 1, 2, 3, 2) yields the following result:") T1, err = Repeat(T, 1, 2, 3, 2) if err != nil { fmt.Printf("Err %v", err) } fmt.Printf("%v", T1) fmt.Println("Once again, observe that the 1st element ([2 5]) has been repeated 3 times, while the rest have been repeated twice") /* // TODO break this out to another example T1, err = Repeat(T, AllAxes, 2, 3, 2, 2, 2, 2) if err != nil { fmt.Printf("Err %v", err) } fmt.Printf("%#v", T1) */ // Output: // T: // ⎡1 2 3⎤ // ⎣4 5 6⎦ // Axis 0 has 2 elements. So we will need to write the number of times each element is to be repeated // Here, Repeat(T, 0, 3, 2) results in this: // ⎡1 2 3⎤ // ⎢1 2 3⎥ // ⎢1 2 3⎥ // ⎢4 5 6⎥ // ⎣4 5 6⎦ // Observe the 0th element ([1 2 3]) has been repeated 3 times, and the 1st element ([4 5 6]) has been repeated twice // // We can also repeat on Axis 1. Now along Axis 1 there are 3 elements: ([1 4], [2 5], [3 6]) // So we have to specify how many times to repeat each element. // Repeat(T, 1, 2, 3, 2) yields the following result: // ⎡1 1 2 2 2 3 3⎤ // ⎣4 4 5 5 5 6 6⎦ // Once again, observe that the 1st element ([2 5]) has been repeated 3 times, while the rest have been repeated twice } func ExampleT() { // Usual example of 2D matrix being transposed: M := New(WithBacking([]int{1, 2, 3, 4, 5, 6}), WithShape(2, 3)) M2, err := T(M) if err != nil { fmt.Printf("Err: %v\n", err) } fmt.Printf("M:\n%v\nM2:\n%v\n", M, M2) // T accepts optional parameters describing the permutation of axes. // In a 2D case, there are only two options: (0, 1) or (1, 0). // The latter is default if no parameters are passed in. // The former is a no-op as rearranging a matrix so that the 0th axis becomes the 0th axis // and the first axis becomes the first axis is not going to do anything. // // However, note that M3 is a different result. M3, err := T(M, 0, 1) if err != nil { fmt.Printf("Err: %v\n", err) } fmt.Printf("M3:\n%v\nM == M3: %t", M3, M == M3) // Output: // M: // ⎡1 2 3⎤ // ⎣4 5 6⎦ // // M2: // ⎡1 4⎤ // ⎢2 5⎥ // ⎣3 6⎦ // // M3: // ⎡1 2 3⎤ // ⎣4 5 6⎦ // // M == M3: false } func ExampleT_scalarlike() { // Be aware when dealing with scalarlike tensors // scalar/scalarlikes have no effect when calling T() // but the result is put into a new tensor S := New(WithBacking([]float32{3.14}), WithShape()) S2, err := T(S) if err != nil { fmt.Printf("Err %v", err) } fmt.Printf("S: %v S2 %v S == S2: %t\n", S, S2, S == S2) // however do note that scalars and scalarlikes are not the same thing. // for example, consider this: _, err = T(S, 1, 0) fmt.Printf("error when the axes are more than the shape's dims: %v\n", err) // but if you have a tensor that is a scalar-like: S.Reshape(1, 1) S2, err = T(S, 1, 0) if err != nil { fmt.Printf("Err: %v\n", err) } fmt.Printf("S:\n%v\nS2:\n%v\nS == S2: %t\n", S, S2, S == S2) // Output: // S: 3.14 S2 3.14 S == S2: false // error when the axes are more than the shape's dims: Dimension mismatch. 
Expected 0, got 2 // S: // [[3.14]] // S2: // [[3.14]] // S == S2: false } tensor-0.9.24/example_dense_reduction_test.go000066400000000000000000000006731426512615100214030ustar00rootroot00000000000000package tensor import "fmt" func Example_sum_Sliced() { T := New(WithShape(4, 4), WithBacking([]int{ 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, })) s, _ := T.Slice(S(1, 3), S(1, 3)) sum, _ := Sum(s) fmt.Printf("T:\n%v\nsliced:\n%v\nSum: %v", T, s, sum) // Output: // T: // ⎡1 2 3 4⎤ // ⎢5 6 7 8⎥ // ⎢1 2 3 4⎥ // ⎣5 6 7 8⎦ // // sliced: // ⎡6 7⎤ // ⎣2 3⎦ // // Sum: 18 } tensor-0.9.24/example_extension_matop_test.go000066400000000000000000000047611426512615100214480ustar00rootroot00000000000000package tensor_test import ( "fmt" "gorgonia.org/tensor" ) // In this example, we want to handle basic tensor operations for arbitrary types (slicing, stacking, transposing) // LongStruct is a type that is an arbitrarily long struct type LongStruct struct { a, b, c, d, e uint64 } // Format implements fmt.Formatter for easier-to-read output of data func (ls LongStruct) Format(s fmt.State, c rune) { fmt.Fprintf(s, "{a: %d, b: %d, c: %d, d: %d, e: %d}", ls.a, ls.b, ls.c, ls.d, ls.e) } type s int func (ss s) Start() int { return int(ss) } func (ss s) End() int { return int(ss) + 1 } func (ss s) Step() int { return 1 } func ExampleTranspose_extension() { // For documentation if you're reading this on godoc: // // type LongStruct struct { // a, b, c, d, e uint64 // } T := tensor.New(tensor.WithShape(2, 2), tensor.WithBacking([]LongStruct{ LongStruct{0, 0, 0, 0, 0}, LongStruct{1, 1, 1, 1, 1}, LongStruct{2, 2, 2, 2, 2}, LongStruct{3, 3, 3, 3, 3}, }), ) fmt.Printf("Before:\n%v\n", T) retVal, _ := tensor.Transpose(T) // an alternative would be to use T.T(); T.Transpose() fmt.Printf("After:\n%v\n", retVal) // Output: // Before: // ⎡{a: 0, b: 0, c: 0, d: 0, e: 0} {a: 1, b: 1, c: 1, d: 1, e: 1}⎤ // ⎣{a: 2, b: 2, c: 2, d: 2, e: 2} {a: 3, b: 3, c: 3, d: 3, e: 3}⎦ // // After: // ⎡{a: 0, b: 0, c: 0, d: 0, e: 0} {a: 2, b: 2, c: 2, d: 2, e: 2}⎤ // ⎣{a: 1, b: 1, c: 1, d: 1, e: 1} {a: 3, b: 3, c: 3, d: 3, e: 3}⎦ } func Example_stackExtension() { // For documentation if you're reading this on godoc: // // type LongStruct struct { // a, b, c, d, e uint64 // } T := tensor.New(tensor.WithShape(2, 2), tensor.WithBacking([]LongStruct{ LongStruct{0, 0, 0, 0, 0}, LongStruct{1, 1, 1, 1, 1}, LongStruct{2, 2, 2, 2, 2}, LongStruct{3, 3, 3, 3, 3}, }), ) S, _ := T.Slice(nil, s(1)) // s is a type that implements tensor.Slice T2 := tensor.New(tensor.WithShape(2, 2), tensor.WithBacking([]LongStruct{ LongStruct{10, 10, 10, 10, 10}, LongStruct{11, 11, 11, 11, 11}, LongStruct{12, 12, 12, 12, 12}, LongStruct{13, 13, 13, 13, 13}, }), ) S2, _ := T2.Slice(nil, s(0)) // an alternative would be something like this // T3, _ := S.(*tensor.Dense).Stack(1, S2.(*tensor.Dense)) T3, _ := tensor.Stack(1, S, S2) fmt.Printf("Stacked:\n%v", T3) // Output: // Stacked: // ⎡ {a: 1, b: 1, c: 1, d: 1, e: 1} {a: 10, b: 10, c: 10, d: 10, e: 10}⎤ // ⎣ {a: 3, b: 3, c: 3, d: 3, e: 3} {a: 12, b: 12, c: 12, d: 12, e: 12}⎦ } tensor-0.9.24/example_extension_test.go000066400000000000000000000043111426512615100202370ustar00rootroot00000000000000package tensor_test import ( //"errors" "fmt" "reflect" "github.com/pkg/errors" "gorgonia.org/tensor" ) // In this example, we want to create and handle a tensor of *MyType // First, define MyType // MyType is defined type MyType struct { x, y int } func (T MyType) Format(s fmt.State, c rune) { fmt.Fprintf(s, "(%d, %d)", T.x, T.y) }
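// (Note: a tensor.Dtype is just a wrapped reflect.Type - see the init()
// function further below - so in principle any Go type, including pointer
// types like *MyType, can be given its own Dtype.)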
// MyDtype is the dtype of MyType. This value is populated in the init() function below var MyDtype tensor.Dtype // MyEngine supports additions of MyType, as well as other Dtypes type MyEngine struct { tensor.StdEng } // For simplicity's sake, we'd only want to handle MyType-MyType or MyType-Int interactions // Also, we only expect Dense tensors // You're of course free to define your own rules // Add adds two tensors func (e MyEngine) Add(a, b tensor.Tensor, opts ...tensor.FuncOpt) (retVal tensor.Tensor, err error) { switch a.Dtype() { case MyDtype: switch b.Dtype() { case MyDtype: data := a.Data().([]*MyType) datb := b.Data().([]*MyType) for i, v := range data { v.x += datb[i].x v.y += datb[i].y } return a, nil case tensor.Int: data := a.Data().([]*MyType) datb := b.Data().([]int) for i, v := range data { v.x += datb[i] v.y += datb[i] } return a, nil } case tensor.Int: switch b.Dtype() { case MyDtype: data := a.Data().([]int) datb := b.Data().([]*MyType) for i, v := range datb { v.x += data[i] v.y += data[i] } default: return e.StdEng.Add(a, b, opts...) } default: return e.StdEng.Add(a, b, opts...) } return nil, errors.New("Unreachable") } func init() { MyDtype = tensor.Dtype{reflect.TypeOf(&MyType{})} } func Example_extension() { T := tensor.New(tensor.WithEngine(MyEngine{}), tensor.WithShape(2, 2), tensor.WithBacking([]*MyType{ &MyType{0, 0}, &MyType{0, 1}, &MyType{1, 0}, &MyType{1, 1}, })) ones := tensor.New(tensor.WithShape(2, 2), tensor.WithBacking([]int{1, 1, 1, 1}), tensor.WithEngine(MyEngine{})) T2, _ := T.Add(ones) fmt.Printf("T:\n%+v", T) fmt.Printf("T2:\n%+v", T2) // output: //T: // Matrix (2, 2) [2 1] // ⎡(1, 1) (1, 2)⎤ // ⎣(2, 1) (2, 2)⎦ // T2: // Matrix (2, 2) [2 1] // ⎡(1, 1) (1, 2)⎤ // ⎣(2, 1) (2, 2)⎦ } tensor-0.9.24/example_iterator_test.go000066400000000000000000000034301426512615100200550ustar00rootroot00000000000000package tensor import "fmt" // This is an example of how to use `IteratorFromDense` on a row-major Dense tensor func Example_iteratorRowmajor() { T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5})) it := IteratorFromDense(T) fmt.Printf("T:\n%v\n", T) for i, err := it.Start(); err == nil; i, err = it.Next() { fmt.Printf("i: %d, coord: %v\n", i, it.Coord()) } // Output: // T: // ⎡0 1 2⎤ // ⎣3 4 5⎦ // // i: 0, coord: [0 1] // i: 1, coord: [0 2] // i: 2, coord: [1 0] // i: 3, coord: [1 1] // i: 4, coord: [1 2] // i: 5, coord: [0 0] } // This is an example of using `IteratorFromDense` on a col-major Dense tensor. More importantly // this example shows the order of the iteration. func Example_iteratorcolMajor() { T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}), AsFortran(nil)) it := IteratorFromDense(T) fmt.Printf("T:\n%v\n", T) for i, err := it.Start(); err == nil; i, err = it.Next() { fmt.Printf("i: %d, coord: %v\n", i, it.Coord()) } // Output: // T: // ⎡0 2 4⎤ // ⎣1 3 5⎦ // // i: 0, coord: [0 1] // i: 2, coord: [0 2] // i: 4, coord: [1 0] // i: 1, coord: [1 1] // i: 3, coord: [1 2] // i: 5, coord: [0 0] } func ExampleSliceIter() { T := New(WithShape(3, 3), WithBacking(Range(Float64, 0, 9))) S, err := T.Slice(makeRS(1, 3), makeRS(1, 3)) if err != nil { fmt.Printf("Err %v\n", err) return } fmt.Printf("S (requires iterator?
true) // ⎡4 5⎤ // ⎣7 8⎦ // // i 0, coord [0 1] // i 1, coord [1 0] // i 3, coord [1 1] // i 4, coord [0 0] } tensor-0.9.24/example_mapreduce_test.go000066400000000000000000000040611426512615100201720ustar00rootroot00000000000000package tensor import "fmt" func ExampleSum() { T := New(WithBacking([]float64{0, 1, 2, 3}), WithShape(2, 2)) fmt.Printf("T:\n%v\n", T) // sum along axis 0 summed, _ := Sum(T, 0) fmt.Printf("Summed:\n%v\n", summed) // to keep dims, simply reshape summed.Reshape(1, 2) fmt.Printf("Summed (Kept Dims - Shape: %v):\n%v\n\n", summed.Shape(), summed) // summing along multiple axes summed, _ = Sum(T, 1, 0) fmt.Printf("Summed along (1, 0): %v", summed) // Output: // T: // ⎡0 1⎤ // ⎣2 3⎦ // // Summed: // [2 4] // Summed (Kept Dims - Shape: (1, 2)): // R[2 4] // // Summed along (1, 0): 6 } func ExampleSum_sliced() { T := New(WithBacking([]float64{0, 1, 2, 3}), WithShape(2, 2)) fmt.Printf("T:\n%v\n", T) V, _ := T.Slice(nil, S(1)) fmt.Printf("V:\n%v\n", V) Σ, _ := Sum(V) fmt.Printf("Σ: %v", Σ) // Output: // T: // ⎡0 1⎤ // ⎣2 3⎦ // // V: // [1 3] // Σ: 4 } func ExampleArgmax() { T := New(WithBacking([]float64{0, 100, 200, 3}), WithShape(2, 2)) fmt.Printf("T:\n%v\n", T) // argmax along the x-axis am, _ := Argmax(T, 0) fmt.Printf("Argmax: %v\n", am) fmt.Printf("Argmax is %T of %v", am, am.Dtype()) // Output: // T: // ⎡ 0 100⎤ // ⎣200 3⎦ // // Argmax: [1 0] // Argmax is *tensor.Dense of int } func ExampleArgmax_sliced() { T := New(WithBacking([]float64{0, 100, 200, 3}), WithShape(2, 2)) fmt.Printf("T:\n%v\n", T) // slice creates a view V, _ := T.Slice(nil, S(1)) // argmax along the x-axis am, _ := Argmax(V, 0) fmt.Printf("Argmax: %v\n", am) fmt.Printf("Argmax is %T of %v", am, am.Dtype()) // Output: // T: // ⎡ 0 100⎤ // ⎣200 3⎦ // // Argmax: 0 // Argmax is *tensor.Dense of int } func ExampleArgmin() { T := New(WithBacking([]float64{0, 100, 200, 3}), WithShape(2, 2)) fmt.Printf("T:\n%v\n", T) // argmin along the x-axis am, _ := Argmin(T, 0) fmt.Printf("Argmin: %v\n", am) fmt.Printf("Argmin is %T of %v", am, am.Dtype()) // Output: // T: // ⎡ 0 100⎤ // ⎣200 3⎦ // // Argmin: [0 1] // Argmin is *tensor.Dense of int } tensor-0.9.24/example_sparse_test.go000066400000000000000000000044501426512615100175240ustar00rootroot00000000000000package tensor import "fmt" func Example_sparse_basics() { xs := []int{1, 2, 6, 8} ys := []int{1, 2, 1, 6} vals := []float32{3, 1, 4, 1} S := CSCFromCoord(Shape{9, 7}, xs, ys, vals) T := New(WithShape(9, 7), Of(Float32)) // dense Result, _ := Add(S, T) fmt.Printf("When adding a sparse tensor to a dense tensor, the result is of %T:\n=============================================================================\n%+#s\n", Result, Result) Result, _ = Add(T, S) fmt.Printf("And vice versa - %T\n=========================\n%+#s\n", Result, Result) // Output: // When adding a sparse tensor to a dense tensor, the result is of *tensor.Dense: // ============================================================================= // Matrix (9, 7) [7 1] // ⎡0 0 0 0 0 0 0⎤ // ⎢0 3 0 0 0 0 0⎥ // ⎢0 0 1 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 4 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎣0 0 0 0 0 0 1⎦ // // And vice versa - *tensor.Dense // ========================= // Matrix (9, 7) [7 1] // ⎡0 0 0 0 0 0 0⎤ // ⎢0 3 0 0 0 0 0⎥ // ⎢0 0 1 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 4 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎣0 0 0 0 0 0 1⎦ } func Example_sparse_advanced() { xs := []int{1, 2, 6, 8} ys := []int{1, 2, 1, 6} vals := []int16{3, 1, 4, 
1} S := CSCFromCoord(Shape{9, 7}, xs, ys, vals) T := New(WithShape(9, 7), Of(Int16)) // dense Reuse := New(WithShape(9, 7), Of(Int16)) // reuse must be a *Dense because the result will always be a dense Result, _ := Add(S, T, WithReuse(Reuse)) fmt.Printf("Operations involving sparse tensors also do take the usual function options like Reuse:\n%+#s\nResult == Reuse: %t", Result, Result == Reuse) // Output: // Operations involving sparse tensors also do take the usual function options like Reuse: // Matrix (9, 7) [7 1] // ⎡0 0 0 0 0 0 0⎤ // ⎢0 3 0 0 0 0 0⎥ // ⎢0 0 1 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎢0 4 0 0 0 0 0⎥ // ⎢0 0 0 0 0 0 0⎥ // ⎣0 0 0 0 0 0 1⎦ // // Result == Reuse: true } tensor-0.9.24/example_tensor_basics_test.go000066400000000000000000000111611426512615100210620ustar00rootroot00000000000000package tensor import "fmt" // This example showcases the very basics of the package. func Example_basics() { // Create a (2, 2)-Matrix of integers a := New(WithShape(2, 2), WithBacking([]int{1, 2, 3, 4})) fmt.Printf("a:\n%v\n", a) // Create a (2, 3, 4)-tensor of float32s b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4)) fmt.Printf("b:\n%1.1f", b) // Accessing data x, _ := b.At(0, 1, 2) // in Numpy syntax: b[0,1,2] fmt.Printf("x: %1.1f\n\n", x) // Setting data b.SetAt(float32(1000), 0, 1, 2) fmt.Printf("b:\n%v", b) // Output: // a: // ⎡1 2⎤ // ⎣3 4⎦ // // b: // ⎡ 0.0 1.0 2.0 3.0⎤ // ⎢ 4.0 5.0 6.0 7.0⎥ // ⎣ 8.0 9.0 10.0 11.0⎦ // // ⎡12.0 13.0 14.0 15.0⎤ // ⎢16.0 17.0 18.0 19.0⎥ // ⎣20.0 21.0 22.0 23.0⎦ // // x: 6.0 // // b: // ⎡ 0 1 2 3⎤ // ⎢ 4 5 1000 7⎥ // ⎣ 8 9 10 11⎦ // // ⎡ 12 13 14 15⎤ // ⎢ 16 17 18 19⎥ // ⎣ 20 21 22 23⎦ } // This example showcases interactions between different data orders func Example_differingDataOrders() { T0 := New(WithShape(2, 3), WithBacking(Range(Int, 0, 6))) // Create a (2, 3)-matrix with the standard row-major backing T1 := New(WithShape(2, 3), WithBacking(Range(Int, 0, 6)), AsFortran(nil)) // Create a (2, 3)-matrix with a col-major backing T2, _ := Add(T0, T1) fmt.Printf("T0:\n%vT1:\n%vT2:\n%vT2 Data Order: %v\n\n", T0, T1, T2, T2.DataOrder()) // the result's data order is highly dependent on the order of the operands: it will take after the first operand T0 = New(WithShape(2, 3), WithBacking(Range(Int, 1, 7)), AsFortran(nil)) // Create a (2, 3)-matrix with a col-major backing T1 = New(WithShape(2, 3), WithBacking(Range(Int, 1, 7))) // Create a (2, 3)-matrix with the standard row-major backing T2, _ = Add(T0, T1) fmt.Printf("T0:\n%vT1:\n%vT2:\n%vT2 Data Order: %v\n\n", T0, T1, T2, T2.DataOrder()) reuse := New(WithShape(2, 3), WithBacking([]int{1000, 1000, 1000, 1000, 1000, 1000})) fmt.Printf("reuse Data Order: %v\n", reuse.DataOrder()) T2, _ = Add(T0, T1, WithReuse(reuse)) fmt.Printf("T2:\n%vT2 Data Order: %v\n\n", T2, T2.DataOrder()) // Output: // T0: // ⎡0 1 2⎤ // ⎣3 4 5⎦ // T1: // ⎡0 2 4⎤ // ⎣1 3 5⎦ // T2: // ⎡ 0 3 6⎤ // ⎣ 4 7 10⎦ // T2 Data Order: Contiguous, RowMajor // // // T0: // ⎡1 3 5⎤ // ⎣2 4 6⎦ // T1: // ⎡1 2 3⎤ // ⎣4 5 6⎦ // T2: // ⎡ 2 5 8⎤ // ⎣ 6 9 12⎦ // T2 Data Order: Contiguous, ColMajor // // // reuse Data Order: Contiguous, RowMajor // T2: // ⎡ 2 5 8⎤ // ⎣ 6 9 12⎦ // T2 Data Order: Contiguous, ColMajor } // The AsFortran construction option is a bit finicky. 
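// In short (a summary of the behaviour demonstrated in the example below, not additional API):
//
//	AsFortran(nil)  // reinterprets the backing slice given to WithBacking as col-major; the data itself is not moved
//	AsFortran(data) // treats data as row-major and transposes it into col-major order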
func Example_asFortran() { // Here the data is passed in and directly used without changing the underlying data T0 := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}), AsFortran(nil)) fmt.Printf("T0:\n%vData: %v\n\n", T0, T0.Data()) // Here the data is passed into the AsFortran construction option, and it assumes that the data is already in // row-major form. Therefore a transpose will be performed. T1 := New(WithShape(2, 3), AsFortran([]float64{0, 1, 2, 3, 4, 5})) fmt.Printf("T1:\n%vData: %v\n\n", T1, T1.Data()) // Further example of how AsFortran works: orig := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5})) T2 := New(WithShape(2, 3), AsFortran(orig)) fmt.Printf("Original\n%vData: %v\n", orig, orig.Data()) fmt.Printf("T2:\n%vData: %v\n", T2, T2.Data()) // Output: // T0: // ⎡0 2 4⎤ // ⎣1 3 5⎦ // Data: [0 1 2 3 4 5] // // T1: // ⎡0 1 2⎤ // ⎣3 4 5⎦ // Data: [0 3 1 4 2 5] // // Original // ⎡0 1 2⎤ // ⎣3 4 5⎦ // Data: [0 1 2 3 4 5] // T2: // ⎡0 1 2⎤ // ⎣3 4 5⎦ // Data: [0 3 1 4 2 5] } // The AsDenseDiag construction option creates a dense diagonal matrix from the input, either a slice or a tensor. // The resulting shape is automatically inferred from the input vector. // // This is like Numpy's `diag()` function, except not stupid. Numpy's `diag()` has been a cause of errors because it is its own inverse; applying it twice returns the original input: // >>> np.diag(np.diag(np.array([1,2,3]))) // array([1,2,3]) func Example_asDenseDiag() { T := New(WithShape(3), WithBacking([]int{1, 2, 3})) T1 := New(AsDenseDiag(T)) fmt.Printf("T1:\n%v", T1) T2 := New(AsDenseDiag([]float64{3.14, 6.28, 11111})) fmt.Printf("T2:\n%v", T2) // Output: // T1: //⎡1 0 0⎤ //⎢0 2 0⎥ //⎣0 0 3⎦ // T2: // ⎡ 3.14 0 0⎤ // ⎢ 0 6.28 0⎥ // ⎣ 0 0 11111⎦ } tensor-0.9.24/flags.go000066400000000000000000000122271426512615100145520ustar00rootroot00000000000000package tensor // DataOrder is a flag that indicates the order of data. The zero value, DataOrder(0), // is what this package uses by default: contiguous, row-major. type DataOrder byte const ( // ColMajor indicates that the data is stored in a col-major way. // Data can only be stored in either ColMajor(1) or RowMajor(0) order. // The way the DataOrder was designed causes the default to be RowMajor ColMajor DataOrder = 1 << iota // NonContiguous indicates that the data is not contiguous. // Data can either be Contiguous (0) or NonContiguous (2). // The way DataOrder was designed causes the default to be Contiguous. NonContiguous // Transposed indicates that the data has been transposed Transposed ) var dataOrderNames = []rune("NonContiguous, RowMajorᵀNonContiguous, ColMajorᵀ") // MakeDataOrder makes a data order. Typical examples: // MakeDataOrder(DataOrder(0)) // Row Major, contiguous // MakeDataOrder(NonContiguous) // Row Major, non-contiguous // MakeDataOrder(ColMajor) // Col Major, contiguous // MakeDataOrder(ColMajor, NonContiguous) // what it says on the tin func MakeDataOrder(fs ...DataOrder) (retVal DataOrder) { if len(fs) == 1 { return fs[0] } for _, f := range fs { retVal |= f } return } // IsColMajor returns true if the data order describes col-major data func (f DataOrder) IsColMajor() bool { return (f & ColMajor) != 0 } // IsRowMajor returns true if the data order describes row-major data func (f DataOrder) IsRowMajor() bool { return !f.IsColMajor() } // IsContiguous returns true if the data order describes contiguous data. func (f DataOrder) IsContiguous() bool { return !f.IsNotContiguous() } // IsNotContiguous returns true if the data order describes noncontiguous data. 
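// For example (using the flag constructors above):
//
//	MakeDataOrder(ColMajor, NonContiguous).IsNotContiguous() // true
//	DataOrder(0).IsNotContiguous()                           // false: the zero value is contiguous, row-major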
func (f DataOrder) IsNotContiguous() bool { return (f & NonContiguous) != 0 } // IsTransposed returns true if the data order indicates that the data has been transposed (but not moved) func (f DataOrder) IsTransposed() bool { return (f & Transposed) != 0 } func (f DataOrder) toggleColMajor() DataOrder { return f ^ (ColMajor) } func (f DataOrder) clearTransposed() DataOrder { return f &^ (Transposed) } // HasSameOrder returns true if both data orders are the same (either both are ColMajor or both are RowMajor) func (f DataOrder) HasSameOrder(other DataOrder) bool { return (f.IsColMajor() && other.IsColMajor()) || (f.IsRowMajor() && other.IsRowMajor()) } func (f DataOrder) String() string { var start, end int if f.IsRowMajor() { end = 23 if f.IsContiguous() { start = 3 } } else { end = 47 start = 24 if f.IsContiguous() { start = 27 } } if f.IsTransposed() { end++ } return string(dataOrderNames[start:end]) } // Triangle is a flag representing the "triangle"ness of a matrix type Triangle byte const ( NotTriangle Triangle = iota Upper Lower Symmetric ) // MemoryFlag is a flag representing how a piece of Memory may be used type MemoryFlag byte const ( // NativelyInaccessible indicates that the data in the memory cannot be accessed by Go code. NativelyInaccessible MemoryFlag = 1 << iota // ManuallyManaged indicates that the memory is managed by something else. Any Tensor with // manually managed memory will not be returned to the pool. ManuallyManaged // IsOverallocated indicates that the memory for a given tensor is overallocated (i.e. the size-in-use is smaller than the size allocated) IsOverallocated ) func MakeMemoryFlag(fs ...MemoryFlag) (retVal MemoryFlag) { if len(fs) == 1 { return fs[0] } for _, f := range fs { retVal |= f } return } func (f MemoryFlag) nativelyAccessible() bool { return !((f & NativelyInaccessible) != 0) } func (f MemoryFlag) manuallyManaged() bool { return (f & ManuallyManaged) != 0 } func (f MemoryFlag) isOverallocated() bool { return (f & IsOverallocated) != 0 } // OpOpt are the options used to call ops type OpOpt struct { reuse Tensor incr Tensor unsafe bool same bool t Dtype } // ParseFuncOpts parses a list of FuncOpt into a single unified method call structure. func ParseFuncOpts(opts ...FuncOpt) *OpOpt { retVal := borrowOpOpt() for _, opt := range opts { opt(retVal) } return retVal } // Incr returns the tensor to be incremented in the call. Can be nil. func (fo *OpOpt) Incr() Tensor { return fo.incr } // Reuse returns the tensor to be reused in the call. Can be nil. func (fo *OpOpt) Reuse() Tensor { return fo.reuse } // IncrReuse returns the tensor to be used for incrementing: the incr tensor if one was set (with true), otherwise the reuse tensor (with false) func (fo *OpOpt) IncrReuse() (Tensor, bool) { if fo.incr != nil { return fo.incr, true } return fo.reuse, false } // Safe signals if the op is to be done safely func (fo *OpOpt) Safe() bool { return !fo.unsafe } // Same signals if the op is to return the same type as its inputs func (fo *OpOpt) Same() bool { return fo.same } // As returns the dtype of the return value of the method call. // For example: // a.Lt(b, As(Bool)) // indicates that the result of the `Lt()` should be a Tensor of Bool. // // Another example: // a.Add(b, As(Int)) // indicates that the result of `Add()` should be converted to a Tensor of Int. // Note that this function is not yet supported in most operations. 
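// A sketch of how an engine method might consume this option (hypothetical code, for illustration only):
//
//	fo := ParseFuncOpts(opts...)
//	if as := fo.As(); as != (Dtype{}) {
//		// ... convert the computed result to a tensor of Dtype `as` ...
//	}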
func (fo *OpOpt) As() Dtype { return fo.t } tensor-0.9.24/flags_test.go000066400000000000000000000052061426512615100156100ustar00rootroot00000000000000package tensor import "testing" func TestMemoryFlag(t *testing.T) { var defaultFlag MemoryFlag if defaultFlag.manuallyManaged() || !defaultFlag.nativelyAccessible() { t.Errorf("Something went wrong with the creation of flags") } a := ManuallyManaged if !a.manuallyManaged() { t.Errorf("Expected ManuallyManaged to be true") } if !a.nativelyAccessible() { t.Errorf("Expected ManuallyManaged to be nativelyAccessible") } b := NativelyInaccessible if b.manuallyManaged() { t.Errorf("Expected NativelyInaccessible to not be manually managed") } if b.nativelyAccessible() { t.Errorf("Expected NativelyInaccessible to not be natively accessible. Got %v", b.nativelyAccessible()) } c := MakeMemoryFlag(ManuallyManaged, NativelyInaccessible) if !c.manuallyManaged() { t.Errorf("Expected c to be manually managed") } if c.nativelyAccessible() { t.Errorf("Expected c to be natively inaccessible") } } func TestDataOrder(t *testing.T) { var defaultFlag DataOrder if defaultFlag.IsColMajor() || defaultFlag.IsNotContiguous() || defaultFlag.IsTransposed() { t.Error("Expected default flag to be row major and contiguous and not transposed") } if !(defaultFlag.IsRowMajor() && defaultFlag.IsContiguous()) { t.Error("Expected default flag to be row major and contiguous") } if defaultFlag.String() != "Contiguous, RowMajor" { t.Errorf("Expected string is \"Contiguous, RowMajor\". Got %q", defaultFlag.String()) } ncrm := MakeDataOrder(NonContiguous) if ncrm.IsColMajor() || ncrm.IsContiguous() { t.Error("Expected noncontiguous row major.") } if ncrm.String() != "NonContiguous, RowMajor" { t.Errorf("Expected string is \"NonContiguous, RowMajor\". Got %q", ncrm.String()) } cm := ColMajor if cm.IsRowMajor() { t.Error("colMajor cannot be rowMajor") } if cm.IsNotContiguous() { t.Error("ColMajor by default is contiguous") } if cm.String() != "Contiguous, ColMajor" { t.Errorf(`Expected string is "Contiguous, ColMajor". Got %q`, cm.String()) } // check toggle rm := cm.toggleColMajor() if rm.IsColMajor() { t.Errorf("toggled cm should be rm") } cm = rm.toggleColMajor() if cm.IsRowMajor() { t.Errorf("toggled rm should be cm") } transposed := MakeDataOrder(Transposed) if !transposed.IsTransposed() { t.Error("Expected transposed flag to be set") } if transposed.String() != "Contiguous, RowMajorᵀ" { t.Errorf("Expected string is \"Contiguous, RowMajorᵀ\". Got %q", transposed.String()) } untransposed := transposed.clearTransposed() if untransposed != defaultFlag { t.Error("Expected default flag after untransposing") } } tensor-0.9.24/generic_utils.go000066400000000000000000000126151426512615100163130ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package tensor import ( "math/rand" "reflect" "github.com/pkg/errors" "gorgonia.org/vecf32" "gorgonia.org/vecf64" ) // Range creates a ranged array with a given type. It panics if the Dtype is not supported or does not represent a naturally orderable type (strings, pointers, etc.) // Do note that the range algorithm is very simple: it just increments or decrements by 1. 
This means for floating point types // you're not able to create a range with a 0.1 increment step, and for complex number types, the imaginary part will always be 0i func Range(dt Dtype, start, end int) interface{} { size := end - start incr := true if start > end { incr = false size = start - end } if size < 0 { panic("Cannot create a range that is negative in size") } switch dt.Kind() { case reflect.Int: retVal := make([]int, size) for i, v := 0, int(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Int8: retVal := make([]int8, size) for i, v := 0, int8(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Int16: retVal := make([]int16, size) for i, v := 0, int16(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Int32: retVal := make([]int32, size) for i, v := 0, int32(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Int64: retVal := make([]int64, size) for i, v := 0, int64(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Uint: retVal := make([]uint, size) for i, v := 0, uint(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Uint8: retVal := make([]uint8, size) for i, v := 0, uint8(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Uint16: retVal := make([]uint16, size) for i, v := 0, uint16(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Uint32: retVal := make([]uint32, size) for i, v := 0, uint32(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Uint64: retVal := make([]uint64, size) for i, v := 0, uint64(start); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Float32: return vecf32.Range(start, end) case reflect.Float64: return vecf64.Range(start, end) case reflect.Complex64: retVal := make([]complex64, size) for i, v := 0, complex(float32(start), float32(0.0)); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal case reflect.Complex128: retVal := make([]complex128, size) for i, v := 0, complex(float64(start), float64(0.0)); i < size; i++ { retVal[i] = v if incr { v++ } else { v-- } } return retVal default: err := errors.Errorf("Unrangeable Type %v", dt) panic(err) } } // Random creates an array of random numbers of the given type. // For complex Dtypes, the imaginary component will be 0. // // This function is only useful in cases where the randomness is not vital. 
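// For example (a hypothetical call, not from the package's test suite):
//
//	xs := Random(Float64, 3).([]float64) // three pseudo-random draws from a normal distribution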
func Random(dt Dtype, size int) interface{} { r := rand.New(rand.NewSource(1337)) switch dt.Kind() { case reflect.Int: retVal := make([]int, size) for i := range retVal { retVal[i] = int(r.Int()) } return retVal case reflect.Int8: retVal := make([]int8, size) for i := range retVal { retVal[i] = int8(r.Int()) } return retVal case reflect.Int16: retVal := make([]int16, size) for i := range retVal { retVal[i] = int16(r.Int()) } return retVal case reflect.Int32: retVal := make([]int32, size) for i := range retVal { retVal[i] = int32(r.Int()) } return retVal case reflect.Int64: retVal := make([]int64, size) for i := range retVal { retVal[i] = int64(r.Int()) } return retVal case reflect.Uint: retVal := make([]uint, size) for i := range retVal { retVal[i] = uint(r.Uint32()) } return retVal case reflect.Uint8: retVal := make([]uint8, size) for i := range retVal { retVal[i] = uint8(r.Uint32()) } return retVal case reflect.Uint16: retVal := make([]uint16, size) for i := range retVal { retVal[i] = uint16(r.Uint32()) } return retVal case reflect.Uint32: retVal := make([]uint32, size) for i := range retVal { retVal[i] = uint32(r.Uint32()) } return retVal case reflect.Uint64: retVal := make([]uint64, size) for i := range retVal { retVal[i] = uint64(r.Uint32()) } return retVal case reflect.Float32: retVal := make([]float32, size) for i := range retVal { retVal[i] = float32(r.NormFloat64()) } return retVal case reflect.Float64: retVal := make([]float64, size) for i := range retVal { retVal[i] = rand.NormFloat64() } return retVal case reflect.Complex64: retVal := make([]complex64, size) for i := range retVal { retVal[i] = complex(r.Float32(), float32(0)) } return retVal case reflect.Complex128: retVal := make([]complex128, size) for i := range retVal { retVal[i] = complex(r.Float64(), float64(0)) } return retVal } panic("unreachable") } tensor-0.9.24/genlib2/000077500000000000000000000000001426512615100144455ustar00rootroot00000000000000tensor-0.9.24/genlib2/agg1_body.go000066400000000000000000000442711426512615100166400ustar00rootroot00000000000000package main import "text/template" // level 1 aggregation (internal.E) templates const ( eArithRaw = `as := isScalar(a, t) bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} {{$isDiv := eq $name "Div" -}} {{$p := panicsDiv0 . -}} switch { case as && bs: Vec{{$name}}{{short .}}(at, bt) case as && !bs: {{if and $isDiv $p}} err = {{end}} {{$name}}SV{{short .}}(at[0], bt) case !as && bs: {{if and $isDiv $p}} err = {{end}} {{$name}}VS{{short .}}(at, bt[0]) default: {{if and $isDiv $p}} err = {{end}} Vec{{$name}}{{short .}}(at, bt) } return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eArithIncrRaw = `as := isScalar(a, t) bs := isScalar(b, t) is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { return errors.Errorf("Cannot increment on scalar increment. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} it := incr.{{sliceOf .}} switch { case as && bs: Vec{{$name}}{{short .}}(at, bt) if !is { return e.Add(t, incr, a) } it[0]+= at[0] case as && !bs: {{$name}}IncrSV{{short .}}(at[0], bt, it) case !as && bs : {{$name}}IncrVS{{short .}}(at, bt[0], it) default: {{$name}}Incr{{short .}}(at, bt,it) } return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eArithIterRaw = `as := isScalar(a, t) bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs : Vec{{$name}}{{short .}}(at, bt) case as && !bs: {{$name}}IterSV{{short .}}(at[0], bt, bit) case !as && bs: {{$name}}IterVS{{short .}}(at, bt[0], ait) default: {{$name}}Iter{{short .}}(at, bt, ait, bit) } return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}Iter", t) } ` eArithIterIncrRaw = `as :=isScalar(a, t) bs := isScalar(b, t) is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} it := incr.{{sliceOf .}} switch { case as && bs: Vec{{$name}}{{short .}}(at, bt) if !is { return e.{{$name}}Iter(t, incr, a, iit, ait) } it[0] += at[0] return case as && !bs: return {{$name}}IterIncrSV{{short .}}(at[0], bt, it, bit, iit) case !as && bs: return {{$name}}IterIncrVS{{short .}}(at, bt[0], it, ait, iit) default: return {{$name}}IterIncr{{short .}}(at, bt, it, ait, bit, iit) } {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}IterIncr", t) } ` eArithRecvRaw = `as :=isScalar(a, t) bs := isScalar(b, t) rs := isScalar(recv, t) if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t{ {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} rt := recv.{{sliceOf .}} {{$name}}Recv{{short .}}(at, bt, rt) return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}Recv", t) } ` eMapRaw = `as := isScalar(a, t) switch t { {{range .Kinds -}} case {{reflectKind .}}: var f0 {{template "fntype0" .}} var f1 {{template "fntype1" .}} switch f := fn.(type){ case {{template "fntype0" .}}: f0 = f case {{template "fntype1" .}}: f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.{{sliceOf .}} {{if isAddable . 
-}} switch{ case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp {{asType .}} if tmp, err= f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncr{{short .}}(f0, at) case !as && incr && f0 == nil: err = MapIncrErr{{short .}}(f1, at) case !as && !incr && f0 == nil: err = MapErr{{short .}}(f1, at) default: Map{{short .}}(f0, at) } {{else -}} if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case as && f0 != nil: at[0] = f0(at[0]) case as && f0 == nil: at[0], err = f1(at[0]) case !as && f0 == nil: err = MapErr{{short .}}(f1, at) default: Map{{short .}}(f0, at) } {{end -}} {{end -}} default: return errors.Errorf("Cannot map t of %v", t) } ` eMapIterRaw = `switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} var f0 {{template "fntype0" .}} var f1 {{template "fntype1" .}} switch f := fn.(type){ case {{template "fntype0" .}}: f0 = f case {{template "fntype1" .}}: f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } {{if isAddable . -}} switch { case incr && f0 != nil: MapIterIncr{{short .}}(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErr{{short .}}(f1, at, ait) case !incr && f0 == nil: err = MapIterErr{{short .}}(f1, at, ait) default: MapIter{{short .}}(f0, at, ait) } {{else -}} if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case f0 == nil: err = MapIterErr{{short .}}(f1, at, ait) default: MapIter{{short .}}(f0, at, ait) } {{end -}} {{end -}} default: return errors.Errorf("Cannot map t of %v", t) } ` eCmpSameRaw = `as := isScalar(a, t) bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} {{if isBoolRepr . -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs: {{$name}}Same{{short .}}(at, bt) case as && !bs: {{$name}}SameSV{{short .}}(at[0], bt) case !as && bs: {{$name}}SameVS{{short .}}(at, bt[0]) default: {{$name}}Same{{short .}}(at, bt) } return {{end -}} {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) }` eCmpBoolRaw = `as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs: {{$name}}{{short .}}(at, bt, rt) case as && !bs: {{$name}}SV{{short .}}(at[0], bt, rt) case !as && bs : {{$name}}VS{{short .}}(at, bt[0], rt) default: {{$name}}{{short .}}(at, bt, rt) } return {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eCmpSameIterRaw = `as := isScalar(a, t) bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} {{if isBoolRepr . 
-}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs : {{$name}}Same{{short .}}(at, bt) case as && !bs: {{$name}}SameIterSV{{short .}}(at[0], bt, bit) case !as && bs: {{$name}}SameIterVS{{short .}}(at, bt[0], ait) default: {{$name}}SameIter{{short .}}(at, bt, ait, bit) } return {{end -}} {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eCmpBoolIterRaw = `as :=isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } {{$name := .Name}} switch t { {{range .Kinds -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs: {{$name}}{{short .}}(at, bt, rt) return case as && !bs: return {{$name}}IterSV{{short .}}(at[0], bt, rt, bit, rit) case !as && bs: return {{$name}}IterVS{{short .}}(at, bt[0], rt, ait, rit) default: return {{$name}}Iter{{short .}}(at, bt, rt, ait, bit, rit) } {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eMinMaxSameRaw = `as := isScalar(a, t) bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} {{if isOrd . -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs: Vec{{$name}}{{short .}}(at, bt) case as && !bs: {{$name}}SV{{short .}}(at[0], bt) case !as && bs: {{$name}}VS{{short .}}(at, bt[0]) default: Vec{{$name}}{{short .}}(at, bt) } return {{end -}} {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eMinMaxSameIterRaw = `as := isScalar(a, t) bs := isScalar(b, t) {{$name := .Name}} switch t { {{range .Kinds -}} {{if isOrd . -}} case {{reflectKind .}}: at := a.{{sliceOf .}} bt := b.{{sliceOf .}} switch { case as && bs : Vec{{$name}}{{short .}}(at, bt) case as && !bs: {{$name}}IterSV{{short .}}(at[0], bt, bit) case !as && bs: {{$name}}IterVS{{short .}}(at, bt[0], ait) default: Vec{{$name}}Iter{{short .}}(at, bt, ait, bit) } return {{end -}} {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` complexAbs = `{{if eq .Kind.String "complex64" -}} {{else if eq .Kind.String "complex128" -}} {{end -}} ` eReduceFirstRaw = `{{$name := .Name -}} switch t { {{range .Kinds -}} case {{reflectKind .}}: dt := data.{{sliceOf .}} rt := retVal.{{sliceOf .}} switch f := fn.(type){ case func([]{{asType .}}, []{{asType .}}): {{$name | unexport}}{{short .}}(dt, rt, split, size, f) case func({{asType .}}, {{asType .}}) {{asType .}}: generic{{$name}}{{short .}}(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eReduceLastRaw = `var ok bool {{$name := .Name -}} switch t { {{range .Kinds -}} case {{reflectKind .}}: var def {{asType .}} if def, ok = defaultValue.({{asType .}}); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.{{sliceOf .}} rt := retVal.{{sliceOf .}} switch f := fn.(type){ case func([]{{asType .}}) {{asType .}}: {{$name | unexport}}{{short .}}(dt, rt, dimSize, def, f) case func({{asType .}}, {{asType .}}) {{asType .}}: generic{{$name}}{{short .}}(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eReduceDefaultRaw = `var ok bool {{$name := .Name -}} switch t { {{range 
.Kinds -}} case {{reflectKind .}}: var f func({{asType .}}, {{asType .}}) {{asType .}} if f, ok = fn.(func({{asType .}}, {{asType .}}) {{asType .}}); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.{{sliceOf .}} rt := retVal.{{sliceOf .}} {{$name | unexport}}{{short .}}(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eReduceRaw = `var ok bool switch t{ {{range .Kinds -}} case {{reflectKind .}}: var f func({{asType .}}, {{asType .}}) {{asType .}} var def {{asType .}} if f, ok = fn.(func({{asType .}}, {{asType .}}) {{asType .}}); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.({{asType .}}); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = Reduce{{short .}}(f, def, a.{{sliceOf .}}...) return {{end -}} default: return nil, errors.Errorf("Unsupported type %v for Reduce", t) } ` eUnaryRaw = `{{$name := .Name -}} switch t { {{range .Kinds -}} case {{reflectKind .}}: {{$name}}{{short .}}(a.{{sliceOf .}}) return nil {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eUnaryIterRaw = `{{$name := .Name -}} switch t { {{range .Kinds -}} case {{reflectKind .}}: return {{$name}}{{short .}}(a.{{sliceOf .}}, ait) {{end -}} default: return errors.Errorf("Unsupported type %v for {{$name}}", t) } ` eUnaryClampRaw = `var ok bool switch t { {{range .Kinds -}} case {{reflectKind .}}: var min, max {{asType .}} if min, ok = minVal.({{asType .}}); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.({{asType .}}); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } Clamp{{short .}}(a.{{sliceOf .}}, min, max) return nil {{end -}} default: return errors.Errorf("Unsupported type %v for Clamp", t) } ` eUnaryClampIterRaw = `var ok bool switch t { {{range .Kinds -}} case {{reflectKind .}}: var min, max {{asType .}} if min, ok = minVal.({{asType .}}); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.({{asType .}}); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIter{{short .}}(a.{{sliceOf .}}, ait, min, max) {{end -}} default: return errors.Errorf("Unsupported type %v for Clamp", t) } ` eArgmaxRaw = `var next int {{$name := .Name -}} switch t { {{range .Kinds -}} case {{reflectKind .}}: data := a.{{sliceOf .}} tmp := make([]{{asType .}}, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := Arg{{$name}}{{short .}}(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return {{end -}} default: return nil, errors.Errorf("Unsupported type %v for Arg{{.Name}}", t) } ` eArgmaxMaskedRaw = `newMask := make([]bool, 0, lastSize) var next int {{$name := .Name -}} switch t { {{range .Kinds -}} case {{reflectKind .}}: data := a.{{sliceOf .}} tmp := make([]{{asType .}}, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := Arg{{$name}}Masked{{short .}}(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return {{end -}} default: return nil, errors.Errorf("Unsupported type %v for 
Arg{{.Name}}", t) } ` eArgmaxFlatRaw = `switch t { {{$name := .Name -}} {{range .Kinds -}} case {{reflectKind .}}: return Arg{{$name}}{{short .}}(a.{{sliceOf .}}) {{end -}} default: return -1 } ` eArgmaxFlatMaskedRaw = `switch t { {{$name := .Name -}} {{range .Kinds -}} case {{reflectKind .}}: return Arg{{$name}}Masked{{short .}}(a.{{sliceOf .}}, mask) {{end -}} default: return -1 } ` ) var ( eArith *template.Template eArithIncr *template.Template eArithIter *template.Template eArithIterIncr *template.Template eArithRecv *template.Template eMap *template.Template eMapIter *template.Template eCmpBool *template.Template eCmpSame *template.Template eCmpBoolIter *template.Template eCmpSameIter *template.Template eMinMaxSame *template.Template eMinMaxIter *template.Template eReduce *template.Template eReduceFirst *template.Template eReduceLast *template.Template eReduceDefault *template.Template eUnary *template.Template eUnaryIter *template.Template eUnaryClamp *template.Template eUnaryClampIter *template.Template eArgmax *template.Template eArgmaxMasked *template.Template eArgmaxFlat *template.Template eArgmaxFlatMasked *template.Template ) func init() { eArith = template.Must(template.New("eArith").Funcs(funcs).Parse(eArithRaw)) eArithIncr = template.Must(template.New("eArithIncr").Funcs(funcs).Parse(eArithIncrRaw)) eArithIter = template.Must(template.New("eArithIter").Funcs(funcs).Parse(eArithIterRaw)) eArithIterIncr = template.Must(template.New("eArithIterIncr").Funcs(funcs).Parse(eArithIterIncrRaw)) eArithRecv = template.Must(template.New("eArithRecv").Funcs(funcs).Parse(eArithRecvRaw)) eMap = template.Must(template.New("eMap").Funcs(funcs).Parse(eMapRaw)) eMapIter = template.Must(template.New("eMapIter").Funcs(funcs).Parse(eMapIterRaw)) eCmpBool = template.Must(template.New("eCmpBool").Funcs(funcs).Parse(eCmpBoolRaw)) eCmpSame = template.Must(template.New("eCmpSame").Funcs(funcs).Parse(eCmpSameRaw)) eCmpBoolIter = template.Must(template.New("eCmpBoolIter").Funcs(funcs).Parse(eCmpBoolIterRaw)) eCmpSameIter = template.Must(template.New("eCmpSameIter").Funcs(funcs).Parse(eCmpSameIterRaw)) eMinMaxSame = template.Must(template.New("eMinMaxSame").Funcs(funcs).Parse(eMinMaxSameRaw)) eMinMaxIter = template.Must(template.New("eMinMaxSameIter").Funcs(funcs).Parse(eMinMaxSameIterRaw)) eReduce = template.Must(template.New("eReduce").Funcs(funcs).Parse(eReduceRaw)) eReduceFirst = template.Must(template.New("eReduceFirst").Funcs(funcs).Parse(eReduceFirstRaw)) eReduceLast = template.Must(template.New("eReduceLast").Funcs(funcs).Parse(eReduceLastRaw)) eReduceDefault = template.Must(template.New("eReduceDefault").Funcs(funcs).Parse(eReduceDefaultRaw)) eUnary = template.Must(template.New("eUnary").Funcs(funcs).Parse(eUnaryRaw)) eUnaryIter = template.Must(template.New("eUnaryIter").Funcs(funcs).Parse(eUnaryIterRaw)) eUnaryClamp = template.Must(template.New("eUnaryClamp").Funcs(funcs).Parse(eUnaryClampRaw)) eUnaryClampIter = template.Must(template.New("eUnaryClampIter").Funcs(funcs).Parse(eUnaryClampIterRaw)) eArgmax = template.Must(template.New("argmax").Funcs(funcs).Parse(eArgmaxRaw)) eArgmaxMasked = template.Must(template.New("argmaxMasked").Funcs(funcs).Parse(eArgmaxMaskedRaw)) eArgmaxFlat = template.Must(template.New("argmaxFlat").Funcs(funcs).Parse(eArgmaxFlatRaw)) eArgmaxFlatMasked = template.Must(template.New("argmaxFlatMasked").Funcs(funcs).Parse(eArgmaxFlatMaskedRaw)) } tensor-0.9.24/genlib2/agg2_body.go000066400000000000000000000316351426512615100166410ustar00rootroot00000000000000package main 
import "text/template" // level 2 aggregation (tensor.StdEng) templates const cmpPrepRaw = `var safe, same bool if reuse, safe, _, _, same, err = handleFuncOpts({{.VecVar}}.Shape(), {{.VecVar}}.Dtype(), {{.VecVar}}.DataOrder(),false, opts...); err != nil{ return nil, errors.Wrap(err, "Unable to handle funcOpts") } if !safe { same = true } ` const arithPrepRaw = `var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts({{.VecVar}}.Shape(), {{.VecVar}}.Dtype(), {{.VecVar}}.DataOrder(), true, opts...); err != nil{ return nil, errors.Wrap(err, "Unable to handle funcOpts") } ` const minmaxPrepRaw = `var safe bool if reuse, safe, _, _, _, err = handleFuncOpts({{.VecVar}}.Shape(), {{.VecVar}}.Dtype(), {{.VecVar}}.DataOrder(), true, opts...); err != nil{ return nil, errors.Wrap(err, "Unable to handle funcOpts") } ` const prepVVRaw = `if err = binaryCheck(a, b, {{.TypeClassCheck | lower}}Types); err != nil { return nil, errors.Wrapf(err, "{{.Name}} failed") } var reuse DenseTensor {{template "prep" . -}} typ := a.Dtype().Type var dataA, dataB, dataReuse *storage.Header var ait, bit, iit Iterator var useIter, swap bool if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil { return nil, errors.Wrapf(err, "StdEng.{{.Name}}") } ` const prepMixedRaw = `if err = unaryCheck(t, {{.TypeClassCheck | lower}}Types); err != nil { return nil, errors.Wrapf(err, "{{.Name}} failed") } if err = scalarDtypeCheck(t, s); err != nil { return nil, errors.Wrap(err, "{{.Name}} failed") } var reuse DenseTensor {{template "prep" . -}} a := t typ := t.Dtype().Type var ait, bit, iit Iterator var dataA, dataB, dataReuse, scalarHeader *storage.Header var useIter, newAlloc bool if leftTensor { if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.{{.Name}}") } scalarHeader = dataB } else { if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil { return nil, errors.Wrapf(err, opFail, "StdEng.{{.Name}}") } scalarHeader = dataA } ` const prepUnaryRaw = `if err = unaryCheck(a, {{.TypeClassCheck | lower}}Types); err != nil { err = errors.Wrapf(err, "{{.Name}} failed") return } var reuse DenseTensor var safe, toReuse, incr bool if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil { return nil, errors.Wrap(err, "Unable to handle funcOpts") } typ := a.Dtype().Type var ait, rit Iterator var dataA, dataReuse *storage.Header var useIter bool if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil{ return nil, errors.Wrapf(err, opFail, "StdEng.{{.Name}}") } ` const agg2BodyRaw = `if useIter { switch { case incr: err = e.E.{{.Name}}IterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse {{if .VV -}} case toReuse: storage.CopyIter(typ,dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.{{.Name}}Iter(typ, dataReuse, dataB, iit, bit) retVal = reuse {{else -}} case toReuse && leftTensor: storage.CopyIter(typ, dataReuse, dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.{{.Name}}Iter(typ, dataReuse, dataB, iit, bit) retVal = reuse case toReuse && !leftTensor: storage.CopyIter(typ, dataReuse, dataB, iit, bit) iit.Reset() bit.Reset() err = e.E.{{.Name}}Iter(typ, dataA, dataReuse, ait, iit) retVal = reuse {{end -}} case !safe: err = e.E.{{.Name}}Iter(typ, dataA, dataB, ait, bit) retVal = a default: {{if .VV -}} if swap { retVal = 
b.Clone().(Tensor) }else{ retVal = a.Clone().(Tensor) } err = e.E.{{.Name}}Iter(typ, retVal.hdr(), dataB, ait, bit) {{else -}} retVal = a.Clone().(Tensor) if leftTensor { err = e.E.{{.Name}}Iter(typ, retVal.hdr(), dataB, ait, bit) } else { err = e.E.{{.Name}}Iter(typ, dataA, retVal.hdr(), ait, bit) } {{end -}} } {{if not .VV -}} if newAlloc{ freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) {{end -}} return } switch { case incr: err = e.E.{{.Name}}Incr(typ, dataA, dataB, dataReuse) retVal = reuse {{if .VV -}} case toReuse: err = e.E.{{.Name}}Recv(typ, dataA, dataB, dataReuse) retVal = reuse {{else -}} case toReuse && leftTensor: storage.Copy(typ, dataReuse, dataA) err = e.E.{{.Name}}(typ, dataReuse, dataB) retVal = reuse case toReuse && !leftTensor: storage.Copy(typ, dataReuse, dataB) err = e.E.{{.Name}}(typ, dataA, dataReuse) {{if not .VV -}} if t.Shape().IsScalarEquiv() { storage.Copy(typ, dataReuse, dataA) } {{end -}} retVal = reuse {{end -}} case !safe: err = e.E.{{.Name}}(typ, dataA, dataB) {{if not .VV -}} if t.Shape().IsScalarEquiv() && !leftTensor { storage.Copy(typ, dataB, dataA) } {{end -}} retVal = a default: {{if .VV -}} if swap { retVal = b.Clone().(Tensor) }else{ retVal = a.Clone().(Tensor) } err = e.E.{{.Name}}(typ, retVal.hdr(), dataB) {{else -}} retVal = a.Clone().(Tensor) if !leftTensor { storage.Fill(typ, retVal.hdr(), dataA) } err = e.E.{{.Name}}(typ, retVal.hdr(), dataB) {{end -}} } {{if not .VV -}} if newAlloc{ freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) {{end -}} return ` const agg2CmpBodyRaw = `// check to see if anything needs to be created switch { case same && safe && reuse == nil: {{if .VV -}} if swap{ reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else{ reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } {{else -}} reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) {{end -}} dataReuse = reuse.hdr() if useIter{ iit = IteratorFromDense(reuse) } case !same && safe && reuse == nil: reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e)) dataReuse = reuse.hdr() if useIter{ iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && same && reuse == nil: err = e.E.{{.Name}}SameIter(typ, dataA, dataB, ait, bit) retVal = a {{if .VV -}} case same && safe && reuse != nil: storage.CopyIter(typ,dataReuse,dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.{{.Name}}SameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse {{else -}} case same && safe && reuse != nil && !leftTensor: storage.CopyIter(typ,dataReuse,dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.{{.Name}}SameIter(typ, dataA, dataReuse, ait, bit) retVal = reuse case same && safe && reuse != nil && leftTensor: storage.CopyIter(typ,dataReuse,dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.{{.Name}}SameIter(typ, dataReuse, dataB, iit, bit) retVal = reuse {{end -}} default: // safe && bool err = e.E.{{.Name}}Iter(typ, dataA, dataB, dataReuse, ait, bit, iit) retVal = reuse } {{if not .VV -}} if newAlloc{ freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) {{end -}} return } {{if not .VV -}} // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case same && safe && reuse != nil && leftTensor: storage.Copy(typ,dataReuse,dataA) err = e.E.{{.Name}}Same(typ, dataReuse, dataB) retVal = reuse return case same && safe && reuse != nil && !leftTensor: storage.Copy(typ,dataReuse,dataB) err = e.E.{{.Inv}}Same(typ, dataReuse, dataA) retVal = reuse return } } {{end 
-}} // standard switch { case !safe && same && reuse == nil: err = e.E.{{.Name}}Same(typ, dataA, dataB) retVal = a {{if .VV -}} case same && safe && reuse != nil: storage.Copy(typ,dataReuse,dataA) err = e.E.{{.Name}}Same(typ, dataReuse, dataB) retVal = reuse {{else -}} case same && safe && reuse != nil && leftTensor: storage.Copy(typ,dataReuse,dataA) err = e.E.{{.Name}}Same(typ, dataReuse, dataB) retVal = reuse case same && safe && reuse != nil && !leftTensor: storage.Copy(typ,dataReuse,dataB) err = e.E.{{.Name}}Same(typ, dataA, dataReuse) retVal = reuse {{end -}} default: err = e.E.{{.Name}}(typ, dataA, dataB, dataReuse) retVal = reuse } {{if not .VV -}} if newAlloc{ freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) {{end -}} return ` const agg2MinMaxBodyRaw = `// check to see if anything needs to be created if reuse == nil{ {{if .VV -}} if swap{ reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e)) } else{ reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) } {{else -}} reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e)) {{end -}} dataReuse = reuse.hdr() if useIter{ iit = IteratorFromDense(reuse) } } if useIter { switch { case !safe && reuse == nil: err = e.E.{{.Name}}Iter(typ, dataA, dataB, ait, bit) retVal = a {{if .VV -}} case safe && reuse != nil: storage.CopyIter(typ,dataReuse,dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.{{.Name}}Iter(typ, dataReuse, dataB, iit, bit) retVal = reuse {{else -}} case safe && reuse != nil && !leftTensor: storage.CopyIter(typ,dataReuse,dataB, iit, bit) bit.Reset() iit.Reset() err = e.E.{{.Name}}Iter(typ, dataA, dataReuse, ait, bit) retVal = reuse case safe && reuse != nil && leftTensor: storage.CopyIter(typ,dataReuse,dataA, iit, ait) ait.Reset() iit.Reset() err = e.E.{{.Name}}Iter(typ, dataReuse, dataB, iit, bit) retVal = reuse {{end -}} default: // safe && bool panic("Unreachable") } {{if not .VV -}} if newAlloc{ freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) {{end -}} return } {{if not .VV -}} // handle special case where A and B have both len 1 if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) { switch { case safe && reuse != nil && leftTensor: storage.Copy(typ,dataReuse,dataA) err = e.E.{{.Name}}(typ, dataReuse, dataB) retVal = reuse return case safe && reuse != nil && !leftTensor: storage.Copy(typ,dataReuse,dataB) err = e.E.{{.Name}}(typ, dataReuse, dataA) retVal = reuse return } } {{end -}} // standard switch { case !safe && reuse == nil: err = e.E.{{.Name}}(typ, dataA, dataB) retVal = a {{if .VV -}} case safe && reuse != nil: storage.Copy(typ,dataReuse,dataA) err = e.E.{{.Name}}(typ, dataReuse, dataB) retVal = reuse {{else -}} case safe && reuse != nil && leftTensor: storage.Copy(typ,dataReuse,dataA) err = e.E.{{.Name}}(typ, dataReuse, dataB) retVal = reuse case safe && reuse != nil && !leftTensor: storage.Copy(typ,dataReuse,dataB) err = e.E.{{.Name}}(typ, dataA, dataReuse) retVal = reuse {{end -}} default: panic("Unreachable") } {{if not .VV -}} if newAlloc{ freeScalar(scalarHeader.Raw) } returnHeader(scalarHeader) {{end -}} return ` const agg2UnaryBodyRaw = ` if useIter{ switch { case incr: cloned:= a.Clone().(Tensor) if err = e.E.{{.Name}}Iter(typ, cloned.hdr(), ait); err != nil { return nil, errors.Wrap(err, "Unable to perform {{.Name}}") } ait.Reset() err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait) retVal = reuse case toReuse: storage.CopyIter(typ, dataReuse, dataA, rit, ait) rit.Reset() err = e.E.{{.Name}}Iter(typ, dataReuse, rit) retVal = reuse case 
!safe: err = e.E.{{.Name}}Iter(typ, dataA, ait) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.{{.Name}}Iter(typ, cloned.hdr(), ait) retVal = cloned } return } switch { case incr: cloned := a.Clone().(Tensor) if err = e.E.{{.Name}}(typ, cloned.hdr()); err != nil { return nil, errors.Wrap(err, "Unable to perform {{.Name}}") } err = e.E.Add(typ, dataReuse, cloned.hdr()) retVal = reuse case toReuse: storage.Copy(typ, dataReuse, dataA) err = e.E.{{.Name}}(typ, dataReuse) retVal = reuse case !safe: err = e.E.{{.Name}}(typ, dataA) retVal = a default: // safe by default cloned := a.Clone().(Tensor) err = e.E.{{.Name}}(typ, cloned.hdr()) retVal = cloned } return ` var ( prepVV *template.Template prepMixed *template.Template prepUnary *template.Template agg2Body *template.Template agg2CmpBody *template.Template agg2UnaryBody *template.Template agg2MinMaxBody *template.Template ) func init() { prepVV = template.Must(template.New("prepVV").Funcs(funcs).Parse(prepVVRaw)) prepMixed = template.Must(template.New("prepMixed").Funcs(funcs).Parse(prepMixedRaw)) prepUnary = template.Must(template.New("prepUnary").Funcs(funcs).Parse(prepUnaryRaw)) agg2Body = template.Must(template.New("agg2body").Funcs(funcs).Parse(agg2BodyRaw)) agg2CmpBody = template.Must(template.New("agg2CmpBody").Funcs(funcs).Parse(agg2CmpBodyRaw)) agg2UnaryBody = template.Must(template.New("agg2UnaryBody").Funcs(funcs).Parse(agg2UnaryBodyRaw)) agg2MinMaxBody = template.Must(template.New("agg2MinMaxBody").Funcs(funcs).Parse(agg2MinMaxBodyRaw)) } tensor-0.9.24/genlib2/agg3_body.go000066400000000000000000000230401426512615100166310ustar00rootroot00000000000000package main import "text/template" // 3rd level function aggregation templates const denseArithBodyRaw = `{{$elne := eq .Name "Ne"}} {{$eleq := eq .Name "Eq"}} {{$eleqne := or $eleq $elne}} var ret Tensor if t.oe != nil { if ret, err = t.oe.{{if $eleqne}}El{{end}}{{.Name}}(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do {{.Name}}()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "{{.Name}}") } return } if {{interfaceName .Name | lower}}, ok := t.e.({{interfaceName .Name}}); ok { if ret, err = {{interfaceName .Name | lower}}.{{if $eleqne}}El{{end}}{{.Name}}(t, other, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do {{.Name}}()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "{{.Name}}") } return } return nil, errors.Errorf("Engine does not support {{.Name}}()") ` const denseArithScalarBodyRaw = `var ret Tensor if t.oe != nil { if ret, err = t.oe.{{.Name}}Scalar(t, other, leftTensor, opts...); err != nil{ return nil, errors.Wrapf(err, "Unable to do {{.Name}}Scalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "{{.Name}}Scalar") } return } if {{interfaceName .Name | lower}}, ok := t.e.({{interfaceName .Name}}); ok { if ret, err = {{interfaceName .Name | lower}}.{{.Name}}Scalar(t, other, leftTensor, opts...); err != nil { return nil, errors.Wrapf(err, "Unable to do {{.Name}}Scalar()") } if retVal, err = assertDense(ret); err != nil { return nil, errors.Wrapf(err, opFail, "{{.Name}}Scalar") } return } return nil, errors.Errorf("Engine does not support {{.Name}}Scalar()") ` const denseIdentityArithTestBodyRaw = `iden := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) {{if ne .Identity 0 -}} b.Memset(identityVal({{.Identity}}, a.t)) {{end 
-}} {{template "funcoptdecl" -}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := a.Engine().({{interfaceName .Name}}); we = we || !ok {{template "call0" . }} if err, retEarly := qcErrCheck(t, "{{.Name}}", a, b, we, err); retEarly{ if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil{ t.Errorf("Identity test for {{.Name}} failed: %v", err) } ` const denseIdentityArithScalarTestRaw = `iden1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal({{.Identity}}, q.t) {{template "funcoptdecl"}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "call0" . }} if err, retEarly := qcErrCheck(t, "{{.Name}}", a, b, we, err); retEarly{ if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for {{.Name}} (tensor as left, scalar as right) failed: %v", err) } {{if .IsCommutative -}} iden2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal({{.Identity}}, q.t) {{template "funcoptdecl" -}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "call1" . }} if err, retEarly := qcErrCheck(t, "{{.Name}}", a, b, we, err); retEarly{ if err != nil { return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Identity test for {{.Name}} (scalar as left, tensor as right) failed: %v", err) } {{end -}} ` const denseInvArithTestBodyRaw = `inv := func(a *Dense) bool { b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine())) {{if ne .Identity 0 -}} b.Memset(identityVal({{.Identity}}, a.t)) {{end -}} {{template "funcoptdecl" -}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := a.Engine().({{interfaceName .Name}}); we = we || !ok {{template "call0" . 
}} if err, retEarly := qcErrCheck(t, "{{.Name}}", a, b, we, err); retEarly{ if err != nil { return false } return true } {{template "callInv" .}} if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil{ t.Errorf("Inv test for {{.Name}} failed: %v", err) } ` const denseInvArithScalarTestRaw = `inv1 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal({{.Identity}}, q.t) {{template "funcoptdecl"}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "call0" . }} if err, retEarly := qcErrCheck(t, "{{.Name}}VS", a, b, we, err); retEarly{ if err != nil { return false } return true } {{template "callInv0" .}} if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for {{.Name}} (tensor as left, scalar as right) failed: %v", err) } {{if .IsInvolutionary -}} {{if eq .FuncOpt "incr" -}} {{else -}} inv2 := func(q *Dense) bool { a := q.Clone().(*Dense) b := identityVal({{.Identity}}, q.t) {{template "funcoptdecl" -}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "call1" . }} if err, retEarly := qcErrCheck(t, "{{.Name}}SV", a, b, we, err); retEarly{ if err != nil { return false } return true } {{template "callInv1" .}} if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for {{.Name}} (scalar as left, tensor as right) failed: %v", err) } {{end -}} {{end -}} ` const denseArithScalarWrongTypeTestRaw = `type Foo int wt1 := func(a *Dense) bool{ b := Foo(0) {{template "call0" .}} if err == nil { return false } _ = ret return true } if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for {{.Name}} (tensor as left, scalar as right) failed: %v", err) } wt2 := func(a *Dense) bool{ b := Foo(0) {{template "call1" .}} if err == nil { return false } _ = ret return true } if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("WrongType test for {{.Name}} (tensor as right, scalar as left) failed: %v", err) } ` const denseArithReuseMutationTestRaw = `mut := func(a, b *Dense, reuseA bool) bool { // req because we're only testing on one kind of tensor/engine combo a.e = StdEng{} a.oe = StdEng{} a.flag = 0 b.e = StdEng{} b.oe = StdEng{} b.flag = 0 if a.Dtype() != b.Dtype(){ return true } if !a.Shape().Eq(b.Shape()){ return true } {{template "callVanilla" .}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := a.Engine().({{interfaceName .Name}}); we = we || !ok var ret, reuse {{template "retType" .}} if reuseA { {{template "call0" .}}, WithReuse(a)) reuse = a } else { {{template "call0" .}}, WithReuse(b)) reuse = b } if err, retEarly := qcErrCheck(t, "{{.Name}}", a, b, we, err); retEarly{ if err != nil { 
return false } return true } if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Reuse Mutation test for {{.Name}} failed: %v", err) } ` var ( denseArithBody *template.Template denseArithScalarBody *template.Template denseIdentityArithTest *template.Template denseIdentityArithScalarTest *template.Template denseArithScalarWrongTypeTest *template.Template ) func init() { denseArithBody = template.Must(template.New("dense arith body").Funcs(funcs).Parse(denseArithBodyRaw)) denseArithScalarBody = template.Must(template.New("dense arith body").Funcs(funcs).Parse(denseArithScalarBodyRaw)) denseIdentityArithTest = template.Must(template.New("dense identity test").Funcs(funcs).Parse(denseIdentityArithTestBodyRaw)) denseIdentityArithScalarTest = template.Must(template.New("dense scalar identity test").Funcs(funcs).Parse(denseIdentityArithScalarTestRaw)) denseArithScalarWrongTypeTest = template.Must(template.New("dense scalar wrongtype test").Funcs(funcs).Parse(denseArithScalarWrongTypeTestRaw)) } tensor-0.9.24/genlib2/api_unary.go000066400000000000000000000041431426512615100167650ustar00rootroot00000000000000package main import ( "io" "text/template" ) type APIUnary struct { UnaryOp } func (fn *APIUnary) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.UnaryOp.Name() == "Clamp": paramNames = []string{"a", "min", "max", "opts"} paramTemplates = []*template.Template{tensorType, interfaceType, interfaceType, splatFuncOptType} default: paramNames = []string{"a", "opts"} paramTemplates = []*template.Template{tensorType, splatFuncOptType} } return &Signature{ Name: fn.Name(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, RetVals: []string{"retVal"}, RetValTemplates: []*template.Template{tensorType}, Err: true, } } func (fn *APIUnary) WriteBody(w io.Writer) { body := `e := a.Engine() if {{interfaceName .Name | lower}}, ok := e.({{interfaceName .Name}}); ok { {{if eq .Name "Clamp" -}} return clamper.Clamp(a, min, max, opts...) {{else -}} return {{interfaceName .Name|lower}}.{{.Name}}(a, opts...) 
{{end -}} } err = errors.Errorf("Engine does not perform {{.Name}}") return ` T := template.Must(template.New("body").Funcs(funcs).Parse(body)) T.Execute(w, fn) } func (fn *APIUnary) Write(w io.Writer) { w.Write([]byte("func ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateUncondUnaryAPI(f io.Writer, kinds Kinds) { var unaries []*APIUnary for _, u := range unconditionalUnaries { fn := &APIUnary{ UnaryOp: u, } unaries = append(unaries, fn) } for _, u := range unaries { u.Write(f) } } func generateCondUnaryAPI(f io.Writer, kinds Kinds) { var unaries []*APIUnary for _, u := range conditionalUnaries { fn := &APIUnary{ UnaryOp: u, } unaries = append(unaries, fn) } for _, u := range unaries { u.Write(f) } } func generateSpecialUnaryAPI(f io.Writer, kinds Kinds) { var unaries []*APIUnary for _, u := range specialUnaries { fn := &APIUnary{ UnaryOp: u, } unaries = append(unaries, fn) } for _, u := range unaries { u.Write(f) } } tensor-0.9.24/genlib2/arith_tests.go000066400000000000000000000217101426512615100173260ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const ( APICallVVxRaw = `correct, err := {{.Name}}(a, b)` // no funcopt APICallVVReuseMutRaw = `ret, err = {{.Name}}(a, b` APICallVVRaw = `ret, err := {{.Name}}(a, b {{template "funcoptuse"}})` APICallVSRaw = `ret, err := {{.Name}}(a, b {{template "funcoptuse"}})` APICallSVRaw = `ret, err := {{.Name}}(b, a {{template "funcoptuse"}})` APIInvVVRaw = `ret, err = {{.Inv}}(ret, b, UseUnsafe())` APIInvVSRaw = `ret, err = {{.Inv}}(ret, b, UseUnsafe())` APIInvSVRaw = `ret, err = {{.Name}}(b, ret, UseUnsafe())` DenseMethodCallVVxRaw = `correct, err := a.{{.Name}}(b)` // no funcopt DenseMethodCallVVReuseMutRaw = `ret, err = a.{{.Name}}(b` DenseMethodCallVVRaw = `ret, err := a.{{.Name}}(b {{template "funcoptuse"}})` DenseMethodCallVSRaw = `ret, err := a.{{.Name}}Scalar(b, true {{template "funcoptuse"}})` DenseMethodCallSVRaw = `ret, err := a.{{.Name}}Scalar(b, false {{template "funcoptuse"}})` DenseMethodInvVVRaw = `ret, err = ret.{{.Inv}}(b, UseUnsafe())` DenseMethodInvVSRaw = `ret, err = ret.{{.Inv}}Scalar(b, true, UseUnsafe())` DenseMethodInvSVRaw = `ret, err = ret.{{.Name}}Scalar(b, false, UseUnsafe())` APIRetType = `Tensor` DenseRetType = `*Dense` ) type ArithTest struct { arithOp scalars bool lvl Level FuncOpt string EqFailTypeClassName string } func (fn *ArithTest) Signature() *Signature { var name string switch fn.lvl { case API: name = fmt.Sprintf("Test%s", fn.Name()) case Dense: name = fmt.Sprintf("TestDense_%s", fn.Name()) } if fn.scalars { name += "Scalar" } if fn.FuncOpt != "" { name += "_" + fn.FuncOpt } return &Signature{ Name: name, NameTemplate: plainName, ParamNames: []string{"t"}, ParamTemplates: []*template.Template{testingType}, } } func (fn *ArithTest) WriteBody(w io.Writer) { if fn.HasIdentity { fn.writeIdentity(w) fmt.Fprintf(w, "\n") } if fn.IsInv { fn.writeInv(w) } fn.WriteScalarWrongType(w) if fn.FuncOpt == "reuse" && fn.arithOp.Name() != "Pow" { fn.writeReuseMutate(w) } } func (fn *ArithTest) canWrite() bool { if fn.HasIdentity || fn.IsInv { return true } return false } func (fn *ArithTest) writeIdentity(w io.Writer) { var t *template.Template if fn.scalars { t = template.Must(template.New("dense identity test").Funcs(funcs).Parse(denseIdentityArithScalarTestRaw)) } else { t = template.Must(template.New("dense identity test").Funcs(funcs).Parse(denseIdentityArithTestBodyRaw)) } switch fn.lvl { case API: if fn.scalars { 
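			// "call0" is the tensor∙scalar ordering; "call1" is scalar∙tensor.
			// As an illustrative sketch (assuming .Name == "Add" and no func
			// opt in play), these constants parse into generated test code of
			// the form:
			//
			//	ret, err := Add(a, b)	// call0: tensor on the left
			//	ret, err := Add(b, a)	// call1: scalar on the left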
template.Must(t.New("call0").Parse(APICallVSRaw)) template.Must(t.New("call1").Parse(APICallSVRaw)) } else { template.Must(t.New("call0").Parse(APICallVVRaw)) } case Dense: if fn.scalars { template.Must(t.New("call0").Parse(DenseMethodCallVSRaw)) template.Must(t.New("call1").Parse(DenseMethodCallSVRaw)) } else { template.Must(t.New("call0").Parse(DenseMethodCallVVRaw)) } } template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptcorrect").Parse(funcOptCorrect[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *ArithTest) writeInv(w io.Writer) { var t *template.Template if fn.scalars { t = template.Must(template.New("dense involution test").Funcs(funcs).Parse(denseInvArithScalarTestRaw)) } else { t = template.Must(template.New("dense involution test").Funcs(funcs).Parse(denseInvArithTestBodyRaw)) } switch fn.lvl { case API: if fn.scalars { template.Must(t.New("call0").Parse(APICallVSRaw)) template.Must(t.New("call1").Parse(APICallSVRaw)) template.Must(t.New("callInv0").Parse(APIInvVSRaw)) template.Must(t.New("callInv1").Parse(APIInvSVRaw)) } else { template.Must(t.New("call0").Parse(APICallVVRaw)) template.Must(t.New("callInv").Parse(APIInvVVRaw)) } case Dense: if fn.scalars { template.Must(t.New("call0").Parse(DenseMethodCallVSRaw)) template.Must(t.New("call1").Parse(DenseMethodCallSVRaw)) template.Must(t.New("callInv0").Parse(DenseMethodInvVSRaw)) template.Must(t.New("callInv1").Parse(DenseMethodInvSVRaw)) } else { template.Must(t.New("call0").Parse(DenseMethodCallVVRaw)) template.Must(t.New("callInv").Parse(DenseMethodInvVVRaw)) } } template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptcorrect").Parse(funcOptCorrect[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *ArithTest) writeReuseMutate(w io.Writer) { t := template.Must(template.New("Reuse mutation test").Funcs(funcs).Parse(denseArithReuseMutationTestRaw)) switch fn.lvl { case API: return // tmp case Dense: template.Must(t.New("callVanilla").Parse(DenseMethodCallVVxRaw)) template.Must(t.New("retType").Parse(DenseRetType)) template.Must(t.New("call0").Parse(DenseMethodCallVVReuseMutRaw)) } template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *ArithTest) WriteScalarWrongType(w io.Writer) { if !fn.scalars { return } if fn.FuncOpt != "" { return } t := template.Must(template.New("dense scalar wrongtype test").Funcs(funcs).Parse(denseArithScalarWrongTypeTestRaw)) template.Must(t.New("call0").Parse(APICallVSRaw)) template.Must(t.New("call1").Parse(APICallSVRaw)) template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptcorrect").Parse(funcOptCorrect[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *ArithTest) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("}\n")) } func generateAPIArithTests(f io.Writer, ak Kinds) { var tests []*ArithTest for _, op := range arithBinOps { t := &ArithTest{ 
arithOp: op, lvl: API, EqFailTypeClassName: "nil", } if t.name == "Pow" { t.EqFailTypeClassName = "complexTypes" } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "unsafe" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "reuse" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "incr" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } func generateAPIArithScalarTests(f io.Writer, ak Kinds) { var tests []*ArithTest for _, op := range arithBinOps { t := &ArithTest{ arithOp: op, scalars: true, lvl: API, EqFailTypeClassName: "nil", } switch t.name { case "Pow": t.EqFailTypeClassName = "complexTypes" case "Sub": t.EqFailTypeClassName = "unsignedTypes" } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "unsafe" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "reuse" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "incr" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } func generateDenseMethodArithTests(f io.Writer, ak Kinds) { var tests []*ArithTest for _, op := range arithBinOps { t := &ArithTest{ arithOp: op, lvl: Dense, EqFailTypeClassName: "nil", } if t.name == "Pow" { t.EqFailTypeClassName = "complexTypes" } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "unsafe" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "reuse" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "incr" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } func generateDenseMethodScalarTests(f io.Writer, ak Kinds) { var tests []*ArithTest for _, op := range arithBinOps { t := &ArithTest{ arithOp: op, scalars: true, lvl: Dense, EqFailTypeClassName: "nil", } switch t.name { case "Pow": t.EqFailTypeClassName = "complexTypes" case "Sub": t.EqFailTypeClassName = "unsignedTypes" } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "unsafe" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "reuse" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "incr" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } tensor-0.9.24/genlib2/array_getset.go000066400000000000000000000166611426512615100174770ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const asSliceRaw = `func (h *Header) {{asType . | strip | title}}s() []{{asType .}} {return (*(*[]{{asType .}})(unsafe.Pointer(&h.Raw)))[:h.TypedLen({{short . | unexport}}Type):h.TypedLen({{short . | unexport}}Type)]} ` const setBasicRaw = `func (h *Header) Set{{short . }}(i int, x {{asType . }}) { h.{{sliceOf .}}[i] = x } ` const getBasicRaw = `func (h *Header) Get{{short .}}(i int) {{asType .}} { return h.{{lower .String | clean | strip | title }}s()[i]} ` const getRaw = `// Get returns the ith element of the underlying array of the *Dense tensor. func (a *array) Get(i int) interface{} { switch a.t.Kind() { {{range .Kinds -}} {{if isParameterized . -}} {{else -}} case reflect.{{reflectKind .}}: return a.{{getOne .}}(i) {{end -}}; {{end -}} default: val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) return val.Interface() } } ` const setRaw = `// Set sets the value of the underlying array at the index i. 
func (a *array) Set(i int, x interface{}) {
	switch a.t.Kind() {
	{{range .Kinds -}}
	{{if isParameterized . -}}
	{{else -}}
	case reflect.{{reflectKind .}}:
		xv := x.({{asType .}})
		a.{{setOne .}}(i, xv)
	{{end -}}
	{{end -}}
	default:
		xv := reflect.ValueOf(x)
		val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
		val = reflect.Indirect(val)
		val.Set(xv)
	}
}
`

const memsetRaw = `// Memset sets all values in the array.
func (a *array) Memset(x interface{}) error {
	switch a.t {
	{{range .Kinds -}}
	{{if isParameterized . -}}
	{{else -}}
	case {{reflectKind .}}:
		if xv, ok := x.({{asType .}}); ok {
			data := a.{{sliceOf .}}
			for i := range data {
				data[i] = xv
			}
			return nil
		}
	{{end -}}
	{{end -}}
	}

	xv := reflect.ValueOf(x)
	l := a.Len()
	for i := 0; i < l; i++ {
		val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
		val = reflect.Indirect(val)
		val.Set(xv)
	}
	return nil
}
`

const arrayEqRaw = `// Eq checks whether two arrays are equal.
func (a array) Eq(other interface{}) bool {
	if oa, ok := other.(*array); ok {
		if oa.t != a.t {
			return false
		}

		if oa.Len() != a.Len() {
			return false
		}
		/*
			if oa.C != a.C {
				return false
			}
		*/ // same exact thing

		if uintptr(unsafe.Pointer(&oa.Header.Raw[0])) == uintptr(unsafe.Pointer(&a.Header.Raw[0])) {
			return true
		}

		switch a.t.Kind() {
		{{range .Kinds -}}
		{{if isParameterized . -}}
		{{else -}}
		case reflect.{{reflectKind .}}:
			for i, v := range a.{{sliceOf .}} {
				if oa.{{getOne .}}(i) != v {
					return false
				}
			}
		{{end -}}
		{{end -}}
		default:
			for i := 0; i < a.Len(); i++ {
				if !reflect.DeepEqual(a.Get(i), oa.Get(i)) {
					return false
				}
			}
		}
		return true
	}
	return false
}`

const copyArrayIterRaw = `func copyArrayIter(dst, src array, diter, siter Iterator) (count int, err error) {
	if dst.t != src.t {
		panic("Cannot copy arrays of different types")
	}

	if diter == nil && siter == nil {
		return copyArray(dst, src), nil
	}

	if (diter != nil && siter == nil) || (diter == nil && siter != nil) {
		return 0, errors.Errorf("Cannot copy array when only one iterator was passed in")
	}

	k := dst.t.Kind()
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = diter.NextValidity(); err != nil {
			if err = handleNoOp(err); err != nil {
				return count, err
			}
			break
		}
		if j, validj, err = siter.NextValidity(); err != nil {
			if err = handleNoOp(err); err != nil {
				return count, err
			}
			break
		}
		if validi && validj {
			switch k {
			{{range .Kinds -}}
			{{if isParameterized . -}}
			{{else -}}
			case reflect.{{reflectKind .}}:
				dst.{{setOne .}}(i, src.{{getOne .}}(j))
			{{end -}}
			{{end -}}
			default:
				dst.Set(i, src.Get(j))
			}
			count++
		}
	}
	return count, err
}
`

const memsetIterRaw = `func (a *array) memsetIter(x interface{}, it Iterator) (err error) {
	var i int
	switch a.t {
	{{range .Kinds -}}
	{{if isParameterized . -}}
	{{else -}}
	case {{reflectKind .}}:
		xv, ok := x.({{asType .}})
		if !ok {
			return errors.Errorf(dtypeMismatch, a.t, x)
		}
		data := a.{{sliceOf .}}
		for i, err = it.Next(); err == nil; i, err = it.Next() {
			data[i] = xv
		}
		err = handleNoOp(err)
	{{end -}}
	{{end -}}
	default:
		xv := reflect.ValueOf(x)
		for i, err = it.Next(); err == nil; i, err = it.Next() {
			val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
			val = reflect.Indirect(val)
			val.Set(xv)
		}
		err = handleNoOp(err)
	}
	return
}
`

const zeroIterRaw = `func (a *array) zeroIter(it Iterator) (err error) {
	var i int
	switch a.t {
	{{range .Kinds -}}
	{{if isParameterized . 
-}} {{else -}} case {{reflectKind .}}: data := a.{{sliceOf .}} for i, err = it.Next(); err == nil; i, err = it.Next(){ data[i] = {{if eq .String "bool" -}} false {{else if eq .String "string" -}}"" {{else if eq .String "unsafe.Pointer" -}}nil {{else -}}0{{end -}} } err = handleNoOp(err) {{end -}} {{end -}} default: for i, err = it.Next(); err == nil; i, err = it.Next(){ val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size())) val = reflect.Indirect(val) val.Set(reflect.Zero(a.t)) } err = handleNoOp(err) } return } ` const reflectConstTemplateRaw = `var ( {{range .Kinds -}} {{if isParameterized . -}} {{else -}} {{short . | unexport}}Type = reflect.TypeOf({{asType .}}({{if eq .String "bool" -}} false {{else if eq .String "string" -}}"" {{else if eq .String "unsafe.Pointer" -}}nil {{else -}}0{{end -}})) {{end -}} {{end -}} )` var ( AsSlice *template.Template SimpleSet *template.Template SimpleGet *template.Template Get *template.Template Set *template.Template Memset *template.Template MemsetIter *template.Template Eq *template.Template ZeroIter *template.Template ReflectType *template.Template ) func init() { AsSlice = template.Must(template.New("AsSlice").Funcs(funcs).Parse(asSliceRaw)) SimpleSet = template.Must(template.New("SimpleSet").Funcs(funcs).Parse(setBasicRaw)) SimpleGet = template.Must(template.New("SimpleGet").Funcs(funcs).Parse(getBasicRaw)) Get = template.Must(template.New("Get").Funcs(funcs).Parse(getRaw)) Set = template.Must(template.New("Set").Funcs(funcs).Parse(setRaw)) Memset = template.Must(template.New("Memset").Funcs(funcs).Parse(memsetRaw)) MemsetIter = template.Must(template.New("MemsetIter").Funcs(funcs).Parse(memsetIterRaw)) Eq = template.Must(template.New("ArrayEq").Funcs(funcs).Parse(arrayEqRaw)) ZeroIter = template.Must(template.New("Zero").Funcs(funcs).Parse(zeroIterRaw)) ReflectType = template.Must(template.New("ReflectType").Funcs(funcs).Parse(reflectConstTemplateRaw)) } func generateArrayMethods(f io.Writer, ak Kinds) { Set.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") Get.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") Memset.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") MemsetIter.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") Eq.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") ZeroIter.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") } func generateHeaderGetSet(f io.Writer, ak Kinds) { for _, k := range ak.Kinds { if !isParameterized(k) { fmt.Fprintf(f, "/* %v */\n\n", k) AsSlice.Execute(f, k) SimpleSet.Execute(f, k) SimpleGet.Execute(f, k) fmt.Fprint(f, "\n") } } } func generateReflectTypes(f io.Writer, ak Kinds) { ReflectType.Execute(f, ak) fmt.Fprintf(f, "\n\n\n") } tensor-0.9.24/genlib2/body.go000066400000000000000000000102411426512615100157270ustar00rootroot00000000000000package main import "text/template" // generic loop templates type LoopBody struct { TypedOp Range string Left string Right string Index0, Index1, Index2 string IterName0, IterName1, IterName2 string } const ( genericLoopRaw = `for i := range {{.Range}} { {{template "check" . -}} {{template "loopbody" . -}} }` genericUnaryIterLoopRaw = `var {{.Index0}} int var valid{{.Index0}} bool for { if {{.Index0}}, valid{{.Index0}}, err = {{.IterName0}}.NextValidity(); err != nil { err = handleNoOp(err) break } if valid{{.Index0}} { {{template "check" . -}} {{template "loopbody" . 
-}} } }` genericBinaryIterLoopRaw = `var {{.Index0}}, {{.Index1}} int var valid{{.Index0}}, valid{{.Index1}} bool for { if {{.Index0}}, valid{{.Index0}}, err = {{.IterName0}}.NextValidity(); err != nil { err = handleNoOp(err) break } if {{.Index1}}, valid{{.Index1}}, err = {{.IterName1}}.NextValidity(); err != nil { err = handleNoOp(err) break } if valid{{.Index0}} && valid{{.Index1}} { {{template "check" . -}} {{template "loopbody" . -}} } }` genericTernaryIterLoopRaw = `var {{.Index0}}, {{.Index1}}, {{.Index2}} int var valid{{.Index0}}, valid{{.Index1}}, valid{{.Index2}} bool for { if {{.Index0}}, valid{{.Index0}}, err = {{.IterName0}}.NextValidity(); err != nil { err = handleNoOp(err) break } if {{.Index1}}, valid{{.Index1}}, err = {{.IterName1}}.NextValidity(); err != nil { err = handleNoOp(err) break } if {{.Index2}}, valid{{.Index2}}, err = {{.IterName2}}.NextValidity(); err != nil { err = handleNoOp(err) break } if valid{{.Index0}} && valid{{.Index1}} && valid{{.Index2}} { {{template "check" . -}} {{template "loopbody" . -}} } }` // ALL THE SYNTACTIC ABSTRACTIONS! // did I mention how much I hate C-style macros? Now I'm doing them instead basicSet = `{{if .IsFunc -}} {{.Range}}[i] = {{ template "callFunc" . -}} {{else -}} {{.Range}}[i] = {{template "opDo" . -}} {{end -}}` basicIncr = `{{if .IsFunc -}} {{.Range}}[i] += {{template "callFunc" . -}} {{else -}} {{.Range}}[i] += {{template "opDo" . -}} {{end -}}` iterIncrLoopBody = `{{if .IsFunc -}} {{.Range}}[k] += {{template "callFunc" . -}} {{else -}} {{.Range}}[k] += {{template "opDo" . -}} {{end -}}` sameSet = `if {{template "opDo" . }} { {{.Range}}[i] = {{trueValue .Kind}} }else{ {{.Range}}[i] = {{falseValue .Kind}} }` clampBody = `if {{.Range}}[i] < min {{if eq .Kind.String "float64"}}|| math.IsInf({{.Range}}[i], -1){{else if eq .Kind.String "float32"}}|| math32.IsInf({{.Range}}[i], -1){{end}} { {{.Range}}[i] = min continue } if {{.Range}}[i] > max {{if eq .Kind.String "float64"}}|| math.IsInf({{.Range}}[i], 1){{else if eq .Kind.String "float32"}}|| math32.IsInf({{.Range}}[i], 1){{end}} { {{.Range}}[i] = max } ` ternaryIterSet = `{{.Range}}[k] = {{template "opDo" . 
-}}` binOpCallFunc = `{{if eq "complex64" .Kind.String -}} complex64({{template "symbol" .Kind}}(complex128({{.Left}}), complex128({{.Right}}))){{else -}} {{template "symbol" .Kind}}({{.Left}}, {{.Right}}){{end -}}` binOpDo = `{{.Left}} {{template "symbol" .Kind}} {{.Right}}` unaryOpDo = `{{template "symbol" .}}{{.Left}}[{{.Index0}}]` unaryOpCallFunc = `{{if eq "complex64" .Kind.String -}} complex64({{template "symbol" .}}(complex128({{.Left}}[{{.Index0}}]))){{else -}} {{template "symbol" .}}({{.Left}}[{{.Index0}}]){{end -}} ` check0 = `if {{.Right}} == 0 { errs = append(errs, i) {{.Range}}[i] = 0 continue } ` maskCheck = `if mask[i] { continue } ` genericArgmaxVarDeclRaw = `var set bool var f {{asType .Kind}} var {{.ArgX | lower}} int ` ) // renamed const ( vvLoopRaw = genericLoopRaw vvIncrLoopRaw = genericLoopRaw vvIterLoopRaw = genericBinaryIterLoopRaw vvIterIncrLoopRaw = genericTernaryIterLoopRaw mixedLoopRaw = genericLoopRaw mixedIncrLoopRaw = genericLoopRaw mixedIterLoopRaw = genericUnaryIterLoopRaw mixedIterIncrLoopRaw = genericBinaryIterLoopRaw ) var genericArgmaxVarDecl *template.Template func init() { genericArgmaxVarDecl = template.Must(template.New("genericArgmaxVarDecl").Funcs(funcs).Parse(genericArgmaxVarDeclRaw)) } tensor-0.9.24/genlib2/cmp_tests.go000066400000000000000000000303761426512615100170060ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const ( APICallVVaxbRaw = `axb, err := {{.Name}}(a, b {{template "funcoptuse" . -}})` APICallVVbxcRaw = `bxc, err := {{.Name}}(b, c {{template "funcoptuse" . -}})` APICallVVaxcRaw = `axc, err := {{.Name}}(a, c {{template "funcoptuse" . -}})` APICallVVbxaRaw = `bxa, err := {{.Name}}(b, a {{template "funcoptuse" . -}})` APICallMixedaxbRaw = `axb, err := {{.Name}}(a, b {{template "funcoptuse" . -}})` APICallMixedbxcRaw = `bxc, err := {{.Name}}(b, c {{template "funcoptuse" . -}})` APICallMixedaxcRaw = `axc, err := {{.Name}}(a, c {{template "funcoptuse" . -}})` APICallMixedbxaRaw = `bxa, err := {{.Name}}(b, a {{template "funcoptuse" . -}})` DenseMethodCallVVaxbRaw = `axb, err := a.{{.Name}}(b {{template "funcoptuse" . -}})` DenseMethodCallVVbxcRaw = `bxc, err := b.{{.Name}}(c {{template "funcoptuse" . -}})` DenseMethodCallVVaxcRaw = `axc, err := a.{{.Name}}(c {{template "funcoptuse" . -}})` DenseMethodCallVVbxaRaw = `bxa, err := b.{{.Name}}(a {{template "funcoptuse" . -}})` DenseMethodCallMixedaxbRaw = `axb, err := a.{{.Name}}Scalar(b, true {{template "funcoptuse" . -}})` DenseMethodCallMixedbxcRaw = `bxc, err := c.{{.Name}}Scalar(b, false {{template "funcoptuse" . -}})` DenseMethodCallMixedaxcRaw = `axc, err := a.{{.Name}}(c {{template "funcoptuse" . -}})` DenseMethodCallMixedbxaRaw = `bxa, err := a.{{.Name}}Scalar(b, false {{template "funcoptuse" . 
-}})` ) const transitivityCheckRaw = `{{if eq .FuncOpt "assame" -}} if !threewayEq(axb.Data(), bxc.Data(), axc.Data()){ t.Errorf("a: %-v", a) t.Errorf("b: %-v", b) t.Errorf("c: %-v", c) t.Errorf("axb.Data() %v", axb.Data()) t.Errorf("bxc.Data() %v", bxc.Data()) t.Errorf("axc.Data() %v", axc.Data()) return false } {{else -}} {{if eq .Level "API" -}} ab := axb.(*Dense).Bools() bc := bxc.(*Dense).Bools() ac := axc.(*Dense).Bools() {{else -}} ab := axb.Bools() bc := bxc.Bools() ac := axc.Bools() {{end -}} for i, vab := range ab { if vab && bc[i] { if !ac[i]{ return false } } } {{end -}} ` const transitivityBodyRaw = `transFn := func(q *Dense) bool { we, _ := willerr(q, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "funcoptdecl" . -}} r := newRand() a := q.Clone().(*Dense) b := q.Clone().(*Dense) c := q.Clone().(*Dense) bv, _ := quick.Value(b.Dtype().Type, r) cv, _ := quick.Value(c.Dtype().Type, r) b.Memset(bv.Interface()) c.Memset(cv.Interface()) {{template "axb" .}} if err, retEarly := qcErrCheck(t, "{{.Name}} - a∙b", a, b, we, err); retEarly{ if err != nil { return false } return true } {{template "bxc" . }} if err, retEarly := qcErrCheck(t, "{{.Name}} - b∙c", b, c, we, err); retEarly{ if err != nil { return false } return true } {{template "axc" . }} if err, retEarly := qcErrCheck(t, "{{.Name}} - a∙c", a, c, we, err); retEarly{ if err != nil { return false } return true } {{template "transitivityCheck" .}} return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for {{.Name}} failed: %v", err) } ` const transitivityMixedBodyRaw = `transFn := func(q *Dense) bool { we, _ := willerr(q, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "funcoptdecl" . -}} r := newRand() a := q.Clone().(*Dense) bv, _ := quick.Value(a.Dtype().Type, r) b := bv.Interface() c := q.Clone().(*Dense) cv, _ := quick.Value(c.Dtype().Type, r) c.Memset(cv.Interface()) {{template "axb" . }} if err, retEarly := qcErrCheck(t, "{{.Name}} - a∙b", a, b, we, err); retEarly{ if err != nil { return false } return true } {{template "bxc" . }} if err, retEarly := qcErrCheck(t, "{{.Name}} - b∙c", c, b, we, err); retEarly{ if err != nil { return false } return true } {{template "axc" . }} if err, retEarly := qcErrCheck(t, "{{.Name}} - a∙c", a, c, we, err); retEarly{ if err != nil { return false } return true } {{template "transitivityCheck" .}} return true } if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Transitivity test for {{.Name}} failed: %v", err) } ` const symmetryBodyRaw = `symFn := func(q *Dense) bool { we, _ := willerr(q, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok {{template "funcoptdecl" . 
-}}
	r := newRand()
	a := q.Clone().(*Dense)
	b := q.Clone().(*Dense)
	bv, _ := quick.Value(b.Dtype().Type, r)
	b.Memset(bv.Interface())

	{{template "axb" .}}
	if err, retEarly := qcErrCheck(t, "{{.Name}} - a∙b", a, b, we, err); retEarly {
		if err != nil {
			return false
		}
		return true
	}

	{{template "bxa" .}}
	if err, retEarly := qcErrCheck(t, "{{.Name}} - b∙a", a, b, we, err); retEarly {
		if err != nil {
			return false
		}
		return true
	}

	return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
	t.Errorf("Symmetry test for {{.Name}} failed: %v", err)
}
`

const symmetryMixedBodyRaw = `symFn := func(q *Dense) bool {
	we, _ := willerr(q, {{.TypeClassName}}, {{.EqFailTypeClassName}})
	_, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok
	{{template "funcoptdecl" . -}}
	r := newRand()
	a := q.Clone().(*Dense)
	bv, _ := quick.Value(a.Dtype().Type, r)
	b := bv.Interface()

	{{template "axb" .}}
	if err, retEarly := qcErrCheck(t, "{{.Name}} - a∙b", a, b, we, err); retEarly {
		if err != nil {
			return false
		}
		return true
	}

	{{template "bxa" .}}
	if err, retEarly := qcErrCheck(t, "{{.Name}} - b∙a", a, b, we, err); retEarly {
		if err != nil {
			return false
		}
		return true
	}

	return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
	t.Errorf("Symmetry test for {{.Name}} failed: %v", err)
}
`

type CmpTest struct {
	cmpOp
	scalars             bool
	lvl                 Level
	FuncOpt             string
	EqFailTypeClassName string
}

func (fn *CmpTest) Name() string {
	if fn.cmpOp.Name() == "Eq" || fn.cmpOp.Name() == "Ne" {
		return "El" + fn.cmpOp.Name()
	}
	return fn.cmpOp.Name()
}

func (fn *CmpTest) Level() string {
	switch fn.lvl {
	case API:
		return "API"
	case Dense:
		return "Dense"
	}
	return ""
}

func (fn *CmpTest) Signature() *Signature {
	var name string
	switch fn.lvl {
	case API:
		name = fmt.Sprintf("Test%s", fn.cmpOp.Name())
	case Dense:
		name = fmt.Sprintf("TestDense_%s", fn.Name())
	}
	if fn.scalars {
		name += "Scalar"
	}
	if fn.FuncOpt != "" {
		name += "_" + fn.FuncOpt
	}
	return &Signature{
		Name:           name,
		NameTemplate:   plainName,
		ParamNames:     []string{"t"},
		ParamTemplates: []*template.Template{testingType},
	}
}

func (fn *CmpTest) canWrite() bool {
	return fn.IsTransitive || fn.IsSymmetric
}

func (fn *CmpTest) WriteBody(w io.Writer) {
	if fn.IsTransitive {
		fn.writeTransitivity(w)
		fmt.Fprintf(w, "\n")
	}
	if fn.IsSymmetric {
		fn.writeSymmetry(w)
	}
}

func (fn *CmpTest) writeTransitivity(w io.Writer) {
	var t *template.Template
	if fn.scalars {
		t = template.Must(template.New("dense cmp transitivity test").Funcs(funcs).Parse(transitivityMixedBodyRaw))
	} else {
		t = template.Must(template.New("dense cmp transitivity test").Funcs(funcs).Parse(transitivityBodyRaw))
	}
	switch fn.lvl {
	case API:
		if fn.scalars {
			template.Must(t.New("axb").Parse(APICallMixedaxbRaw))
			template.Must(t.New("bxc").Parse(APICallMixedbxcRaw))
			template.Must(t.New("axc").Parse(APICallMixedaxcRaw))
		} else {
			template.Must(t.New("axb").Parse(APICallVVaxbRaw))
			template.Must(t.New("bxc").Parse(APICallVVbxcRaw))
			template.Must(t.New("axc").Parse(APICallVVaxcRaw))
		}
	case Dense:
		if fn.scalars {
			template.Must(t.New("axb").Parse(DenseMethodCallMixedaxbRaw))
			template.Must(t.New("bxc").Parse(DenseMethodCallMixedbxcRaw))
			template.Must(t.New("axc").Parse(DenseMethodCallMixedaxcRaw))
		} else {
			template.Must(t.New("axb").Parse(DenseMethodCallVVaxbRaw))
			template.Must(t.New("bxc").Parse(DenseMethodCallVVbxcRaw))
			template.Must(t.New("axc").Parse(DenseMethodCallVVaxcRaw))
		}
	}
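	// Illustrative sketch, not verbatim generator output: at the Dense level
	// with .Name == "Gt" and no func opt, the axb/bxc/axc sub-templates above
	// expand to calls like
	//
	//	axb, err := a.Gt(b)
	//	bxc, err := b.Gt(c)
	//	axc, err := a.Gt(c)
	//
	// which the transitivityCheck template parsed below then ties together as
	// (a > b) && (b > c) ⇒ (a > c).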
template.Must(t.New("transitivityCheck").Parse(transitivityCheckRaw)) template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptcorrect").Parse(funcOptCorrect[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *CmpTest) writeSymmetry(w io.Writer) { var t *template.Template if fn.scalars { t = template.Must(template.New("dense cmp symmetry test").Funcs(funcs).Parse(symmetryMixedBodyRaw)) } else { t = template.Must(template.New("dense cmp symmetry test").Funcs(funcs).Parse(symmetryBodyRaw)) } switch fn.lvl { case API: if fn.scalars { template.Must(t.New("axb").Parse(APICallMixedaxbRaw)) template.Must(t.New("bxa").Parse(APICallMixedbxaRaw)) } else { template.Must(t.New("axb").Parse(APICallVVaxbRaw)) template.Must(t.New("bxa").Parse(APICallVVbxaRaw)) } case Dense: if fn.scalars { template.Must(t.New("axb").Parse(DenseMethodCallMixedaxbRaw)) template.Must(t.New("bxa").Parse(DenseMethodCallMixedbxaRaw)) } else { template.Must(t.New("axb").Parse(DenseMethodCallVVaxbRaw)) template.Must(t.New("bxa").Parse(DenseMethodCallVVbxaRaw)) } } template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptcorrect").Parse(funcOptCorrect[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *CmpTest) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("}\n")) } func generateAPICmpTests(f io.Writer, ak Kinds) { var tests []*CmpTest for _, op := range cmpBinOps { t := &CmpTest{ cmpOp: op, lvl: API, EqFailTypeClassName: "nil", } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "assame" fn.TypeClassName = "nonComplexNumberTypes" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } func generateAPICmpMixedTests(f io.Writer, ak Kinds) { var tests []*CmpTest for _, op := range cmpBinOps { t := &CmpTest{ cmpOp: op, lvl: API, scalars: true, EqFailTypeClassName: "nil", } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "assame" fn.TypeClassName = "nonComplexNumberTypes" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } func generateDenseMethodCmpTests(f io.Writer, ak Kinds) { var tests []*CmpTest for _, op := range cmpBinOps { t := &CmpTest{ cmpOp: op, lvl: Dense, EqFailTypeClassName: "nil", } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "assame" fn.TypeClassName = "nonComplexNumberTypes" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } func generateDenseMethodCmpMixedTests(f io.Writer, ak Kinds) { var tests []*CmpTest for _, op := range cmpBinOps { t := &CmpTest{ cmpOp: op, lvl: Dense, scalars: true, EqFailTypeClassName: "nil", } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "assame" fn.TypeClassName = "nonComplexNumberTypes" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } tensor-0.9.24/genlib2/declarations.go000066400000000000000000000276101426512615100174520ustar00rootroot00000000000000package main import ( "reflect" "strings" "text/template" ) var arithSymbolTemplates = [...]string{ "+", "-", "*", "/", "{{mathPkg .}}Pow", "{{if isFloatCmplx .}}{{mathPkg 
.}}Mod{{else}}%{{end}}", } var cmpSymbolTemplates = [...]string{ ">", ">=", "<", "<=", "==", "!=", } var nonFloatConditionalUnarySymbolTemplates = [...]string{ `{{if isFloat .Kind -}} {{.Range}}[{{.Index0}}] = {{mathPkg .Kind}}Abs({{.Range}}[{{.Index0}}]) {{else -}} if {{.Range}}[{{.Index0}}] < 0 { {{.Range}}[{{.Index0}}] = -{{.Range}}[{{.Index0}}] }{{end -}}`, // abs `if {{.Range}}[{{.Index0}}] < 0 { {{.Range}}[{{.Index0}}] = -1 } else if {{.Range}}[{{.Index0}}] > 0 { {{.Range}}[{{.Index0}}] = 1 }`, // sign } var unconditionalNumUnarySymbolTemplates = [...]string{ "-", // neg "1/", // inv "{{.Range}}[i]*", // square "{{.Range}}[i]*{{.Range}}[i]*", // cube } var unconditionalFloatUnarySymbolTemplates = [...]string{ "{{mathPkg .Kind}}Exp", "{{mathPkg .Kind}}Tanh", "{{mathPkg .Kind}}Log", "{{mathPkg .Kind}}Log2", "{{mathPkg .Kind}}Log10", "{{mathPkg .Kind}}Sqrt", "{{mathPkg .Kind}}Cbrt", `{{asType .Kind}}(1)/{{mathPkg .Kind}}Sqrt`, } var funcOptUse = map[string]string{ "reuse": ",WithReuse(reuse)", "incr": ",WithIncr(incr)", "unsafe": ",UseUnsafe()", "assame": ", AsSameType()", } var funcOptCheck = map[string]string{ "reuse": `if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } `, "incr": "", "unsafe": `if ret != a { t.Errorf("Expected ret to be the same as a") return false } `, } var funcOptDecl = map[string]string{ "reuse": "reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))\n", "incr": "incr := New(Of(a.t), WithShape(a.Shape().Clone()...))\n", "unsafe": "", "assame": `if err := typeclassCheck(q.Dtype(), {{.TypeClassName}}); err != nil { return true // we exit early if the generated type is not something we can handle } `, } var funcOptCorrect = map[string]string{ "reuse": "", "incr": `incr.Memset(identityVal(100, a.t)) correct.Add(incr, UseUnsafe()) `, "unsafe": "", } var stdTypes = [...]string{ "Bool", "Int", "Int8", "Int16", "Int32", "Int64", "Uint", "Uint8", "Uint16", "Uint32", "Uint64", "Float32", "Float64", "Complex64", "Complex128", "String", "Uintptr", "UnsafePointer", } var arrowBinaryTypes = []string{ "String", } var arrowFixedWidthTypes = []string{ "Boolean", } var arrowPrimitiveTypes = []string{ "Int8", "Int16", "Int32", "Int64", "Uint8", "Uint16", "Uint32", "Uint64", "Float32", "Float64", } var parameterizedKinds = [...]reflect.Kind{ reflect.Array, reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.Struct, } var number = [...]reflect.Kind{ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, } var rangeable = [...]reflect.Kind{ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, } var specialized = [...]reflect.Kind{ reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String, } var signedNumber = [...]reflect.Kind{ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, } var nonComplexNumber = [...]reflect.Kind{ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, 
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, } var elEq = [...]reflect.Kind{ reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String, reflect.UnsafePointer, } var elOrd = [...]reflect.Kind{ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, // reflect.Uintptr, // comparison of pointers is not that great an idea - it can technically be done but should not be encouraged reflect.Float32, reflect.Float64, // reflect.Complex64, // reflect.Complex128, reflect.String, // strings are orderable and the assumption is lexicographic sorting } var boolRepr = [...]reflect.Kind{ reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String, } var div0panics = [...]reflect.Kind{ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, } var funcs = template.FuncMap{ "lower": strings.ToLower, "title": strings.Title, "unexport": unexport, "hasPrefix": strings.HasPrefix, "hasSuffix": strings.HasSuffix, "isParameterized": isParameterized, "isRangeable": isRangeable, "isSpecialized": isSpecialized, "isNumber": isNumber, "isSignedNumber": isSignedNumber, "isNonComplexNumber": isNonComplexNumber, "isAddable": isAddable, "isFloat": isFloat, "isFloatCmplx": isFloatCmplx, "isEq": isEq, "isOrd": isOrd, "isBoolRepr": isBoolRepr, "panicsDiv0": panicsDiv0, "short": short, "clean": clean, "strip": strip, "reflectKind": reflectKind, "asType": asType, "sliceOf": sliceOf, "getOne": getOne, "setOne": setOne, "trueValue": trueValue, "falseValue": falseValue, "mathPkg": mathPkg, "vecPkg": vecPkg, "bitSizeOf": bitSizeOf, "getalias": getalias, "interfaceName": interfaceName, "isntFloat": isntFloat, } var shortNames = map[reflect.Kind]string{ reflect.Invalid: "Invalid", reflect.Bool: "B", reflect.Int: "I", reflect.Int8: "I8", reflect.Int16: "I16", reflect.Int32: "I32", reflect.Int64: "I64", reflect.Uint: "U", reflect.Uint8: "U8", reflect.Uint16: "U16", reflect.Uint32: "U32", reflect.Uint64: "U64", reflect.Uintptr: "Uintptr", reflect.Float32: "F32", reflect.Float64: "F64", reflect.Complex64: "C64", reflect.Complex128: "C128", reflect.Array: "Array", reflect.Chan: "Chan", reflect.Func: "Func", reflect.Interface: "Interface", reflect.Map: "Map", reflect.Ptr: "Ptr", reflect.Slice: "Slice", reflect.String: "Str", reflect.Struct: "Struct", reflect.UnsafePointer: "UnsafePointer", } var nameMaps = map[string]string{ "VecAdd": "Add", "VecSub": "Sub", "VecMul": "Mul", "VecDiv": "Div", "VecPow": "Pow", "VecMod": "Mod", "AddVS": "Trans", "AddSV": "TransR", "SubVS": "TransInv", "SubSV": "TransInvR", "MulVS": "Scale", "MulSV": "ScaleR", "DivVS": "ScaleInv", "DivSV": "ScaleInvR", "PowVS": "PowOf", "PowSV": "PowOfR", "AddIncr": "IncrAdd", "SubIncr": "IncrSub", "MulIncr": "IncrMul", "DivIncr": "IncrDiv", "PowIncr": "IncrPow", "ModIncr": "IncrMod", } var arithBinOps []arithOp var cmpBinOps []cmpOp var typedAriths []TypedBinOp var typedCmps []TypedBinOp var conditionalUnaries []unaryOp 
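// For orientation: the symbol tables above are spliced into the loop templates
// in body.go. As a rough sketch — the function name here is illustrative, not
// the generator's actual output — the "+" entry of arithSymbolTemplates
// combined with genericLoopRaw and basicSet expands to code shaped like:
//
//	func addF64(a, b []float64) {
//		for i := range a {
//			a[i] = a[i] + b[i]	// {{.Range}}[i] = {{.Left}} + {{.Right}}
//		}
//	}
//
// The real generated functions differ in naming and error handling.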
var unconditionalUnaries []unaryOp
var specialUnaries []UnaryOp
var typedCondUnaries []TypedUnaryOp
var typedUncondUnaries []TypedUnaryOp
var typedSpecialUnaries []TypedUnaryOp

var allKinds []reflect.Kind

func init() {
	// kinds
	for k := reflect.Invalid + 1; k < reflect.UnsafePointer+1; k++ {
		allKinds = append(allKinds, k)
	}

	// ops
	arithBinOps = []arithOp{
		{basicBinOp{"", "Add", false, isAddable}, "numberTypes", true, 0, false, "", true, false},
		{basicBinOp{"", "Sub", false, isNumber}, "numberTypes", false, 0, true, "Add", false, true},
		{basicBinOp{"", "Mul", false, isNumber}, "numberTypes", true, 1, false, "", true, false},
		{basicBinOp{"", "Div", false, isNumber}, "numberTypes", false, 1, true, "Mul", false, false},
		{basicBinOp{"", "Pow", true, isFloatCmplx}, "floatcmplxTypes", true, 1, false, "", false, false},
		{basicBinOp{"", "Mod", false, isNonComplexNumber}, "nonComplexNumberTypes", false, 0, false, "", false, false},
	}
	for i := range arithBinOps {
		arithBinOps[i].symbol = arithSymbolTemplates[i]
	}

	cmpBinOps = []cmpOp{
		{basicBinOp{"", "Gt", false, isOrd}, "ordTypes", "Lt", true, false},
		{basicBinOp{"", "Gte", false, isOrd}, "ordTypes", "Lte", true, false},
		{basicBinOp{"", "Lt", false, isOrd}, "ordTypes", "Gt", true, false},
		{basicBinOp{"", "Lte", false, isOrd}, "ordTypes", "Gte", true, false},
		{basicBinOp{"", "Eq", false, isEq}, "eqTypes", "Eq", true, true},
		{basicBinOp{"", "Ne", false, isEq}, "eqTypes", "Ne", false, true},
	}
	for i := range cmpBinOps {
		cmpBinOps[i].symbol = cmpSymbolTemplates[i]
	}

	conditionalUnaries = []unaryOp{
		{"", "Abs", false, isSignedNumber, "signedTypes", ""},
		{"", "Sign", false, isSignedNumber, "signedTypes", ""},
	}
	for i := range conditionalUnaries {
		conditionalUnaries[i].symbol = nonFloatConditionalUnarySymbolTemplates[i]
	}

	unconditionalUnaries = []unaryOp{
		{"", "Neg", false, isNumber, "numberTypes", "Neg"},
		{"", "Inv", false, isNumber, "numberTypes", ""},
		{"", "Square", false, isNumber, "numberTypes", "Sqrt"},
		{"", "Cube", false, isNumber, "numberTypes", "Cbrt"},
		{"", "Exp", true, isFloatCmplx, "floatcmplxTypes", "Log"},
		{"", "Tanh", true, isFloatCmplx, "floatcmplxTypes", ""},
		{"", "Log", true, isFloatCmplx, "floatcmplxTypes", "Exp"},
		{"", "Log2", true, isFloat, "floatTypes", ""},
		{"", "Log10", true, isFloatCmplx, "floatcmplxTypes", ""},
		{"", "Sqrt", true, isFloatCmplx, "floatcmplxTypes", "Square"},
		{"", "Cbrt", true, isFloat, "floatTypes", "Cube"},
		{"", "InvSqrt", true, isFloat, "floatTypes", ""}, // TODO: cmplx requires too much finagling to the template.
Come back to it later } nonF := len(unconditionalNumUnarySymbolTemplates) for i := range unconditionalNumUnarySymbolTemplates { unconditionalUnaries[i].symbol = unconditionalNumUnarySymbolTemplates[i] } for i := range unconditionalFloatUnarySymbolTemplates { unconditionalUnaries[i+nonF].symbol = unconditionalFloatUnarySymbolTemplates[i] } specialUnaries = []UnaryOp{ specialUnaryOp{unaryOp{clampBody, "Clamp", false, isNonComplexNumber, "nonComplexNumberTypes", ""}, []string{"min", "max"}}, } // typed operations for _, bo := range arithBinOps { for _, k := range allKinds { tb := TypedBinOp{ BinOp: bo, k: k, } typedAriths = append(typedAriths, tb) } } for _, bo := range cmpBinOps { for _, k := range allKinds { tb := TypedBinOp{ BinOp: bo, k: k, } typedCmps = append(typedCmps, tb) } } for _, uo := range conditionalUnaries { for _, k := range allKinds { tu := TypedUnaryOp{ UnaryOp: uo, k: k, } typedCondUnaries = append(typedCondUnaries, tu) } } for _, uo := range unconditionalUnaries { for _, k := range allKinds { tu := TypedUnaryOp{ UnaryOp: uo, k: k, } typedUncondUnaries = append(typedUncondUnaries, tu) } } for _, uo := range specialUnaries { for _, k := range allKinds { tu := TypedUnaryOp{ UnaryOp: uo, k: k, } typedSpecialUnaries = append(typedSpecialUnaries, tu) } } } tensor-0.9.24/genlib2/dense_argmethods_tests.go000066400000000000000000000215271426512615100215400ustar00rootroot00000000000000package main import ( "fmt" "io" "reflect" "text/template" ) type ArgMethodTestData struct { Kind reflect.Kind Data []int } var data = []int{ 3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3, } const argMethodsDataRaw = `var basicDense{{short .Kind}} = New(WithShape(2,3,4,5,2), WithBacking([]{{asType .Kind}}{ {{range .Data -}}{{.}}, {{end -}} })) ` const argmaxCorrect = `var argmaxCorrect = []struct { shape Shape data []int }{ {Shape{3,4,5,2}, []int{ 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, }}, {Shape{2,4,5,2}, []int{ 1, 0, 1, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 2, 2, 0, 1, 1, 2, 2, 1, 0, 2, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 1, 0, 1, 2, 1, 0, 1, 1, 2, 0, 1, 0, 0, 0, 0, 2, 1, 0, 1, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 2, 0, }}, {Shape{2,3,5,2}, []int{ 3, 2, 2, 1, 1, 2, 1, 0, 0, 1, 3, 2, 1, 0, 1, 0, 2, 2, 3, 0, 1, 0, 1, 3, 0, 2, 3, 3, 2, 1, 2, 2, 0, 0, 1, 3, 2, 0, 1, 2, 0, 3, 0, 1, 0, 1, 3, 2, 2, 1, 2, 1, 3, 1, 2, 0, 2, 2, 0, 0, }}, {Shape{2,3,4,2}, []int{ 4, 3, 2, 1, 1, 2, 0, 1, 1, 1, 1, 3, 1, 0, 0, 2, 2, 1, 0, 4, 2, 2, 3, 1, 1, 1, 0, 2, 0, 0, 2, 2, 1, 4, 0, 1, 4, 1, 1, 0, 4, 3, 1, 1, 2, 3, 1, 1, }}, 
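	// each entry is the expected argmax along one axis (0 through 4) of the (2,3,4,5,2)-shaped basicDense fixture; this last entry is axis 4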
{Shape{2,3,4,5}, []int{ 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, }}, } ` const argminCorrect = `var argminCorrect = []struct { shape Shape data []int }{ {Shape{3,4,5,2}, []int{ 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, }}, {Shape{2,4,5,2}, []int{ 2, 1, 0, 0, 1, 2, 1, 2, 1, 2, 1, 0, 0, 2, 1, 0, 1, 2, 0, 1, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 2, 1, 0, 1, 0, 2, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 2, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 2, 2, 0, 0, 1, 0, 2, 2, 0, 0, 0, 1, 2, 2, 2, 2, 1, 1, }}, {Shape{2,3,5,2}, []int{ 0, 1, 0, 2, 2, 1, 3, 2, 3, 2, 1, 0, 3, 3, 0, 1, 0, 3, 0, 2, 0, 1, 0, 1, 3, 0, 2, 1, 0, 0, 3, 1, 3, 1, 2, 2, 1, 2, 0, 1, 3, 0, 1, 0, 1, 0, 2, 1, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 1, }}, {Shape{2,3,4,2}, []int{ 1, 0, 0, 0, 2, 3, 4, 0, 3, 0, 3, 0, 4, 4, 3, 1, 0, 2, 3, 0, 3, 0, 0, 2, 4, 4, 3, 4, 2, 3, 0, 0, 4, 0, 1, 3, 3, 2, 0, 4, 2, 1, 4, 2, 4, 0, 2, 0, }}, {Shape{2,3,4,5}, []int{ 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, }}, } ` type ArgMethodTest struct { Kind reflect.Kind ArgMethod string ArgAllAxes int } const testArgMethodsRaw = `func TestDense_{{title .ArgMethod}}_{{short .Kind}}(t *testing.T){ assert := assert.New(t) var T, {{.ArgMethod}} *Dense var err error T = basicDense{{short .Kind}}.Clone().(*Dense) for i:= 0; i < T.Dims(); i++ { if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(i); err != nil { t.Error(err) continue } assert.True({{.ArgMethod}}Correct[i].shape.Eq({{.ArgMethod}}.Shape()), "{{title .ArgMethod}}(%d) error. Want shape %v. Got %v", i, {{.ArgMethod}}Correct[i].shape) assert.Equal({{.ArgMethod}}Correct[i].data, {{.ArgMethod}}.Data(), "{{title .ArgMethod}}(%d) error. 
", i) } // test all axes if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Error(err) return } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal({{.ArgAllAxes}}, {{.ArgMethod}}.ScalarValue()) {{if hasPrefix .Kind.String "float" -}} // test with NaN T = New(WithShape(4), WithBacking([]{{asType .Kind}}{1,2,{{mathPkg .Kind}}NaN(), 4})) if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal(2, {{.ArgMethod}}.ScalarValue(), "NaN test") // test with Mask and Nan T = New(WithShape(4), WithBacking([]{{asType .Kind}}{1,{{if eq .ArgMethod "argmax"}}9{{else}}-9{{end}},{{mathPkg .Kind}}NaN(), 4}, []bool{false,true,true,false})) if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal({{if eq .ArgMethod "argmin"}}0{{else}}3{{end}}, {{.ArgMethod}}.ScalarValue(), "Masked NaN test") // test with +Inf T = New(WithShape(4), WithBacking([]{{asType .Kind}}{1,2,{{mathPkg .Kind}}Inf(1),4})) if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Errorf("Failed test with +Inf: %v", err) } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal({{if eq .ArgMethod "argmax"}}2{{else}}0{{end}}, {{.ArgMethod}}.ScalarValue(), "+Inf test") // test with Mask and +Inf T = New(WithShape(4), WithBacking([]{{asType .Kind}}{1,{{if eq .ArgMethod "argmax"}}9{{else}}-9{{end}},{{mathPkg .Kind}}Inf(1), 4}, []bool{false,true,true,false})) if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal({{if eq .ArgMethod "argmin"}}0{{else}}3{{end}}, {{.ArgMethod}}.ScalarValue(), "Masked NaN test") // test with -Inf T = New(WithShape(4), WithBacking([]{{asType .Kind}}{1,2,{{mathPkg .Kind}}Inf(-1),4 })) if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Errorf("Failed test with -Inf: %v", err) } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal({{if eq .ArgMethod "argmin"}}2{{else}}3{{end}}, {{.ArgMethod}}.ScalarValue(), "+Inf test") // test with Mask and -Inf T = New(WithShape(4), WithBacking([]{{asType .Kind}}{1,{{if eq .ArgMethod "argmax"}}9{{else}}-9{{end}},{{mathPkg .Kind}}Inf(-1), 4}, []bool{false,true,true,false})) if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(AllAxes); err != nil { t.Errorf("Failed test with NaN: %v", err) } assert.True({{.ArgMethod}}.IsScalar()) assert.Equal({{if eq .ArgMethod "argmin"}}0{{else}}3{{end}}, {{.ArgMethod}}.ScalarValue(), "Masked -Inf test") {{end -}} // with different engine T = basicDense{{short .Kind}}.Clone().(*Dense) WithEngine(dummyEngine2{})(T) for i:= 0; i < T.Dims(); i++ { if {{.ArgMethod}}, err = T.{{title .ArgMethod}}(i); err != nil { t.Error(err) continue } assert.True({{.ArgMethod}}Correct[i].shape.Eq({{.ArgMethod}}.Shape()), "{{title .ArgMethod}}(%d) error. Want shape %v. Got %v", i, {{.ArgMethod}}Correct[i].shape) assert.Equal({{.ArgMethod}}Correct[i].data, {{.ArgMethod}}.Data(), "{{title .ArgMethod}}(%d) error. 
", i) } // idiotsville _, err = T.{{title .ArgMethod}}(10000) assert.NotNil(err) } ` var ( argMethodsData *template.Template testArgMethods *template.Template ) func init() { argMethodsData = template.Must(template.New("argmethodsData").Funcs(funcs).Parse(argMethodsDataRaw)) testArgMethods = template.Must(template.New("testArgMethod").Funcs(funcs).Parse(testArgMethodsRaw)) } func generateArgmethodsTests(f io.Writer, generic Kinds) { fmt.Fprintf(f, "/* Test data */\n\n") for _, k := range generic.Kinds { if isNumber(k) && isOrd(k) { op := ArgMethodTestData{k, data} argMethodsData.Execute(f, op) } } fmt.Fprintf(f, "\n%s\n%s\n", argmaxCorrect, argminCorrect) for _, k := range generic.Kinds { if isNumber(k) && isOrd(k) { op := ArgMethodTest{k, "argmax", 7} testArgMethods.Execute(f, op) op = ArgMethodTest{k, "argmin", 11} testArgMethods.Execute(f, op) } } } tensor-0.9.24/genlib2/dense_arith.go000066400000000000000000000031601426512615100172610ustar00rootroot00000000000000package main import ( "fmt" "io" ) type DenseBinOp struct { MethodName string Name string Scalar bool } func (fn *DenseBinOp) Write(w io.Writer) { type tmp struct { Left, Right string } var ds tmp ds.Left = "t" ds.Right = "other" name := fn.MethodName if fn.Scalar { name += "Scalar" } if tmpl, ok := arithDocStrings[name]; ok { tmpl.Execute(w, ds) } if tmpl, ok := cmpDocStrings[name]; ok { tmpl.Execute(w, ds) } if fn.Scalar { fmt.Fprintf(w, "func (t *Dense) %sScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {\n", fn.MethodName) denseArithScalarBody.Execute(w, fn) } else { fmt.Fprintf(w, "func (t *Dense) %s(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {\n", fn.MethodName) denseArithBody.Execute(w, fn) } w.Write([]byte("}\n\n")) } func generateDenseArith(f io.Writer, ak Kinds) { var methods []*DenseBinOp for _, bo := range arithBinOps { meth := &DenseBinOp{ MethodName: bo.Name(), Name: bo.Name(), } methods = append(methods, meth) } for _, meth := range methods { meth.Write(f) meth.Scalar = true } for _, meth := range methods { meth.Write(f) } } func generateDenseCmp(f io.Writer, ak Kinds) { var methods []*DenseBinOp for _, cbo := range cmpBinOps { methName := cbo.Name() if methName == "Eq" || methName == "Ne" { methName = "El" + cbo.Name() } meth := &DenseBinOp{ MethodName: methName, Name: cbo.Name(), } methods = append(methods, meth) } for _, meth := range methods { meth.Write(f) meth.Scalar = true } for _, meth := range methods { meth.Write(f) } } tensor-0.9.24/genlib2/dense_compat.go000066400000000000000000000225131426512615100174400ustar00rootroot00000000000000package main import ( "io" "text/template" ) const importsArrowRaw = `import ( arrowArray "github.com/apache/arrow/go/arrow/array" "github.com/apache/arrow/go/arrow/bitutil" arrowTensor "github.com/apache/arrow/go/arrow/tensor" arrow "github.com/apache/arrow/go/arrow" ) ` const conversionsRaw = `func convFromFloat64s(to Dtype, data []float64) interface{} { switch to { {{range .Kinds -}} {{if isNumber . 
-}}
	case {{reflectKind .}}:
		{{if eq .String "float64" -}}
		retVal := make([]float64, len(data))
		copy(retVal, data)
		return retVal
		{{else if eq .String "float32" -}}
		retVal := make([]float32, len(data))
		for i, v := range data {
			switch {
			case math.IsNaN(v):
				retVal[i] = math32.NaN()
			case math.IsInf(v, 1):
				retVal[i] = math32.Inf(1)
			case math.IsInf(v, -1):
				retVal[i] = math32.Inf(-1)
			default:
				retVal[i] = float32(v)
			}
		}
		return retVal
		{{else if eq .String "complex64" -}}
		retVal := make([]complex64, len(data))
		for i, v := range data {
			switch {
			case math.IsNaN(v):
				retVal[i] = complex64(cmplx.NaN())
			case math.IsInf(v, 0):
				retVal[i] = complex64(cmplx.Inf())
			default:
				retVal[i] = complex(float32(v), float32(0))
			}
		}
		return retVal
		{{else if eq .String "complex128" -}}
		retVal := make([]complex128, len(data))
		for i, v := range data {
			switch {
			case math.IsNaN(v):
				retVal[i] = cmplx.NaN()
			case math.IsInf(v, 0):
				retVal[i] = cmplx.Inf()
			default:
				retVal[i] = complex(v, float64(0))
			}
		}
		return retVal
		{{else -}}
		retVal := make([]{{asType .}}, len(data))
		for i, v := range data {
			switch {
			case math.IsNaN(v), math.IsInf(v, 0):
				retVal[i] = 0
			default:
				retVal[i] = {{asType .}}(v)
			}
		}
		return retVal
		{{end -}}
	{{end -}}
	{{end -}}
	default:
		panic("Unsupported Dtype")
	}
}

func convToFloat64s(t *Dense) (retVal []float64) {
	retVal = make([]float64, t.len())
	switch t.t {
	{{range .Kinds -}}
	{{if isNumber . -}}
	case {{reflectKind .}}:
		{{if eq .String "float64" -}}
		return t.{{sliceOf .}}
		{{else if eq .String "float32" -}}
		for i, v := range t.{{sliceOf .}} {
			switch {
			case math32.IsNaN(v):
				retVal[i] = math.NaN()
			case math32.IsInf(v, 1):
				retVal[i] = math.Inf(1)
			case math32.IsInf(v, -1):
				retVal[i] = math.Inf(-1)
			default:
				retVal[i] = float64(v)
			}
		}
		{{else if eq .String "complex64" -}}
		for i, v := range t.{{sliceOf .}} {
			switch {
			case cmplx.IsNaN(complex128(v)):
				retVal[i] = math.NaN()
			case cmplx.IsInf(complex128(v)):
				retVal[i] = math.Inf(1)
			default:
				retVal[i] = float64(real(v))
			}
		}
		{{else if eq .String "complex128" -}}
		for i, v := range t.{{sliceOf .}} {
			switch {
			case cmplx.IsNaN(v):
				retVal[i] = math.NaN()
			case cmplx.IsInf(v):
				retVal[i] = math.Inf(1)
			default:
				retVal[i] = real(v)
			}
		}
		{{else -}}
		for i, v := range t.{{sliceOf .}} {
			retVal[i] = float64(v)
		}
		{{end -}}
		return retVal
	{{end -}}
	{{end -}}
	default:
		panic(fmt.Sprintf("Cannot convert *Dense of %v to []float64", t.t))
	}
}

func convToFloat64(x interface{}) float64 {
	switch xt := x.(type) {
	{{range .Kinds -}}
	{{if isNumber . -}}
	case {{asType .}}:
		{{if eq .String "float64" -}}
		return xt
		{{else if eq .String "complex64" -}}
		return float64(real(xt))
		{{else if eq .String "complex128" -}}
		return real(xt)
		{{else -}}
		return float64(xt)
		{{end -}}
	{{end -}}
	{{end -}}
	default:
		panic("Cannot convert to float64")
	}
}
`

const compatRaw = `// FromMat64 converts a *mat.Dense (from gonum.org/v1/gonum/mat) into a *Dense.
func FromMat64(m *mat.Dense, opts ...FuncOpt) *Dense {
	r, c := m.Dims()
	fo := ParseFuncOpts(opts...)
	defer returnOpOpt(fo)
	toCopy := fo.Safe()
	as := fo.As()
	if as.Type == nil {
		as = Float64
	}

	switch as.Kind() {
	{{range .Kinds -}}
	{{if isNumber . -}}
	case reflect.{{reflectKind .}}:
		{{if eq .String "float64" -}}
		var backing []float64
		if toCopy {
			backing = make([]float64, len(m.RawMatrix().Data))
			copy(backing, m.RawMatrix().Data)
		} else {
			backing = m.RawMatrix().Data
		}
		{{else -}}
		backing := convFromFloat64s({{asType . 
| title}}, m.RawMatrix().Data).([]{{asType .}}) {{end -}} retVal := New(WithBacking(backing), WithShape(r, c)) return retVal {{end -}} {{end -}} default: panic(fmt.Sprintf("Unsupported Dtype - cannot convert float64 to %v", as)) } panic("Unreachable") } // ToMat64 converts a *Dense to a *mat.Dense. All the values are converted into float64s. // This function will only convert matrices. Anything *Dense with dimensions larger than 2 will cause an error. func ToMat64(t *Dense, opts ...FuncOpt) (retVal *mat.Dense, err error) { // checks: if !t.IsNativelyAccessible() { return nil, errors.Errorf("Cannot convert *Dense to *mat.Dense. Data is inaccessible") } if !t.IsMatrix() { // error return nil, errors.Errorf("Cannot convert *Dense to *mat.Dense. Expected number of dimensions: <=2, T has got %d dimensions (Shape: %v)", t.Dims(), t.Shape()) } fo := ParseFuncOpts(opts...) defer returnOpOpt(fo) toCopy := fo.Safe() // fix dims r := t.Shape()[0] c := t.Shape()[1] var data []float64 switch { case t.t == Float64 && toCopy && !t.IsMaterializable(): data = make([]float64, t.len()) copy(data, t.Float64s()) case !t.IsMaterializable(): data = convToFloat64s(t) default: it := newFlatIterator(&t.AP) var next int for next, err = it.Next(); err == nil; next, err = it.Next() { if err = handleNoOp(err); err != nil { return } data = append(data, convToFloat64(t.Get(next))) } err = nil } retVal = mat.NewDense(r, c, data) return } ` type ArrowData struct { BinaryTypes []string FixedWidthTypes []string PrimitiveTypes []string } const compatArrowArrayRaw = `// FromArrowArray converts an "arrow/array".Interface into a Tensor of matching DataType. func FromArrowArray(a arrowArray.Interface) *Dense { a.Retain() defer a.Release() r := a.Len() // TODO(poopoothegorilla): instead of creating bool ValidMask maybe // bitmapBytes can be used from arrow API mask := make([]bool, r) for i := 0; i < r; i++ { mask[i] = a.IsNull(i) } switch a.DataType() { {{range .BinaryTypes -}} case arrow.BinaryTypes.{{.}}: {{if eq . "String" -}} backing := make([]string, r) for i := 0; i < r; i++ { backing[i] = a.(*arrowArray.{{.}}).Value(i) } {{else -}} backing := a.(*arrowArray.{{.}}).{{.}}Values() {{end -}} retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal {{end -}} {{range .FixedWidthTypes -}} case arrow.FixedWidthTypes.{{.}}: {{if eq . "Boolean" -}} backing := make([]bool, r) for i := 0; i < r; i++ { backing[i] = a.(*arrowArray.{{.}}).Value(i) } {{else -}} backing := a.(*arrowArray.{{.}}).{{.}}Values() {{end -}} retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal {{end -}} {{range .PrimitiveTypes -}} case arrow.PrimitiveTypes.{{.}}: backing := a.(*arrowArray.{{.}}).{{.}}Values() retVal := New(WithBacking(backing, mask), WithShape(r, 1)) return retVal {{end -}} default: panic(fmt.Sprintf("Unsupported Arrow DataType - %v", a.DataType())) } panic("Unreachable") } ` const compatArrowTensorRaw = `// FromArrowTensor converts an "arrow/tensor".Interface into a Tensor of matching DataType. 
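// // A minimal usage sketch (hypothetical, not part of the generated API; it assumes "arr" is an existing arrowArray.Interface holding 10 float64 values, mirroring the calls used in this package's tests): // ts := arrowTensor.NewFloat64(arr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) // defer ts.Release() // dense := FromArrowTensor(ts) // dense is a *Dense with shape (2, 5)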
func FromArrowTensor(a arrowTensor.Interface) *Dense { a.Retain() defer a.Release() if !a.IsContiguous() { panic("Non-contiguous data is Unsupported") } var shape []int for _, val := range a.Shape() { shape = append(shape, int(val)) } l := a.Len() validMask := a.Data().Buffers()[0].Bytes() dataOffset := a.Data().Offset() mask := make([]bool, l) for i := 0; i < l; i++ { mask[i] = len(validMask) != 0 && bitutil.BitIsNotSet(validMask, dataOffset+i) } switch a.DataType() { {{range .PrimitiveTypes -}} case arrow.PrimitiveTypes.{{.}}: backing := a.(*arrowTensor.{{.}}).{{.}}Values() if a.IsColMajor() { return New(WithShape(shape...), AsFortran(backing, mask)) } return New(WithShape(shape...), WithBacking(backing, mask)) {{end -}} default: panic(fmt.Sprintf("Unsupported Arrow DataType - %v", a.DataType())) } panic("Unreachable") } ` var ( importsArrow *template.Template conversions *template.Template compats *template.Template compatsArrowArray *template.Template compatsArrowTensor *template.Template ) func init() { importsArrow = template.Must(template.New("imports_arrow").Funcs(funcs).Parse(importsArrowRaw)) conversions = template.Must(template.New("conversions").Funcs(funcs).Parse(conversionsRaw)) compats = template.Must(template.New("compat").Funcs(funcs).Parse(compatRaw)) compatsArrowArray = template.Must(template.New("compat_arrow_array").Funcs(funcs).Parse(compatArrowArrayRaw)) compatsArrowTensor = template.Must(template.New("compat_arrow_tensor").Funcs(funcs).Parse(compatArrowTensorRaw)) } func generateDenseCompat(f io.Writer, generic Kinds) { // NOTE(poopoothegorilla): an alias is needed for the Arrow Array pkg to prevent naming // collisions importsArrow.Execute(f, generic) conversions.Execute(f, generic) compats.Execute(f, generic) arrowData := ArrowData{ BinaryTypes: arrowBinaryTypes, FixedWidthTypes: arrowFixedWidthTypes, PrimitiveTypes: arrowPrimitiveTypes, } compatsArrowArray.Execute(f, arrowData) compatsArrowTensor.Execute(f, arrowData) } tensor-0.9.24/genlib2/dense_compat_tests.go000066400000000000000000000207221426512615100206620ustar00rootroot00000000000000package main import ( "io" "text/template" ) const compatTestsRaw = `var toMat64Tests = []struct{ data interface{} sliced interface{} shape Shape dt Dtype }{ {{range .Kinds -}} {{if isNumber . -}} { Range({{asType . | title | strip}}, 0, 6), []{{asType .}}{0,1,3,4}, Shape{2,3}, {{asType . 
| title | strip}} }, {{end -}} {{end -}} } func TestToMat64(t *testing.T){ assert := assert.New(t) for i, tmt := range toMat64Tests { T := New(WithBacking(tmt.data), WithShape(tmt.shape...)) var m *mat.Dense var err error if m, err = ToMat64(T); err != nil { t.Errorf("ToMat basic test %d failed : %v", i, err) continue } conv := anyToFloat64s(tmt.data) assert.Equal(conv, m.RawMatrix().Data, "i %d from %v", i, tmt.dt) if T, err = sliceDense(T, nil, makeRS(0, 2)); err != nil{ t.Errorf("Slice failed %v", err) continue } if m, err = ToMat64(T); err != nil { t.Errorf("ToMat of slice test %d failed : %v", i, err) continue } conv = anyToFloat64s(tmt.sliced) assert.Equal(conv, m.RawMatrix().Data, "sliced test %d from %v", i, tmt.dt) t.Logf("Done") if tmt.dt == Float64 { T = New(WithBacking(tmt.data), WithShape(tmt.shape...)) if m, err = ToMat64(T, UseUnsafe()); err != nil { t.Errorf("ToMat64 unsafe test %d failed: %v", i, err) } conv = anyToFloat64s(tmt.data) assert.Equal(conv, m.RawMatrix().Data, "float64 unsafe i %d from %v", i, tmt.dt) conv[0] = 1000 assert.Equal(conv, m.RawMatrix().Data,"float64 unsafe i %d from %v", i, tmt.dt) conv[0] = 0 // reset for future tests that use the same backing } } // idiocy test T := New(Of(Float64), WithShape(2,3,4)) _, err := ToMat64(T) if err == nil { t.Error("Expected an error when trying to convert a 3-T to *mat.Dense") } } func TestFromMat64(t *testing.T){ assert := assert.New(t) var m *mat.Dense var T *Dense var backing []float64 for i, tmt := range toMat64Tests { backing = Range(Float64, 0, 6).([]float64) m = mat.NewDense(2, 3, backing) T = FromMat64(m) conv := anyToFloat64s(tmt.data) assert.Equal(conv, T.Float64s(), "test %d: []float64 from %v", i, tmt.dt) assert.True(T.Shape().Eq(tmt.shape)) T = FromMat64(m, As(tmt.dt)) assert.Equal(tmt.data, T.Data()) assert.True(T.Shape().Eq(tmt.shape)) if tmt.dt == Float64{ backing = Range(Float64, 0, 6).([]float64) m = mat.NewDense(2, 3, backing) T = FromMat64(m, UseUnsafe()) assert.Equal(backing, T.Float64s()) assert.True(T.Shape().Eq(tmt.shape)) backing[0] = 1000 assert.Equal(backing, T.Float64s(), "test %d - unsafe float64", i) } } } ` const compatArrowArrayTestsRaw = `var toArrowArrayTests = []struct{ data interface{} valid []bool dt arrow.DataType shape Shape }{ {{range .PrimitiveTypes -}} { data: Range({{.}}, 0, 6), valid: []bool{true, true, true, false, true, true}, dt: arrow.PrimitiveTypes.{{ . }}, shape: Shape{6,1}, }, {{end -}} } func TestFromArrowArray(t *testing.T){ assert := assert.New(t) var T *Dense pool := memory.NewGoAllocator() for i, taat := range toArrowArrayTests { var m arrowArray.Interface switch taat.dt { {{range .BinaryTypes -}} case arrow.BinaryTypes.{{ . }}: b := arrowArray.New{{ . }}Builder(pool) defer b.Release() b.AppendValues( {{if eq . "String" -}} []string{"0", "1", "2", "3", "4", "5"}, {{else -}} Range({{ . }}, 0, 6).([]{{lower . }}), {{end -}} taat.valid, ) m = b.NewArray() defer m.Release() {{end -}} {{range .FixedWidthTypes -}} case arrow.FixedWidthTypes.{{ . }}: b := arrowArray.New{{ . }}Builder(pool) defer b.Release() b.AppendValues( {{if eq . "Boolean" -}} []bool{true, false, true, false, true, false}, {{else -}} Range({{ . }}, 0, 6).([]{{lower . }}), {{end -}} taat.valid, ) m = b.NewArray() defer m.Release() {{end -}} {{range .PrimitiveTypes -}} case arrow.PrimitiveTypes.{{ . }}: b := arrowArray.New{{ . }}Builder(pool) defer b.Release() b.AppendValues( Range({{ . }}, 0, 6).([]{{lower . 
}}), taat.valid, ) m = b.NewArray() defer m.Release() {{end -}} default: t.Errorf("DataType not supported in tests: %v", taat.dt) } T = FromArrowArray(m) switch taat.dt { {{range .PrimitiveTypes -}} case arrow.PrimitiveTypes.{{ . }}: conv := taat.data.([]{{lower . }}) assert.Equal(conv, T.{{ . }}s(), "test %d: []{{lower . }} from %v", i, taat.dt) {{end -}} default: t.Errorf("DataType not supported in tests: %v", taat.dt) } for i, invalid := range T.Mask() { assert.Equal(taat.valid[i], !invalid) } assert.True(T.Shape().Eq(taat.shape)) } } ` const compatArrowTensorTestsRaw = `var toArrowTensorTests = []struct{ rowMajorData interface{} colMajorData interface{} rowMajorValid []bool colMajorValid []bool dt arrow.DataType shape Shape }{ {{range .PrimitiveTypes -}} { rowMajorData: []{{lower .}}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, colMajorData: []{{lower .}}{1, 6, 2, 7, 3, 8, 4, 9, 5, 10}, rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false}, colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false}, dt: arrow.PrimitiveTypes.{{ . }}, shape: Shape{2,5}, }, {{end -}} } func TestFromArrowTensor(t *testing.T){ assert := assert.New(t) var rowMajorT *Dense var colMajorT *Dense pool := memory.NewGoAllocator() for i, taat := range toArrowTensorTests { var rowMajorArr arrowArray.Interface var colMajorArr arrowArray.Interface var rowMajor arrowTensor.Interface var colMajor arrowTensor.Interface switch taat.dt { {{range .PrimitiveTypes -}} case arrow.PrimitiveTypes.{{ . }}: b := arrowArray.New{{ . }}Builder(pool) defer b.Release() b.AppendValues( []{{lower . }}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) rowMajorArr = b.NewArray() defer rowMajorArr.Release() b.AppendValues( []{{lower .}}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, taat.rowMajorValid, ) colMajorArr = b.NewArray() defer colMajorArr.Release() rowMajor = arrowTensor.New{{.}}(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"}) defer rowMajor.Release() colMajor = arrowTensor.New{{.}}(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.{{ . }}SizeBytes), int64(arrow.{{ . 
}}SizeBytes * 2)}, []string{"x", "y"}) defer colMajor.Release() {{end -}} default: t.Errorf("DataType not supported in tests: %v", taat.dt) } rowMajorT = FromArrowTensor(rowMajor) colMajorT = FromArrowTensor(colMajor) assert.Equal(taat.rowMajorData, rowMajorT.Data(), "test %d: row major %v", i, taat.dt) assert.Equal(len(taat.rowMajorValid), len(rowMajorT.Mask()), "test %d: row major %v mask length incorrect", i, taat.dt) for i, invalid := range rowMajorT.Mask() { assert.Equal(taat.rowMajorValid[i], !invalid, "test %d: row major %v mask value incorrect", i, taat.dt) } assert.True(colMajorT.Shape().Eq(taat.shape)) assert.Equal(taat.colMajorData, colMajorT.Data(), "test %d: column major %v", i, taat.dt) assert.Equal(len(taat.colMajorValid), len(colMajorT.Mask()), "test %d: column major %v mask length incorrect", i, taat.dt) for i, invalid := range colMajorT.Mask() { assert.Equal(taat.colMajorValid[i], !invalid, "test %d: column major %v mask value incorrect", i, taat.dt) } assert.True(rowMajorT.Shape().Eq(taat.shape)) } } ` var ( compatTests *template.Template compatArrowArrayTests *template.Template compatArrowTensorTests *template.Template ) func init() { compatTests = template.Must(template.New("testCompat").Funcs(funcs).Parse(compatTestsRaw)) compatArrowArrayTests = template.Must(template.New("testArrowArrayCompat").Funcs(funcs).Parse(compatArrowArrayTestsRaw)) compatArrowTensorTests = template.Must(template.New("testArrowTensorCompat").Funcs(funcs).Parse(compatArrowTensorTestsRaw)) } func generateDenseCompatTests(f io.Writer, generic Kinds) { // NOTE(poopoothegorilla): an alias is needed for the Arrow Array pkg to prevent naming // collisions importsArrow.Execute(f, generic) compatTests.Execute(f, generic) arrowData := ArrowData{ BinaryTypes: arrowBinaryTypes, FixedWidthTypes: arrowFixedWidthTypes, PrimitiveTypes: arrowPrimitiveTypes, } compatArrowArrayTests.Execute(f, arrowData) compatArrowTensorTests.Execute(f, arrowData) } tensor-0.9.24/genlib2/dense_cons.go000066400000000000000000000042361426512615100171210ustar00rootroot00000000000000package main import ( "io" "text/template" ) const onesRaw = `// Ones creates a *Dense with the provided shape and type func Ones(dt Dtype, shape ...int) *Dense { d := recycledDense(dt, shape) switch d.t.Kind() { {{range .Kinds -}} {{if isNumber . -}} case reflect.{{reflectKind .}}: d.Memset({{asType .}}(1)) {{end -}} {{end -}} case reflect.Bool: d.Memset(true) default: // TODO: add a Oner interface } return d } ` const Iraw = `// I creates an identity matrix (usually a square matrix) with 1s along the diagonal and zeroes elsewhere, like so: // Matrix(4,4) // ⎡1 0 0 0⎤ // ⎢0 1 0 0⎥ // ⎢0 0 1 0⎥ // ⎣0 0 0 1⎦ // While technically an identity matrix is a square matrix, in an attempt to keep feature parity with Numpy, // the I() function allows you to create non square matrices, as well as an index to start the diagonals. 
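// The index k picks the diagonal to fill: k=0 is the main diagonal, a positive k selects a diagonal above the main one, and a negative k selects one below it.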
// // For example: // T = I(Float64, 4, 4, 1) // Yields: // ⎡0 1 0 0⎤ // ⎢0 0 1 0⎥ // ⎢0 0 0 1⎥ // ⎣0 0 0 0⎦ // // The index k can also be a negative number: // T = I(Float64, 4, 4, -1) // Yields: // ⎡0 0 0 0⎤ // ⎢1 0 0 0⎥ // ⎢0 1 0 0⎥ // ⎣0 0 1 0⎦ func I(dt Dtype, r, c, k int) *Dense{ ret := New(Of(dt), WithShape(r,c)) i := k if k < 0 { i = (-k) * c } var s *Dense var err error end := c - k if end > r { s, err = sliceDense(ret, nil) } else { s, err = sliceDense(ret, rs{0, end, 1}) } if err != nil { panic(err) } var nexts []int iter := newFlatIterator(&s.AP) nexts, err = iter.Slice(rs{i, s.Size(), c + 1}) switch s.t.Kind() { {{range .Kinds -}} {{if isNumber . -}} case reflect.{{reflectKind .}}: data := s.{{sliceOf .}} for _, v := range nexts { data[v] = 1 } {{end -}} {{end -}} } // TODO: create Oner interface for custom types return ret } ` var ( ones *template.Template eye *template.Template ) func init() { ones = template.Must(template.New("ones").Funcs(funcs).Parse(onesRaw)) eye = template.Must(template.New("eye").Funcs(funcs).Parse(Iraw)) } func generateDenseConstructionFns(f io.Writer, generic Kinds) { ones.Execute(f, generic) eye.Execute(f, generic) } tensor-0.9.24/genlib2/dense_cons_tests.go000066400000000000000000000055411426512615100203430ustar00rootroot00000000000000package main import ( "io" "text/template" ) const onesTestsRaw = `var onesTests = []struct { of Dtype shape Shape correct interface{} }{ {{range .Kinds -}} {{if isNumber . -}} { {{asType . | title | strip}}, ScalarShape(), {{asType .}}(1)}, { {{asType . | title | strip}}, Shape{2,2}, []{{asType .}}{1,1,1,1}}, {{end -}} {{end -}} {Bool, ScalarShape(), true}, {Bool, Shape{2,2}, []bool{true, true, true, true}}, } func TestOnes(t *testing.T){ assert := assert.New(t) for _, ot := range onesTests{ T := Ones(ot.of, ot.shape...) assert.True(ot.shape.Eq(T.Shape())) assert.Equal(ot.correct, T.Data()) } } ` const eyeTestsRaw = `// yes, it's a pun on eye tests, stop asking and go see your optometrist var eyeTests = []struct{ E Dtype R, C, K int correct interface{} }{ {{range .Kinds -}} {{if isNumber . -}} { {{asType . | title | strip}}, 4,4, 0, []{{asType .}}{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}}, { {{asType . | title | strip}}, 4,4, 1, []{{asType .}}{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, { {{asType . | title | strip}}, 4,4, 2, []{{asType .}}{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}}, { {{asType . | title | strip}}, 4,4, 3, []{{asType .}}{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, { {{asType . | title | strip}}, 4,4, 4, []{{asType .}}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, { {{asType . | title | strip}}, 4,4, -1, []{{asType .}}{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}}, { {{asType . | title | strip}}, 4,4, -2, []{{asType .}}{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}}, { {{asType . | title | strip}}, 4,4, -3, []{{asType .}}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}}, { {{asType . | title | strip}}, 4,4, -4, []{{asType .}}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, { {{asType . | title | strip}}, 4,5, 0, []{{asType .}}{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}}, { {{asType . | title | strip}}, 4,5, 1, []{{asType .}}{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}}, { {{asType . 
| title | strip}}, 4,5, -1, []{{asType .}}{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}}, {{end -}} {{end -}} } func TestI(t *testing.T){ assert := assert.New(t) var T Tensor for i, it := range eyeTests { T = I(it.E, it.R, it.C, it.K) assert.True(Shape{it.R, it.C}.Eq(T.Shape())) assert.Equal(it.correct, T.Data(), "Test %d-R: %d, C: %d K: %d", i, it.R, it.C, it.K) } } ` var ( onesTests *template.Template eyeTests *template.Template ) func init() { onesTests = template.Must(template.New("onesTest").Funcs(funcs).Parse(onesTestsRaw)) eyeTests = template.Must(template.New("eyeTest").Funcs(funcs).Parse(eyeTestsRaw)) } func generateDenseConsTests(f io.Writer, generic Kinds) { onesTests.Execute(f, generic) eyeTests.Execute(f, generic) } tensor-0.9.24/genlib2/dense_getset.go000066400000000000000000000057531426512615100174570ustar00rootroot00000000000000package main import ( "io" "text/template" ) const copySlicedRaw = `func copySliced(dest *Dense, dstart, dend int, src *Dense, sstart, send int) int{ if dest.t != src.t { panic("Cannot copy arrays of different types") } if src.IsMasked(){ mask:=dest.mask if cap(dest.mask) < dend{ mask = make([]bool, dend) } copy(mask, dest.mask) dest.mask=mask copy(dest.mask[dstart:dend], src.mask[sstart:send]) } switch dest.t { {{range .Kinds -}} {{if isParameterized .}} {{else -}} case {{reflectKind .}}: return copy(dest.{{sliceOf .}}[dstart:dend], src.{{sliceOf .}}[sstart:send]) {{end -}} {{end -}} default: dv := reflect.ValueOf(dest.v) dv = dv.Slice(dstart, dend) sv := reflect.ValueOf(src.v) sv = sv.Slice(sstart, send) return reflect.Copy(dv, sv) } } ` const copyIterRaw = `func copyDenseIter(dest, src *Dense, diter, siter *FlatIterator) (int, error) { if dest.t != src.t { panic("Cannot copy arrays of different types") } if diter == nil && siter == nil && !dest.IsMaterializable() && !src.IsMaterializable() { return copyDense(dest, src), nil } if diter == nil { diter = newFlatIterator(&dest.AP) } if siter == nil { siter = newFlatIterator(&src.AP) } isMasked:= src.IsMasked() if isMasked{ if cap(dest.mask) 3 && a.Dtype() != Bool { t.Errorf("a %v", a.Data()) t.Errorf("q %v", q.Data()) t.Error("Expected *Dense to be not equal") return false } return true } if err := quick.Check(eqFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Failed to perform equality checks") } }` var ( GetTest *template.Template MemsetTest *template.Template ZeroTest *template.Template ) func init() { GetTest = template.Must(template.New("GetTest").Funcs(funcs).Parse(getTestRaw)) MemsetTest = template.Must(template.New("MemsetTest").Funcs(funcs).Parse(memsetTestRaw)) ZeroTest = template.Must(template.New("ZeroTest").Funcs(funcs).Parse(zeroTestRaw)) } func generateDenseGetSetTests(f io.Writer, generic Kinds) { tests := makeTests(generic) GetTest.Execute(f, tests) fmt.Fprintf(f, "\n\n") MemsetTest.Execute(f, tests) fmt.Fprintf(f, "\n\n") ZeroTest.Execute(f, makeZeroTests(generic)) fmt.Fprintf(f, "\n%v\n", denseEqTestRaw) } tensor-0.9.24/genlib2/dense_io.go000066400000000000000000000402771426512615100165730ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const writeNpyRaw = ` type binaryWriter struct { io.Writer err error seq int } func (w *binaryWriter) w(x interface{}) { if w.err != nil { return } w.err = binary.Write(w, binary.LittleEndian, x) w.seq++ } func (w *binaryWriter) Err() error { if w.err == nil { return nil } return errors.Wrapf(w.err, "Sequence %d", w.seq) } type binaryReader struct { io.Reader err error 
seq int } func (r *binaryReader) Read(data interface{}) { if r.err != nil { return } r.err = binary.Read(r.Reader, binary.LittleEndian, data) r.seq++ } func (r *binaryReader) Err() error { if r.err == nil { return nil } return errors.Wrapf(r.err, "Sequence %d", r.seq) } // WriteNpy writes the *Dense as a numpy-compatible serialized file. // // The format is very well documented here: // http://docs.scipy.org/doc/numpy/neps/npy-format.html // // Gorgonia specifically uses Version 1.0, as 65535 bytes should be more than enough for the headers. // The values are written in little endian order, because let's face it - // 90% of the world's computers are running on x86+ processors. // // This method does not close the writer. Closing (if needed) is deferred to the caller. // If the tensor is masked, invalid values are replaced by the default fill value. func (t *Dense) WriteNpy(w io.Writer) (err error) { var npdt string if npdt, err = t.t.numpyDtype(); err != nil { return } var header string if t.Dims() == 1 { // when t is a 1D vector, numpy expects "(N,)" instead of "(N)" which t.Shape() returns. header = "{'descr': '<%v', 'fortran_order': False, 'shape': (%d,)}" header = fmt.Sprintf(header, npdt, t.Shape()[0]) } else { header = "{'descr': '<%v', 'fortran_order': False, 'shape': %v}" header = fmt.Sprintf(header, npdt, t.Shape()) } padding := 16 - ((10 + len(header)) % 16) if padding > 0 { header = header + strings.Repeat(" ", padding) } bw := binaryWriter{Writer: w} bw.Write([]byte("\x93NUMPY")) // stupid magic bw.w(byte(1)) // major version bw.w(byte(0)) // minor version bw.w(uint16(len(header))) // 2 bytes (uint16) to denote header length if err = bw.Err(); err != nil { return err } bw.Write([]byte(header)) bw.seq = 0 if t.IsMasked() { fillval := t.FillValue() it := FlatMaskedIteratorFromDense(t) for i, err := it.Next(); err == nil; i, err = it.Next() { if t.mask[i] { bw.w(fillval) } else { bw.w(t.Get(i)) } } } else { for i := 0; i < t.len(); i++ { bw.w(t.Get(i)) } } return bw.Err() } ` const writeCSVRaw = `// WriteCSV writes the *Dense to a CSV. It accepts an optional string formatting ("%v", "%f", etc...), which controls what is written to the CSV. // If the tensor is masked, invalid values are replaced by the default fill value. func (t *Dense) WriteCSV(w io.Writer, formats ...string) (err error) { // checks: if !t.IsMatrix() { // error err = errors.Errorf("Cannot write *Dense to CSV. 
Expected number of dimensions: <=2, T has got %d dimensions (Shape: %v)", t.Dims(), t.Shape()) return } format := "%v" if len(formats) > 0{ format = formats[0] } cw := csv.NewWriter(w) it := IteratorFromDense(t) coord := it.Coord() // rows := t.Shape()[0] cols := t.Shape()[1] record := make([]string, 0, cols) var i, k, lastCol int isMasked:=t.IsMasked() fillval:= t.FillValue() fillstr:= fmt.Sprintf(format, fillval) for i, err = it.Next(); err == nil; i, err = it.Next() { record = append(record, fmt.Sprintf(format, t.Get(i))) if isMasked{ if t.mask[i] { record[k]=fillstr } k++ } if lastCol == cols-1 { if err = cw.Write(record); err != nil { // TODO: wrap errors return } cw.Flush() record = record[:0] } // cleanup switch { case t.IsRowVec(): // lastRow = coord[len(coord)-2] lastCol = coord[len(coord)-1] case t.IsColVec(): // lastRow = coord[len(coord)-1] lastCol = coord[len(coord)-2] case t.IsVector(): lastCol = coord[len(coord)-1] default: // lastRow = coord[len(coord)-2] lastCol = coord[len(coord)-1] } } return nil } ` const gobEncodeRaw = `// GobEncode implements gob.GobEncoder func (t *Dense) GobEncode() (p []byte, err error){ var buf bytes.Buffer encoder := gob.NewEncoder(&buf) if err = encoder.Encode(t.Shape()); err != nil { return } if err = encoder.Encode(t.Strides()); err != nil { return } if err = encoder.Encode(t.AP.o); err != nil { return } if err = encoder.Encode(t.AP.Δ); err != nil { return } if err = encoder.Encode(t.mask); err != nil { return } data := t.Data() if err = encoder.Encode(&data); err != nil { return } return buf.Bytes(), err } ` const gobDecodeRaw = `// GobDecode implements gob.GobDecoder func (t *Dense) GobDecode(p []byte) (err error){ buf := bytes.NewBuffer(p) decoder := gob.NewDecoder(buf) var shape Shape if err = decoder.Decode(&shape); err != nil { return } var strides []int if err = decoder.Decode(&strides); err != nil { return } var o DataOrder var tr Triangle if err = decoder.Decode(&o); err == nil { if err = decoder.Decode(&tr); err != nil { return } } t.AP.Init(shape, strides) t.AP.o = o t.AP.Δ = tr var mask []bool if err = decoder.Decode(&mask); err != nil { return } var data interface{} if err = decoder.Decode(&data); err != nil { return } t.fromSlice(data) t.addMask(mask) t.fix() if t.e == nil { t.e = StdEng{} } return t.sanity() } ` const npyDescRE = `var npyDescRE = regexp.MustCompile(` + "`" + `'descr':` + `\` + `s*'([^']*)'` + "`" + ")" const rowOrderRE = `var rowOrderRE = regexp.MustCompile(` + "`" + `'fortran_order':\s*(False|True)` + "`)" const shapeRE = `var shapeRE = regexp.MustCompile(` + "`" + `'shape':\s*\(([^\(]*)\)` + "`)" const readNpyRaw = `// ReadNpy reads NumPy formatted files into a *Dense func (t *Dense) ReadNpy(r io.Reader) (err error){ br := binaryReader{Reader: r} var magic [6]byte if br.Read(magic[:]); string(magic[:]) != "\x93NUMPY" { return errors.Errorf("Not a numpy file. 
Got %q as the magic number instead", string(magic[:])) } var version, minor byte if br.Read(&version); version != 1 { return errors.New("Only version 1.0 of numpy's serialization format is currently supported (65535 bytes ought to be enough for a header)") } if br.Read(&minor); minor != 0 { return errors.New("Only version 1.0 of numpy's serialization format is currently supported (65535 bytes ought to be enough for a header)") } var headerLen uint16 br.Read(&headerLen) header := make([]byte, int(headerLen)) br.Read(header) if err = br.Err(); err != nil { return } // extract stuff from header var match [][]byte if match = npyDescRE.FindSubmatch(header); match == nil { return errors.New("No dtype information in npy file") } // TODO: check for endianness. For now we assume everything is little endian if t.t, err = fromNumpyDtype(string(match[1][1:])); err != nil { return } if match = rowOrderRE.FindSubmatch(header); match == nil { return errors.New("No Row Order information found in the numpy file") } if string(match[1]) != "False" { return errors.New("Cannot yet read from Fortran Ordered Numpy files") } if match = shapeRE.FindSubmatch(header); match == nil { return errors.New("No shape information found in npy file") } sizesStr := strings.Split(string(match[1]), ",") var shape Shape for _, s := range sizesStr { s = strings.Trim(s, " ") if len(s) == 0 { break } var size int if size, err = strconv.Atoi(s); err != nil { return } shape = append(shape, size) } size := shape.TotalSize() if t.e == nil { t.e = StdEng{} } t.makeArray(size) switch t.t.Kind() { {{range .Kinds -}} case reflect.{{reflectKind .}}: data := t.{{sliceOf .}} for i := 0; i < size; i++ { br.Read(&data[i]) } {{end -}} } if err = br.Err(); err != nil { return err } t.AP.zeroWithDims(len(shape)) t.setShape(shape...) t.fix() return t.sanity() } ` const readCSVRaw = `// convFromStrs converts a []string to a slice of the Dtype provided. It takes a provided backing slice. // If into is nil, then a backing slice will be created. func convFromStrs(to Dtype, record []string, into interface{}) (interface{}, error) { var err error switch to.Kind() { {{range .Kinds -}} {{if isNumber . -}} {{if isOrd . -}} case reflect.{{reflectKind .}}: retVal := make([]{{asType .}}, len(record)) var backing []{{asType .}} if into == nil { backing = make([]{{asType .}}, 0, len(record)) } else { backing = into.([]{{asType .}}) } for i, v := range record { {{if eq .String "float64" -}} if retVal[i], err = strconv.ParseFloat(v, 64); err != nil { return nil, err } {{else if eq .String "float32" -}} var f float64 if f, err = strconv.ParseFloat(v, 32); err != nil { return nil, err } retVal[i] = float32(f) {{else if hasPrefix .String "int" -}} var i64 int64 if i64, err = strconv.ParseInt(v, 10, {{bitSizeOf .}}); err != nil { return nil, err } retVal[i] = {{asType .}}(i64) {{else if hasPrefix .String "uint" -}} var u uint64 if u, err = strconv.ParseUint(v, 10, {{bitSizeOf .}}); err != nil { return nil, err } retVal[i] = {{asType .}}(u) {{end -}} } backing = append(backing, retVal...) return backing, nil {{end -}} {{end -}} {{end -}} case reflect.String: var backing []string if into == nil { backing = make([]string, 0, len(record)) } else { backing = into.([]string) } backing = append(backing, record...) return backing, nil default: return nil, errors.Errorf(methodNYI, "convFromStrs", to) } } // ReadCSV reads a CSV into a *Dense. It will overwrite the underlying data. // // BUG(chewxy): reading CSV doesn't handle CSVs with different columns per row yet. 
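// // A minimal usage sketch (hypothetical input; it assumes the reader holds numeric CSV text): // t := new(Dense) // err := t.ReadCSV(strings.NewReader("1,2,3\n4,5,6"), As(Float64)) // on success, t is a 2×3 matrix of float64s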
func (t *Dense) ReadCSV(r io.Reader, opts ...FuncOpt) (err error) { fo := ParseFuncOpts(opts...) as := fo.As() if as.Type == nil { as = Float64 } cr := csv.NewReader(r) var record []string var rows, cols int var backing interface{} for { record, err = cr.Read() if err == io.EOF { break } else if err != nil { return } if backing, err = convFromStrs(as, record, backing); err != nil { return } cols = len(record) rows++ } t.fromSlice(backing) t.AP.zero() t.AP.SetShape(rows, cols) return nil } ` var fbEncodeDecodeRaw = `// FBEncode encodes to a byte slice using flatbuffers. // // Only natively accessible data can be encoded. func (t *Dense) FBEncode() ([]byte, error) { builder := flatbuffers.NewBuilder(1024) fb.DenseStartShapeVector(builder, len(t.shape)) for i := len(t.shape) - 1; i >= 0; i-- { builder.PrependInt32(int32(t.shape[i])) } shape := builder.EndVector(len(t.shape)) fb.DenseStartStridesVector(builder, len(t.strides)) for i := len(t.strides) - 1; i >= 0; i-- { builder.PrependInt32(int32(t.strides[i])) } strides := builder.EndVector(len(t.strides)) var o uint32 switch { case t.o.IsRowMajor() && t.o.IsContiguous(): o = 0 case t.o.IsRowMajor() && !t.o.IsContiguous(): o = 1 case t.o.IsColMajor() && t.o.IsContiguous(): o = 2 case t.o.IsColMajor() && !t.o.IsContiguous(): o = 3 } var triangle int32 switch t.Δ { case NotTriangle: triangle = fb.TriangleNOT_TRIANGLE case Upper: triangle = fb.TriangleUPPER case Lower: triangle = fb.TriangleLOWER case Symmetric: triangle = fb.TriangleSYMMETRIC } dt := builder.CreateString(t.Dtype().String()) data := t.byteSlice() fb.DenseStartDataVector(builder, len(data)) for i := len(data) - 1; i >= 0; i-- { builder.PrependUint8(data[i]) } databyte := builder.EndVector(len(data)) fb.DenseStart(builder) fb.DenseAddShape(builder, shape) fb.DenseAddStrides(builder, strides) fb.DenseAddO(builder, o) fb.DenseAddT(builder, triangle) fb.DenseAddType(builder, dt) fb.DenseAddData(builder, databyte) serialized := fb.DenseEnd(builder) builder.Finish(serialized) return builder.FinishedBytes(), nil } // FBDecode decodes a byte slice from a flatbuffer table into a *Dense func (t *Dense) FBDecode(buf []byte) error { serialized := fb.GetRootAsDense(buf, 0) o := serialized.O() switch o { case 0: t.o = 0 case 1: t.o = MakeDataOrder(NonContiguous) case 2: t.o = MakeDataOrder(ColMajor) case 3: t.o = MakeDataOrder(ColMajor, NonContiguous) } tri := serialized.T() switch tri { case fb.TriangleNOT_TRIANGLE: t.Δ = NotTriangle case fb.TriangleUPPER: t.Δ = Upper case fb.TriangleLOWER: t.Δ = Lower case fb.TriangleSYMMETRIC: t.Δ = Symmetric } t.shape = Shape(BorrowInts(serialized.ShapeLength())) for i := 0; i < serialized.ShapeLength(); i++ { t.shape[i] = int(int32(serialized.Shape(i))) } t.strides = BorrowInts(serialized.StridesLength()) for i := 0; i < serialized.StridesLength(); i++ { t.strides[i] = int(serialized.Strides(i)) } typ := string(serialized.Type()) for _, dt := range allTypes.set { if dt.String() == typ { t.t = dt break } } if t.e == nil { t.e = StdEng{} } t.makeArray(t.shape.TotalSize()) // allocated data. Now time to actually copy over the data db := t.byteSlice() copy(db, serialized.DataBytes()) t.fix() return t.sanity() } ` var pbEncodeDecodeRaw = `// PBEncode encodes the Dense into a protobuf byte slice. 
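// // A minimal round-trip sketch (hypothetical; error handling elided for brevity): // buf, _ := t.PBEncode() // t2 := new(Dense) // err := t2.PBDecode(buf) // t2 now describes the same shape, strides and data as t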
func (t *Dense) PBEncode() ([]byte, error) { var toSerialize pb.Dense toSerialize.Shape = make([]int32, len(t.shape)) for i, v := range t.shape { toSerialize.Shape[i] = int32(v) } toSerialize.Strides = make([]int32, len(t.strides)) for i, v := range t.strides { toSerialize.Strides[i] = int32(v) } switch { case t.o.IsRowMajor() && t.o.IsContiguous(): toSerialize.O = pb.RowMajorContiguous case t.o.IsRowMajor() && !t.o.IsContiguous(): toSerialize.O = pb.RowMajorNonContiguous case t.o.IsColMajor() && t.o.IsContiguous(): toSerialize.O = pb.ColMajorContiguous case t.o.IsColMajor() && !t.o.IsContiguous(): toSerialize.O = pb.ColMajorNonContiguous } toSerialize.T = pb.Triangle(t.Δ) toSerialize.Type = t.t.String() data := t.byteSlice() toSerialize.Data = make([]byte, len(data)) copy(toSerialize.Data, data) return toSerialize.Marshal() } // PBDecode unmarshalls a protobuf byteslice into a *Dense. func (t *Dense) PBDecode(buf []byte) error { var toSerialize pb.Dense if err := toSerialize.Unmarshal(buf); err != nil { return err } t.shape = make(Shape, len(toSerialize.Shape)) for i, v := range toSerialize.Shape { t.shape[i] = int(v) } t.strides = make([]int, len(toSerialize.Strides)) for i, v := range toSerialize.Strides { t.strides[i] = int(v) } switch toSerialize.O { case pb.RowMajorContiguous: case pb.RowMajorNonContiguous: t.o = MakeDataOrder(NonContiguous) case pb.ColMajorContiguous: t.o = MakeDataOrder(ColMajor) case pb.ColMajorNonContiguous: t.o = MakeDataOrder(ColMajor, NonContiguous) } t.Δ = Triangle(toSerialize.T) typ := string(toSerialize.Type) for _, dt := range allTypes.set { if dt.String() == typ { t.t = dt break } } if t.e == nil { t.e = StdEng{} } t.makeArray(t.shape.TotalSize()) // allocated data. Now time to actually copy over the data db := t.byteSlice() copy(db, toSerialize.Data) return t.sanity() } ` var ( readNpy *template.Template gobEncode *template.Template gobDecode *template.Template readCSV *template.Template ) func init() { readNpy = template.Must(template.New("readNpy").Funcs(funcs).Parse(readNpyRaw)) readCSV = template.Must(template.New("readCSV").Funcs(funcs).Parse(readCSVRaw)) gobEncode = template.Must(template.New("gobEncode").Funcs(funcs).Parse(gobEncodeRaw)) gobDecode = template.Must(template.New("gobDecode").Funcs(funcs).Parse(gobDecodeRaw)) } func generateDenseIO(f io.Writer, generic Kinds) { mk := Kinds{Kinds: filter(generic.Kinds, isNumber)} fmt.Fprint(f, "/* GOB SERIALIZATION */\n\n") gobEncode.Execute(f, mk) gobDecode.Execute(f, mk) fmt.Fprint(f, "\n") fmt.Fprint(f, "/* NPY SERIALIZATION */\n\n") fmt.Fprintln(f, npyDescRE) fmt.Fprintln(f, rowOrderRE) fmt.Fprintln(f, shapeRE) f.Write([]byte(writeNpyRaw)) readNpy.Execute(f, mk) fmt.Fprint(f, "\n") fmt.Fprint(f, "/* CSV SERIALIZATION */\n\n") f.Write([]byte(writeCSVRaw)) readCSV.Execute(f, mk) fmt.Fprint(f, "\n") fmt.Fprint(f, "/* FB SERIALIZATION */\n\n") fmt.Fprintln(f, fbEncodeDecodeRaw) fmt.Fprint(f, "\n") fmt.Fprint(f, "/* PB SERIALIZATION */\n\n") fmt.Fprintln(f, pbEncodeDecodeRaw) fmt.Fprint(f, "\n") } tensor-0.9.24/genlib2/dense_maskedmethods.go000066400000000000000000000054171426512615100210110ustar00rootroot00000000000000package main import ( "fmt" "io" "reflect" "text/template" ) var maskcmpMethods = []struct { Name string Desc string NumArgs int CmpFn string ReqFloat bool Kinds []reflect.Kind }{ {"MaskedEqual", "equal to ", 1, "a == x", false, nil}, {"MaskedNotEqual", "not equal to ", 1, "a != x", false, nil}, {"MaskedValues", " equal to ", 3, "math.Abs(float64(a-x)) <= delta", true, nil}, 
{"MaskedGreater", " greater than ", 1, "a > x", false, nil}, {"MaskedGreaterEqual", " greater than or equal to ", 1, "a >= x", false, nil}, {"MaskedLess", " less than ", 1, "a < x", false, nil}, {"MaskedLessEqual", " less than or equal to ", 1, "a <= x", false, nil}, {"MaskedInside", " inside range of ", 2, "(a >= x) && (a <= y)", false, nil}, {"MaskedOutside", " outside range of ", 2, "(a < x) || (a > y)", false, nil}, } const maskCmpMethodRaw = `// {{.Name}} sets the mask to true where the corresponding data is {{.Desc}} val // Any values must be the same type as the tensor func (t *Dense) {{.Name}}({{if ge .NumArgs 1 -}} val1 interface{} {{end}} {{if ge .NumArgs 2 -}} , val2 interface{} {{end}} {{if ge .NumArgs 3 -}} , val3 ...interface{}{{end}})(err error){ {{if .ReqFloat}} if !isFloat(t.t) { err = errors.Errorf("Can only do {{.Name}} with floating point types") return } {{end}} if !t.IsMasked() { t.makeMask() } {{$numargs := .NumArgs}} {{$name := .Name}} {{$fn := .CmpFn}} {{$reqFloat := .ReqFloat}} switch t.t.Kind(){ {{range .Kinds -}} {{if isParameterized . -}} {{else -}} {{if or (not (isOrd .)) (and $reqFloat (isntFloat .)) -}} {{else -}} case reflect.{{reflectKind .}}: data := t.{{sliceOf .}} mask := t.mask {{if ge $numargs 1 -}} x := val1.({{asType .}}) {{end}} {{if ge $numargs 2 -}} y := val2.({{asType .}}){{end}} {{if ge $numargs 3 -}} {{if eq $name "MaskedValues"}} delta := float64(1.0e-8) if len(val3) > 0 { delta = float64(val3[0].({{asType .}})) + float64(y)*math.Abs(float64(x)) } {{else}} z := val3.({{asType .}}) {{end}} {{end}} if t.maskIsSoft{ for i := range data { a := data[i] mask[i] = ({{$fn}}) } } else { for i := range data { a := data[i] mask[i] = mask[i] || ({{$fn}}) } } {{end}} {{end}} {{end}} } return nil } ` var ( maskCmpMethod *template.Template ) func init() { maskCmpMethod = template.Must(template.New("maskcmpmethod").Funcs(funcs).Parse(maskCmpMethodRaw)) } func generateDenseMaskedMethods(f io.Writer, generic Kinds) { for _, mm := range maskcmpMethods { mm.Kinds = generic.Kinds fmt.Fprintf(f, "/* %s */ \n\n", mm.Name) maskCmpMethod.Execute(f, mm) } } tensor-0.9.24/genlib2/dense_maskedmethods_tests.go000066400000000000000000000052601426512615100222270ustar00rootroot00000000000000package main import ( "fmt" "io" "reflect" "text/template" ) type MaskCmpMethodTest struct { Kind reflect.Kind Name string } const testMaskCmpMethodRaw = `func TestDense_{{title .Name}}_{{short .Kind}}(t *testing.T){ assert := assert.New(t) T := New(Of({{reflectKind .Kind}}), WithShape(2, 3, 4, 5)) assert.False(T.IsMasked()) data := T.{{sliceOf .Kind}} for i := range data { {{if eq "string" (asType .Kind) -}} data[i] = fmt.Sprint(i) {{else -}} data[i] = {{asType .Kind}}(i) {{end -}} } {{if eq "string" (asType .Kind) -}} T.MaskedEqual(fmt.Sprint(0)) {{else -}} T.MaskedEqual({{asType .Kind}}(0)) {{end -}} assert.True(T.IsMasked()) {{if eq "string" (asType .Kind) -}} T.MaskedEqual(fmt.Sprint(1)) {{else -}} T.MaskedEqual({{asType .Kind}}(1)) {{end -}} assert.True(T.mask[0] && T.mask[1]) {{if eq "string" (asType .Kind) -}} T.MaskedNotEqual(fmt.Sprint(2)) {{else -}} T.MaskedNotEqual({{asType .Kind}}(2)) {{end -}} assert.False(T.mask[2] && !(T.mask[0])) T.ResetMask() {{if eq "string" (asType .Kind) -}} T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22)) {{else -}} T.MaskedInside({{asType .Kind}}(1), {{asType .Kind}}(22)) {{end -}} assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22]) T.ResetMask() {{if eq "string" (asType .Kind) -}} T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22)) {{else 
-}} T.MaskedOutside({{asType .Kind}}(1), {{asType .Kind}}(22)) {{end -}} assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22]) T.ResetMask() for i := 0; i < 5; i++ { {{if eq "string" (asType .Kind) -}} T.MaskedEqual(fmt.Sprint(i*10)) {{else -}} T.MaskedEqual({{asType .Kind}}(i*10)) {{end -}} } it := IteratorFromDense(T) j := 0 for _, err := it.Next(); err == nil; _, err = it.Next() { j++ } it.Reset() assert.Equal(120, j) j = 0 for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() { j++ } it.Reset() assert.Equal(115, j) j = 0 for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { j++ } it.Reset() assert.Equal(5,j) } ` var ( testMaskCmpMethod *template.Template ) func init() { testMaskCmpMethod = template.Must(template.New("testmaskcmpmethod").Funcs(funcs).Parse(testMaskCmpMethodRaw)) } func generateMaskCmpMethodsTests(f io.Writer, generic Kinds) { for _, mm := range maskcmpMethods { fmt.Fprintf(f, "/* %s */ \n\n", mm.Name) for _, k := range generic.Kinds { if isOrd(k) { if mm.ReqFloat && isntFloat(k) { } else { op := MaskCmpMethodTest{k, mm.Name} testMaskCmpMethod.Execute(f, op) } } } } } tensor-0.9.24/genlib2/dense_reduction_methods_tests.go000066400000000000000000000131731426512615100231200ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const testDenseSumRaw = `var sumTests = []struct { name string of Dtype shape Shape along []int correctShape Shape correct interface{} }{ {{range .Kinds -}} {{if isNumber . -}} {"common case: T.Sum() for {{.}}", {{asType . | title}}, Shape{2,3}, []int{}, ScalarShape(), {{asType .}}(15)}, {"A.Sum(0) for {{.}}", {{asType . | title}}, Shape{2,3}, []int{0}, Shape{3}, []{{asType .}}{3, 5, 7}}, {"A.Sum(1) for {{.}}", {{asType . | title}}, Shape{2,3},[]int{1}, Shape{2}, []{{asType .}}{3, 12}}, {"A.Sum(0,1) for {{.}}", {{asType . | title}}, Shape{2,3},[]int{0, 1}, ScalarShape(), {{asType .}}(15)}, {"A.Sum(1,0) for {{.}}", {{asType . | title}}, Shape{2,3},[]int{1, 0}, ScalarShape(), {{asType .}}(15)}, {"3T.Sum(1,2) for {{.}}", {{asType . | title}}, Shape{2,3,4}, []int{1,2}, Shape{2}, []{{asType .}}{66, {{if eq .String "int8"}}-46{{else}}210{{end}} }}, {"4T.Sum() for {{.}}", {{asType . | title}}, Shape{2, 2, 2, 2},[]int{}, ScalarShape(), {{asType .}}(120)}, {"4T.Sum(1,3) for {{.}}", {{asType . | title}}, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []{{asType .}}{10, 18, 42, 50}}, {"4T.Sum(0, 2, 3) for {{.}}", {{asType . | title}}, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []{{asType .}}{44, 76}}, {{end -}} {{end -}} } func TestDense_Sum(t *testing.T){ assert := assert.New(t) var T, T2 *Dense var err error for _, sts := range sumTests { T = New(WithShape(sts.shape...), WithBacking(Range(sts.of, 0, sts.shape.TotalSize()))) if T2, err = T.Sum(sts.along ...); err != nil { t.Error(err) continue } assert.True(sts.correctShape.Eq(T2.Shape())) assert.Equal(sts.correct, T2.Data()) } // idiots _,err =T.Sum(1000) assert.NotNil(err) } ` const testDenseMaxRaw = `var maxTests = []struct { name string of Dtype shape Shape along []int correctShape Shape correct interface{} }{ {{range .Kinds -}} {{if isNumber . -}} {{if isOrd . -}} {"common case: T.Max() for {{.}}", {{asType . | title}}, Shape{2,3}, []int{}, ScalarShape(), {{asType .}}(5)}, {"A.Max(0)", {{asType . | title}}, Shape{2,3},[]int{0}, Shape{3}, []{{asType . }}{3, 4, 5}}, {"A.Max(1)", {{asType . | title}}, Shape{2,3},[]int{1}, Shape{2}, []{{asType . }}{2,5}}, {"A.Max(0,1)", {{asType . 
| title}}, Shape{2,3},[]int{0, 1}, ScalarShape(), {{asType .}}(5)}, {"A.Max(1,0)", {{asType . | title}}, Shape{2,3},[]int{1, 0}, ScalarShape(), {{asType .}}(5)}, {"3T.Max(1,2)", {{asType . | title}}, Shape{2,3,4}, []int{1,2}, Shape{2}, []{{asType .}}{11, 23} }, {"4T.Max()", {{asType . | title}}, Shape{2, 2, 2, 2},[]int{}, ScalarShape(), {{asType .}}(15)}, {"4T.Max(1,3)", {{asType . | title}}, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []{{asType .}}{5, 7, 13, 15}}, {"4T.Max(0, 2, 3)", {{asType . | title}}, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []{{asType .}}{11, 15}}, {{end -}} {{end -}} {{end -}} } func TestDense_Max(t *testing.T){ assert := assert.New(t) var T, T2 *Dense var err error for _, mts := range maxTests { T = New(WithShape(mts.shape...), WithBacking(Range(mts.of, 0, mts.shape.TotalSize()))) if T2, err = T.Max(mts.along...); err != nil{ t.Error(err) continue } assert.True(mts.correctShape.Eq(T2.Shape())) assert.Equal(mts.correct, T2.Data()) } /* IDIOT TESTING TIME */ _, err = T.Max(1000) assert.NotNil(err) } ` const testDenseMinRaw = `var minTests = []struct { name string of Dtype shape Shape along []int correctShape Shape correct interface{} }{ {{range .Kinds -}} {{if isNumber . -}} {{if isOrd . -}} {"common case: T.Min() for {{.}}", {{asType .|title}}, Shape{2,3}, []int{}, ScalarShape(), {{asType .}}(0)}, {"A.Min(0)", {{asType .|title}}, Shape{2,3}, []int{0}, Shape{3}, []{{asType .}}{0, 1, 2}}, {"A.Min(1)", {{asType .|title}}, Shape{2,3}, []int{1}, Shape{2}, []{{asType .}}{0, 3}}, {"A.Min(0,1)", {{asType .|title}}, Shape{2,3}, []int{0, 1}, ScalarShape(), {{asType .}}(0)}, {"A.Min(1,0)", {{asType .|title}}, Shape{2,3}, []int{1, 0}, ScalarShape(), {{asType .}}(0)}, {"3T.Min(1,2)", {{asType . | title}}, Shape{2,3,4}, []int{1,2}, Shape{2}, []{{asType .}}{0,12} }, {"4T.Min()", {{asType . | title}}, Shape{2, 2, 2, 2},[]int{}, ScalarShape(), {{asType .}}(0)}, {"4T.Min(1,3)", {{asType . | title}}, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []{{asType .}}{0, 2, 8, 10}}, {"4T.Min(0, 2, 3)", {{asType . 
| title}}, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []{{asType .}}{0, 4}}, {{end -}} {{end -}} {{end -}} } func TestDense_Min(t *testing.T){ assert := assert.New(t) var T, T2 *Dense var err error for _, mts := range minTests { T = New(WithShape(mts.shape...), WithBacking(Range(mts.of, 0, mts.shape.TotalSize()))) if T2, err = T.Min(mts.along...); err != nil{ t.Error(err) continue } assert.True(mts.correctShape.Eq(T2.Shape())) assert.Equal(mts.correct, T2.Data()) } /* IDIOT TESTING TIME */ _, err = T.Min(1000) assert.NotNil(err) } ` var ( testDenseSum *template.Template testDenseMax *template.Template testDenseMin *template.Template ) func init() { testDenseSum = template.Must(template.New("testDenseSum").Funcs(funcs).Parse(testDenseSumRaw)) testDenseMax = template.Must(template.New("testDenseMax").Funcs(funcs).Parse(testDenseMaxRaw)) testDenseMin = template.Must(template.New("testDenseMin").Funcs(funcs).Parse(testDenseMinRaw)) } func generateDenseReductionMethodsTests(f io.Writer, generic Kinds) { testDenseSum.Execute(f, generic) fmt.Fprint(f, "\n") testDenseMax.Execute(f, generic) fmt.Fprint(f, "\n") testDenseMin.Execute(f, generic) } tensor-0.9.24/genlib2/dense_reduction_tests.go000066400000000000000000000034011426512615100213660ustar00rootroot00000000000000package main import ( "io" "text/template" ) const testDenseReduceRaw = `var denseReductionTests = []struct { of Dtype fn interface{} def interface{} axis int correct interface{} correctShape Shape }{ {{range .Kinds -}} {{if isNumber . -}} // {{.}} { {{asType . | title}}, execution.Add{{short .}}, {{asType .}}(0), 0, []{{asType .}}{6, 8, 10, 12, 14, 16}, Shape{3,2} }, { {{asType . | title}}, execution.Add{{short .}}, {{asType .}}(0), 1, []{{asType .}}{6, 9, 24, 27}, Shape{2, 2}}, { {{asType . | title}}, execution.Add{{short .}}, {{asType .}}(0), 2, []{{asType .}}{1, 5, 9, 13, 17, 21}, Shape{2, 3}}, {{end -}} {{end -}} } func TestDense_Reduce(t *testing.T){ assert := assert.New(t) for _, drt := range denseReductionTests { T := New(WithShape(2,3,2), WithBacking(Range(drt.of, 0, 2*3*2))) T2, err := T.Reduce(drt.fn, drt.axis, drt.def, ) if err != nil { t.Error(err) continue } assert.True(drt.correctShape.Eq(T2.Shape())) assert.Equal(drt.correct, T2.Data()) // stupids: _, err = T.Reduce(drt.fn, 1000, drt.def,) assert.NotNil(err) // wrong function type var f interface{} f = func(a, b float64)float64{return 0} if drt.of == Float64 { f = func(a, b int)int{return 0} } _, err = T.Reduce(f, 0, drt.correct) assert.NotNil(err) // wrong default value type var def2 interface{} def2 = 3.14 if drt.of == Float64 { def2 = int(1) } _, err = T.Reduce(drt.fn, 3, def2) // only last axis requires a default value assert.NotNil(err) } } ` var ( testDenseReduce *template.Template ) func init() { testDenseReduce = template.Must(template.New("testDenseReduce").Funcs(funcs).Parse(testDenseReduceRaw)) } func generateDenseReductionTests(f io.Writer, generic Kinds) { testDenseReduce.Execute(f, generic) } tensor-0.9.24/genlib2/docstrings.go000066400000000000000000000201431426512615100171530ustar00rootroot00000000000000package main import "text/template" var arithDocStrings = map[string]*template.Template{ "Add": template.Must(template.New("+").Parse("// Add performs {{.Left}} + {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "Sub": template.Must(template.New("-").Parse("// Sub performs {{.Left}} - {{.Right}} elementwise. 
Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "Mul": template.Must(template.New("×").Parse("// Mul performs {{.Left}} × {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "Div": template.Must(template.New("÷").Parse("// Div performs {{.Left}} ÷ {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "Pow": template.Must(template.New("^").Parse("// Pow performs {{.Left}} ^ {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "Mod": template.Must(template.New("%").Parse("// Mod performs {{.Left}} % {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "AddScalar": template.Must(template.New("+").Parse("// AddScalar performs {{.Left}} + {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "SubScalar": template.Must(template.New("-").Parse("// SubScalar performs {{.Left}} - {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "MulScalar": template.Must(template.New("×").Parse("// MulScalar performs {{.Left}} × {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "DivScalar": template.Must(template.New("÷").Parse("// DivScalar performs {{.Left}} ÷ {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "PowScalar": template.Must(template.New("^").Parse("// PowScalar performs {{.Left}} ^ {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), "ModScalar": template.Must(template.New("%").Parse("// ModScalar performs {{.Left}} % {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}.\n// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)\n")), } var cmpDocStrings = map[string]*template.Template{ "Lt": template.Must(template.New("+").Parse("// Lt performs {{.Left}} < {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "Lte": template.Must(template.New("+").Parse("// Lte performs {{.Left}} ≤ {{.Right}} elementwise. 
Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "Gt": template.Must(template.New("+").Parse("// Gt performs {{.Left}} > {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "Gte": template.Must(template.New("+").Parse("// Gte performs {{.Left}} ≥ {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "ElEq": template.Must(template.New("+").Parse("// ElEq performs {{.Left}} == {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "ElNe": template.Must(template.New("+").Parse("// ElNe performs {{.Left}} ≠ {{.Right}} elementwise. Both {{.Left}} and {{.Right}} must have the same shape.\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "LtScalar": template.Must(template.New("+").Parse("// LtScalar performs {{.Left}} < {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "LteScalar": template.Must(template.New("+").Parse("// LteScalar performs {{.Left}} ≤ {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "GtScalar": template.Must(template.New("+").Parse("// GtScalar performs {{.Left}} > {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "GteScalar": template.Must(template.New("+").Parse("// GteScalar performs {{.Left}} ≥ {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. 
Only scalar types are accepted in {{.Right}}\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "ElEqScalar": template.Must(template.New("+").Parse("// ElEqScalar performs {{.Left}} == {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), "ElNeScalar": template.Must(template.New("+").Parse("// ElNeScalar performs {{.Left}} ≠ {{.Right}} elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in {{.Right}}\n// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().\n// UseUnsafe() will ensure that the same type is returned.\n// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.\n")), } tensor-0.9.24/genlib2/engine.go000066400000000000000000000215021426512615100162410ustar00rootroot00000000000000package main import ( "io" "reflect" "text/template" ) type EngineArith struct { Name string VecVar string PrepData string TypeClassCheck string IsCommutative bool VV bool LeftVec bool } func (fn *EngineArith) methName() string { switch { case fn.VV: return fn.Name default: return fn.Name + "Scalar" } } func (fn *EngineArith) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.VV: paramNames = []string{"a", "b", "opts"} paramTemplates = []*template.Template{tensorType, tensorType, splatFuncOptType} default: paramNames = []string{"t", "s", "leftTensor", "opts"} paramTemplates = []*template.Template{tensorType, interfaceType, boolType, splatFuncOptType} } return &Signature{ Name: fn.methName(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: false, } } func (fn *EngineArith) WriteBody(w io.Writer) { var prep *template.Template switch { case fn.VV: prep = prepVV fn.VecVar = "a" case !fn.VV && fn.LeftVec: fn.VecVar = "t" fn.PrepData = "prepDataVS" prep = prepMixed default: fn.VecVar = "t" fn.PrepData = "prepDataSV" prep = prepMixed } template.Must(prep.New("prep").Parse(arithPrepRaw)) prep.Execute(w, fn) agg2Body.Execute(w, fn) } func (fn *EngineArith) Write(w io.Writer) { if tmpl, ok := arithDocStrings[fn.methName()]; ok { type tmp struct { Left, Right string } var ds tmp if fn.VV { ds.Left = "a" ds.Right = "b" } else { ds.Left = "t" ds.Right = "s" } tmpl.Execute(w, ds) } sig := fn.Signature() w.Write([]byte("func (e StdEng) ")) sig.Write(w) w.Write([]byte("(retVal Tensor, err error) {\n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateStdEngArith(f io.Writer, ak Kinds) { var methods []*EngineArith for _, abo := range arithBinOps { meth := &EngineArith{ Name: abo.Name(), VV: true, TypeClassCheck: "Number", IsCommutative: abo.IsCommutative, } methods = append(methods, meth) } // VV for _, meth := range methods { meth.Write(f) meth.VV = false } // Scalar for _, meth := range methods { meth.Write(f) meth.LeftVec = true } } type EngineCmp struct { Name string VecVar string PrepData string TypeClassCheck string Inv string VV bool LeftVec bool } func (fn *EngineCmp) methName() string { switch { case fn.VV: if fn.Name == "Eq" || fn.Name == "Ne" { 
return "El" + fn.Name } return fn.Name default: return fn.Name + "Scalar" } } func (fn *EngineCmp) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.VV: paramNames = []string{"a", "b", "opts"} paramTemplates = []*template.Template{tensorType, tensorType, splatFuncOptType} default: paramNames = []string{"t", "s", "leftTensor", "opts"} paramTemplates = []*template.Template{tensorType, interfaceType, boolType, splatFuncOptType} } return &Signature{ Name: fn.methName(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: false, } } func (fn *EngineCmp) WriteBody(w io.Writer) { var prep *template.Template switch { case fn.VV: prep = prepVV fn.VecVar = "a" case !fn.VV && fn.LeftVec: fn.VecVar = "t" fn.PrepData = "prepDataVS" prep = prepMixed default: fn.VecVar = "t" fn.PrepData = "prepDataSV" prep = prepMixed } template.Must(prep.New("prep").Parse(cmpPrepRaw)) prep.Execute(w, fn) agg2CmpBody.Execute(w, fn) } func (fn *EngineCmp) Write(w io.Writer) { if tmpl, ok := cmpDocStrings[fn.methName()]; ok { type tmp struct { Left, Right string } var ds tmp if fn.VV { ds.Left = "a" ds.Right = "b" } else { ds.Left = "t" ds.Right = "s" } tmpl.Execute(w, ds) } sig := fn.Signature() w.Write([]byte("func (e StdEng) ")) sig.Write(w) w.Write([]byte("(retVal Tensor, err error) {\n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateStdEngCmp(f io.Writer, ak Kinds) { var methods []*EngineCmp for _, abo := range cmpBinOps { var tc string if abo.Name() == "Eq" || abo.Name() == "Ne" { tc = "Eq" } else { tc = "Ord" } meth := &EngineCmp{ Name: abo.Name(), Inv: abo.Inv, VV: true, TypeClassCheck: tc, } methods = append(methods, meth) } // VV for _, meth := range methods { meth.Write(f) meth.VV = false } // Scalar for _, meth := range methods { meth.Write(f) meth.LeftVec = true } } type EngineMinMax struct { Name string VecVar string PrepData string TypeClassCheck string Kinds []reflect.Kind VV bool LeftVec bool } func (fn *EngineMinMax) methName() string { switch { case fn.VV: return fn.Name default: return fn.Name + "Scalar" } } func (fn *EngineMinMax) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.VV: paramNames = []string{"a", "b", "opts"} paramTemplates = []*template.Template{tensorType, tensorType, splatFuncOptType} default: paramNames = []string{"t", "s", "leftTensor", "opts"} paramTemplates = []*template.Template{tensorType, interfaceType, boolType, splatFuncOptType} } return &Signature{ Name: fn.methName(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: false, } } func (fn *EngineMinMax) WriteBody(w io.Writer) { var prep *template.Template switch { case fn.VV: prep = prepVV fn.VecVar = "a" case !fn.VV && fn.LeftVec: fn.VecVar = "t" fn.PrepData = "prepDataVS" prep = prepMixed default: fn.VecVar = "t" fn.PrepData = "prepDataSV" prep = prepMixed } template.Must(prep.New("prep").Parse(minmaxPrepRaw)) prep.Execute(w, fn) agg2MinMaxBody.Execute(w, fn) } func (fn *EngineMinMax) Write(w io.Writer) { if tmpl, ok := cmpDocStrings[fn.methName()]; ok { type tmp struct { Left, Right string } var ds tmp if fn.VV { ds.Left = "a" ds.Right = "b" } else { ds.Left = "t" ds.Right = "s" } tmpl.Execute(w, ds) } sig := fn.Signature() w.Write([]byte("func (e StdEng) ")) sig.Write(w) w.Write([]byte("(retVal Tensor, err error) {\n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateStdEngMinMax(f io.Writer, ak Kinds) { methods := 
[]*EngineMinMax{ &EngineMinMax{ Name: "MinBetween", VV: true, TypeClassCheck: "Ord", }, &EngineMinMax{ Name: "MaxBetween", VV: true, TypeClassCheck: "Ord", }, } f.Write([]byte(`var ( _ MinBetweener = StdEng{} _ MaxBetweener = StdEng{} ) `)) // VV for _, meth := range methods { meth.Write(f) meth.VV = false } // Scalar-Vector for _, meth := range methods { meth.Write(f) meth.LeftVec = true } } /* UNARY METHODS */ type EngineUnary struct { Name string TypeClassCheck string Kinds []reflect.Kind } func (fn *EngineUnary) Signature() *Signature { return &Signature{ Name: fn.Name, NameTemplate: plainName, ParamNames: []string{"a", "opts"}, ParamTemplates: []*template.Template{tensorType, splatFuncOptType}, RetVals: []string{"retVal"}, RetValTemplates: []*template.Template{tensorType}, Err: true, } } func (fn *EngineUnary) WriteBody(w io.Writer) { prepUnary.Execute(w, fn) agg2UnaryBody.Execute(w, fn) } func (fn *EngineUnary) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func (e StdEng) ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("\n}\n")) } func generateStdEngUncondUnary(f io.Writer, ak Kinds) { tcc := []string{ "Number", // Neg "Number", // Inv "Number", // Square "Number", // Cube "FloatCmplx", // Exp "FloatCmplx", // Tanh "FloatCmplx", // Log "Float", // Log2 "FloatCmplx", // Log10 "FloatCmplx", // Sqrt "Float", // Cbrt "Float", // InvSqrt } var gen []*EngineUnary for i, u := range unconditionalUnaries { var ks []reflect.Kind for _, k := range ak.Kinds { if tc := u.TypeClass(); tc != nil && !tc(k) { continue } ks = append(ks, k) } fn := &EngineUnary{ Name: u.Name(), TypeClassCheck: tcc[i], Kinds: ks, } gen = append(gen, fn) } for _, fn := range gen { fn.Write(f) } } func generateStdEngCondUnary(f io.Writer, ak Kinds) { tcc := []string{ "Signed", // Abs "Signed", // Sign } var gen []*EngineUnary for i, u := range conditionalUnaries { var ks []reflect.Kind for _, k := range ak.Kinds { if tc := u.TypeClass(); tc != nil && !tc(k) { continue } ks = append(ks, k) } fn := &EngineUnary{ Name: u.Name(), TypeClassCheck: tcc[i], Kinds: ks, } gen = append(gen, fn) } for _, fn := range gen { fn.Write(f) } } tensor-0.9.24/genlib2/generic_argmethods.go000066400000000000000000000054271426512615100206350ustar00rootroot00000000000000package main import ( "fmt" "io" "reflect" "text/template" ) const argMethodLoopBody = `v := a[i] if !set { f = v {{.ArgX}} = i set = true continue } {{if isFloat .Kind -}} if {{mathPkg .Kind}}IsNaN(v) || {{mathPkg .Kind}}IsInf(v, {{if eq .ArgX "min"}}-{{end}}1) { {{.ArgX}} = i return {{.ArgX}} } {{end -}} if v {{if eq .ArgX "max"}}>{{else}}<{{end}} f { {{.ArgX}} = i f = v } ` const argMethodIter = `data := t.{{sliceOf .}} tmp := make([]{{asType .}}, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := {{.ArgX | title}}(tmp) indices = append(indices, am) tmp = tmp[:0] } } return ` type GenericArgMethod struct { ArgX string Masked bool Range string Kind reflect.Kind } func (fn *GenericArgMethod) Name() string { switch { case fn.ArgX == "max" && fn.Masked: return "ArgmaxMasked" case fn.ArgX == "min" && fn.Masked: return "ArgminMasked" case fn.ArgX == "max" && !fn.Masked: return "Argmax" case fn.ArgX == "min" && !fn.Masked: return "Argmin" } panic("Unreachable") } func (fn *GenericArgMethod) Signature() *Signature { paramNames := []string{"a"} paramTemplates := []*template.Template{sliceType} if fn.Masked { paramNames = append(paramNames, "mask") paramTemplates = 
append(paramTemplates, boolsType) } return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind, } } func (fn *GenericArgMethod) WriteBody(w io.Writer) { T := template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(argMethodLoopBody)) if fn.Masked { template.Must(T.New("check").Parse(maskCheck)) } else { template.Must(T.New("check").Parse("")) } genericArgmaxVarDecl.Execute(w, fn) T.Execute(w, fn) fmt.Fprintf(w, "\nreturn %s\n", fn.ArgX) } func (fn *GenericArgMethod) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("int {\n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateGenericArgMethods(f io.Writer, ak Kinds) { var argMethods []*GenericArgMethod for _, k := range ak.Kinds { if !isOrd(k) { continue } m := &GenericArgMethod{ ArgX: "max", Kind: k, Range: "a", } argMethods = append(argMethods, m) } // argmax for _, m := range argMethods { m.Write(f) m.Masked = true } for _, m := range argMethods { m.Write(f) m.Masked = false m.ArgX = "min" } // argmin for _, m := range argMethods { m.Write(f) m.Masked = true } for _, m := range argMethods { m.Write(f) } } tensor-0.9.24/genlib2/generic_arith.go000066400000000000000000000274151426512615100176100ustar00rootroot00000000000000package main import ( "fmt" "io" "strings" "text/template" ) type GenericVecVecArith struct { TypedBinOp Iter bool Incr bool WithRecv bool // not many BinOps have this Check TypeClass // can be nil CheckTemplate string } func (fn *GenericVecVecArith) Name() string { switch { case fn.Iter && fn.Incr: return fmt.Sprintf("%sIterIncr", fn.TypedBinOp.Name()) case fn.Iter && !fn.Incr: return fmt.Sprintf("%sIter", fn.TypedBinOp.Name()) case !fn.Iter && fn.Incr: return fmt.Sprintf("%sIncr", fn.TypedBinOp.Name()) case fn.WithRecv: return fmt.Sprintf("%vRecv", fn.TypedBinOp.Name()) default: return fmt.Sprintf("Vec%s", fn.TypedBinOp.Name()) } } func (fn *GenericVecVecArith) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template var err bool switch { case fn.Iter && fn.Incr: paramNames = []string{"a", "b", "incr", "ait", "bit", "iit"} paramTemplates = []*template.Template{sliceType, sliceType, sliceType, iteratorType, iteratorType, iteratorType} err = true case fn.Iter && !fn.Incr: paramNames = []string{"a", "b", "ait", "bit"} paramTemplates = []*template.Template{sliceType, sliceType, iteratorType, iteratorType} err = true case !fn.Iter && fn.Incr: paramNames = []string{"a", "b", "incr"} paramTemplates = []*template.Template{sliceType, sliceType, sliceType} case fn.WithRecv: paramNames = []string{"a", "b", "recv"} paramTemplates = []*template.Template{sliceType, sliceType, sliceType} default: paramNames = []string{"a", "b"} paramTemplates = []*template.Template{sliceType, sliceType} } if fn.Check != nil { err = true } return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind(), Err: err, } } func (fn *GenericVecVecArith) WriteBody(w io.Writer) { var Range, Left, Right string var Index0, Index1, Index2 string var IterName0, IterName1, IterName2 string var T *template.Template Range = "a" Index0 = "i" Index1 = "j" Left = "a[i]" Right = "b[j]" T = template.New(fn.Name()).Funcs(funcs) switch { case fn.Iter && fn.Incr: Range = "incr" Index2 = "k" IterName0 = "ait" IterName1 = "bit" IterName2 = "iit" T = 
template.Must(T.Parse(genericTernaryIterLoopRaw)) template.Must(T.New("loopbody").Parse(iterIncrLoopBody)) case fn.Iter && !fn.Incr: IterName0 = "ait" IterName1 = "bit" T = template.Must(T.Parse(genericBinaryIterLoopRaw)) template.Must(T.New("loopbody").Parse(basicSet)) case !fn.Iter && fn.Incr: Range = "incr" Right = "b[i]" T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(basicIncr)) case fn.WithRecv: Range = "recv" Right = "b[i]" T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(basicSet)) default: Right = "b[i]" T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(basicSet)) } template.Must(T.New("callFunc").Parse(binOpCallFunc)) template.Must(T.New("opDo").Parse(binOpDo)) template.Must(T.New("symbol").Parse(fn.SymbolTemplate())) if fn.Check != nil && fn.Check(fn.Kind()) { w.Write([]byte("var errs errorIndices\n")) } template.Must(T.New("check").Parse(fn.CheckTemplate)) lb := LoopBody{ TypedOp: fn.TypedBinOp, Range: Range, Left: Left, Right: Right, Index0: Index0, Index1: Index1, Index2: Index2, IterName0: IterName0, IterName1: IterName1, IterName2: IterName2, } T.Execute(w, lb) } func (fn *GenericVecVecArith) Write(w io.Writer) { sig := fn.Signature() if !fn.Iter && isFloat(fn.Kind()) && !fn.WithRecv { // golinkPragma.Execute(w, fn) w.Write([]byte("func ")) sig.Write(w) if fn.Incr { fmt.Fprintf(w, "{ %v%v(a, b, incr)}\n", vecPkg(fn.Kind()), getalias(fn.Name())) } else { fmt.Fprintf(w, "{ %v%v(a, b)}\n", vecPkg(fn.Kind()), getalias(fn.Name())) } return } w.Write([]byte("func ")) sig.Write(w) switch { case !fn.Iter && fn.Incr: w.Write([]byte("{\na = a[:len(a)]; b = b[:len(a)]; incr = incr[:len(a)]\n")) case fn.WithRecv: w.Write([]byte("{\na = a[:len(recv)]; b = b[:len(recv)]\n")) case !fn.Iter && !fn.Incr && !fn.WithRecv: w.Write([]byte("{\na = a[:len(a)]; b = b[:len(a)]\n")) default: w.Write([]byte("{\n")) } fn.WriteBody(w) if sig.Err { if fn.Check != nil { w.Write([]byte("\nif err != nil {\n return\n}\nif len(errs) > 0 {\n return errs }\nreturn nil")) } else { w.Write([]byte("\nreturn\n")) } } w.Write([]byte("}\n\n")) } type GenericMixedArith struct { GenericVecVecArith LeftVec bool } func (fn *GenericMixedArith) Name() string { n := fn.GenericVecVecArith.Name() n = strings.TrimPrefix(n, "Vec") if fn.LeftVec { n += "VS" } else { n += "SV" } return n } func (fn *GenericMixedArith) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template var err bool switch { case fn.Iter && fn.Incr: paramNames = []string{"a", "b", "incr", "ait", "iit"} paramTemplates = []*template.Template{sliceType, sliceType, sliceType, iteratorType, iteratorType} if fn.LeftVec { paramTemplates[1] = scalarType } else { paramTemplates[0] = scalarType paramNames[3] = "bit" } err = true case fn.Iter && !fn.Incr: paramNames = []string{"a", "b", "ait"} paramTemplates = []*template.Template{sliceType, sliceType, iteratorType} if fn.LeftVec { paramTemplates[1] = scalarType } else { paramTemplates[0] = scalarType paramNames[2] = "bit" } err = true case !fn.Iter && fn.Incr: paramNames = []string{"a", "b", "incr"} paramTemplates = []*template.Template{sliceType, sliceType, sliceType} if fn.LeftVec { paramTemplates[1] = scalarType } else { paramTemplates[0] = scalarType } default: paramNames = []string{"a", "b"} paramTemplates = []*template.Template{sliceType, sliceType} if fn.LeftVec { paramTemplates[1] = scalarType } else { paramTemplates[0] = scalarType } } if fn.Check != nil { err = true } return 
&Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind(), Err: err, } } func (fn *GenericMixedArith) WriteBody(w io.Writer) { var Range, Left, Right string var Index0, Index1 string var IterName0, IterName1 string Range = "a" Left = "a[i]" Right = "b[i]" Index0 = "i" T := template.New(fn.Name()).Funcs(funcs) switch { case fn.Iter && fn.Incr: Range = "incr" T = template.Must(T.Parse(genericBinaryIterLoopRaw)) template.Must(T.New("loopbody").Parse(iterIncrLoopBody)) case fn.Iter && !fn.Incr: T = template.Must(T.Parse(genericUnaryIterLoopRaw)) template.Must(T.New("loopbody").Parse(basicSet)) case !fn.Iter && fn.Incr: Range = "incr" T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(basicIncr)) default: T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(basicSet)) } if fn.LeftVec { Right = "b" } else { Left = "a" if !fn.Incr { Range = "b" } // Index0 = "j" } switch { case fn.Iter && fn.Incr && fn.LeftVec: IterName0 = "ait" IterName1 = "iit" Index1 = "k" case fn.Iter && !fn.Incr && fn.LeftVec: IterName0 = "ait" case fn.Iter && fn.Incr && !fn.LeftVec: IterName0 = "bit" IterName1 = "iit" Index1 = "k" case fn.Iter && !fn.Incr && !fn.LeftVec: IterName0 = "bit" } template.Must(T.New("callFunc").Parse(binOpCallFunc)) template.Must(T.New("opDo").Parse(binOpDo)) template.Must(T.New("symbol").Parse(fn.SymbolTemplate())) if fn.Check != nil && fn.Check(fn.Kind()) { w.Write([]byte("var errs errorIndices\n")) } template.Must(T.New("check").Parse(fn.CheckTemplate)) lb := LoopBody{ TypedOp: fn.TypedBinOp, Range: Range, Left: Left, Right: Right, Index0: Index0, Index1: Index1, IterName0: IterName0, IterName1: IterName1, } T.Execute(w, lb) } func (fn *GenericMixedArith) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) if sig.Err { if fn.Check != nil { w.Write([]byte("\nif err != nil {\n return\n}\nif len(errs) > 0 {\n return errs }\nreturn nil")) } else { w.Write([]byte("\nreturn\n")) } } w.Write([]byte("}\n\n")) } type GenericScalarScalarArith struct { TypedBinOp } func (fn *GenericScalarScalarArith) Signature() *Signature { return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: []string{"a", "b"}, ParamTemplates: []*template.Template{scalarType, scalarType}, RetVals: []string{""}, RetValTemplates: []*template.Template{scalarType}, Kind: fn.Kind(), } } func (fn *GenericScalarScalarArith) WriteBody(w io.Writer) { tmpl := `return {{if .IsFunc -}} {{ template "callFunc" . -}} {{else -}} {{template "opDo" . 
-}} {{end -}}` opDo := `a {{template "symbol" .Kind}} b` callFunc := `{{template "symbol" .Kind}}(a, b)` T := template.Must(template.New(fn.Name()).Funcs(funcs).Parse(tmpl)) template.Must(T.New("opDo").Parse(opDo)) template.Must(T.New("callFunc").Parse(callFunc)) template.Must(T.New("symbol").Parse(fn.SymbolTemplate())) T.Execute(w, fn) } func (fn *GenericScalarScalarArith) Write(w io.Writer) { w.Write([]byte("func ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{")) fn.WriteBody(w) w.Write([]byte("}\n")) } func makeGenericVecVecAriths(tbo []TypedBinOp) (retVal []*GenericVecVecArith) { for _, tb := range tbo { if tc := tb.TypeClass(); tc != nil && !tc(tb.Kind()) { continue } fn := &GenericVecVecArith{ TypedBinOp: tb, } if tb.Name() == "Div" && !isFloatCmplx(tb.Kind()) { fn.Check = panicsDiv0 fn.CheckTemplate = check0 } retVal = append(retVal, fn) } return retVal } func makeGenericMixedAriths(tbo []TypedBinOp) (retVal []*GenericMixedArith) { for _, tb := range tbo { if tc := tb.TypeClass(); tc != nil && !tc(tb.Kind()) { continue } fn := &GenericMixedArith{ GenericVecVecArith: GenericVecVecArith{ TypedBinOp: tb, }, } if tb.Name() == "Div" && !isFloatCmplx(tb.Kind()) { fn.Check = panicsDiv0 fn.CheckTemplate = check0 } retVal = append(retVal, fn) } return } func makeGenericScalarScalarAriths(tbo []TypedBinOp) (retVal []*GenericScalarScalarArith) { for _, tb := range tbo { if tc := tb.TypeClass(); tc != nil && !tc(tb.Kind()) { continue } fn := &GenericScalarScalarArith{ TypedBinOp: tb, } retVal = append(retVal, fn) } return } func generateGenericVecVecArith(f io.Writer, ak Kinds) { gen := makeGenericVecVecAriths(typedAriths) // importStmt := ` // import ( // _ "unsafe" // _ "gorgonia.org/vecf32" // _ "gorgonia.org/vecf64") // ` // f.Write([]byte(importStmt)) for _, g := range gen { g.Write(f) g.Incr = true } for _, g := range gen { g.Write(f) g.Incr = false g.Iter = true } for _, g := range gen { g.Write(f) g.Incr = true } for _, g := range gen { g.Write(f) } for _, g := range gen { g.Incr = false g.Iter = false g.WithRecv = true g.Write(f) } } func generateGenericMixedArith(f io.Writer, ak Kinds) { gen := makeGenericMixedAriths(typedAriths) // SV first for _, g := range gen { g.Write(f) g.Incr = true } for _, g := range gen { g.Write(f) g.Incr = false g.Iter = true } for _, g := range gen { g.Write(f) g.Incr = true } for _, g := range gen { g.Write(f) // reset g.LeftVec = true g.Incr = false g.Iter = false } // VS for _, g := range gen { g.Write(f) g.Incr = true } for _, g := range gen { g.Write(f) g.Incr = false g.Iter = true } for _, g := range gen { g.Write(f) g.Incr = true } for _, g := range gen { g.Write(f) } } func generateGenericScalarScalarArith(f io.Writer, ak Kinds) { gen := makeGenericScalarScalarAriths(typedAriths) for _, g := range gen { g.Write(f) } } tensor-0.9.24/genlib2/generic_cmp.go000066400000000000000000000303541426512615100172540ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) type GenericVecVecCmp struct { TypedBinOp RetSame bool Iter bool } func (fn *GenericVecVecCmp) Name() string { switch { case fn.Iter && fn.RetSame: return fmt.Sprintf("%sSameIter", fn.TypedBinOp.Name()) case fn.Iter && !fn.RetSame: return fmt.Sprintf("%sIter", fn.TypedBinOp.Name()) case !fn.Iter && fn.RetSame: return fmt.Sprintf("%sSame", fn.TypedBinOp.Name()) default: return fn.TypedBinOp.Name() } } func (fn *GenericVecVecCmp) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template var err bool switch { case fn.Iter && 
fn.RetSame: paramNames = []string{"a", "b", "ait", "bit"} paramTemplates = []*template.Template{sliceType, sliceType, iteratorType, iteratorType} err = true case fn.Iter && !fn.RetSame: paramNames = []string{"a", "b", "retVal", "ait", "bit", "rit"} paramTemplates = []*template.Template{sliceType, sliceType, boolsType, iteratorType, iteratorType, iteratorType} err = true case !fn.Iter && fn.RetSame: paramNames = []string{"a", "b"} paramTemplates = []*template.Template{sliceType, sliceType} default: paramNames = []string{"a", "b", "retVal"} paramTemplates = []*template.Template{sliceType, sliceType, boolsType} } return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind(), Err: err, } } func (fn *GenericVecVecCmp) WriteBody(w io.Writer) { var Range, Left, Right string var Index0, Index1, Index2 string var IterName0, IterName1, IterName2 string var T *template.Template Range = "a" Left = "a[i]" Right = "b[j]" Index0 = "i" Index1 = "j" switch { case fn.Iter && fn.RetSame: IterName0 = "ait" IterName1 = "bit" T = template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericBinaryIterLoopRaw)) template.Must(T.New("loopbody").Funcs(funcs).Parse(sameSet)) case fn.Iter && !fn.RetSame: Range = "retVal" Index2 = "k" IterName0 = "ait" IterName1 = "bit" IterName2 = "rit" T = template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericTernaryIterLoopRaw)) template.Must(T.New("loopbody").Funcs(funcs).Parse(ternaryIterSet)) case !fn.Iter && fn.RetSame: Right = "b[i]" T = template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericLoopRaw)) template.Must(T.New("loopbody").Funcs(funcs).Parse(sameSet)) default: Range = "retVal" Right = "b[i]" T = template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericLoopRaw)) template.Must(T.New("loopbody").Funcs(funcs).Parse(basicSet)) } template.Must(T.New("opDo").Funcs(funcs).Parse(binOpDo)) template.Must(T.New("callFunc").Funcs(funcs).Parse("")) template.Must(T.New("check").Funcs(funcs).Parse("")) template.Must(T.New("symbol").Funcs(funcs).Parse(fn.SymbolTemplate())) lb := LoopBody{ TypedOp: fn.TypedBinOp, Range: Range, Left: Left, Right: Right, Index0: Index0, Index1: Index1, Index2: Index2, IterName0: IterName0, IterName1: IterName1, IterName2: IterName2, } T.Execute(w, lb) } func (fn *GenericVecVecCmp) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) switch { case !fn.Iter && !fn.RetSame: w.Write([]byte("{\na = a[:len(a)]; b = b[:len(a)]; retVal=retVal[:len(a)]\n")) case !fn.Iter && fn.RetSame: w.Write([]byte("{\na = a[:len(a)]; b = b[:len(a)]\n")) default: w.Write([]byte("{")) } fn.WriteBody(w) if sig.Err { w.Write([]byte("\n return\n")) } w.Write([]byte("}\n\n")) } type GenericMixedCmp struct { GenericVecVecCmp LeftVec bool } func (fn *GenericMixedCmp) Name() string { n := fn.GenericVecVecCmp.Name() if fn.LeftVec { n += "VS" } else { n += "SV" } return n } func (fn *GenericMixedCmp) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template var err bool switch { case fn.Iter && !fn.RetSame: paramNames = []string{"a", "b", "retVal", "ait", "rit"} paramTemplates = []*template.Template{sliceType, sliceType, boolsType, iteratorType, iteratorType} err = true case fn.Iter && fn.RetSame: paramNames = []string{"a", "b", "ait"} paramTemplates = []*template.Template{sliceType, sliceType, iteratorType} err = true case !fn.Iter && fn.RetSame: paramNames = []string{"a", "b"} paramTemplates = 
[]*template.Template{sliceType, sliceType} default: paramNames = []string{"a", "b", "retVal"} paramTemplates = []*template.Template{sliceType, sliceType, boolsType} } if fn.LeftVec { paramTemplates[1] = scalarType } else { paramTemplates[0] = scalarType if fn.Iter && !fn.RetSame { paramNames[3] = "bit" } else if fn.Iter && fn.RetSame { paramNames[2] = "bit" } } return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind(), Err: err, } } func (fn *GenericMixedCmp) WriteBody(w io.Writer) { var Range, Left, Right string var Index0, Index1 string var IterName0, IterName1 string var T *template.Template Range = "a" Left = "a[i]" Right = "b[i]" Index0 = "i" T = template.New(fn.Name()).Funcs(funcs) switch { case fn.Iter && !fn.RetSame: Range = "retVal" T = template.Must(T.Parse(genericBinaryIterLoopRaw)) template.Must(T.New("loopbody").Parse(ternaryIterSet)) case fn.Iter && fn.RetSame: T = template.Must(T.Parse(genericUnaryIterLoopRaw)) template.Must(T.New("loopbody").Parse(sameSet)) case !fn.Iter && fn.RetSame: T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(sameSet)) default: T = template.Must(T.Parse(genericLoopRaw)) template.Must(T.New("loopbody").Parse(basicSet)) } if fn.LeftVec { Right = "b" } else { Left = "a" } if !fn.RetSame { Range = "retVal" } else { if !fn.LeftVec { Range = "b" } } switch { case fn.Iter && !fn.RetSame && fn.LeftVec: IterName0 = "ait" IterName1 = "rit" Index1 = "k" case fn.Iter && fn.RetSame && fn.LeftVec: IterName0 = "ait" case fn.Iter && !fn.RetSame && !fn.LeftVec: IterName0 = "bit" IterName1 = "rit" Index1 = "k" case fn.Iter && fn.RetSame && !fn.LeftVec: IterName0 = "bit" } template.Must(T.New("callFunc").Parse("")) template.Must(T.New("opDo").Parse(binOpDo)) template.Must(T.New("symbol").Parse(fn.SymbolTemplate())) template.Must(T.New("check").Parse("")) lb := LoopBody{ TypedOp: fn.TypedBinOp, Range: Range, Left: Left, Right: Right, Index0: Index0, Index1: Index1, IterName0: IterName0, IterName1: IterName1, } T.Execute(w, lb) } func (fn *GenericMixedCmp) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) if sig.Err { w.Write([]byte("\nreturn\n")) } w.Write([]byte("}\n\n")) } func makeGenericVecVecCmps(tbo []TypedBinOp) (retVal []*GenericVecVecCmp) { for _, tb := range tbo { if tc := tb.TypeClass(); tc != nil && !tc(tb.Kind()) { continue } fn := &GenericVecVecCmp{ TypedBinOp: tb, } retVal = append(retVal, fn) } return } func makeGenericMixedCmps(tbo []TypedBinOp) (retVal []*GenericMixedCmp) { for _, tb := range tbo { if tc := tb.TypeClass(); tc != nil && !tc(tb.Kind()) { continue } fn := &GenericMixedCmp{ GenericVecVecCmp: GenericVecVecCmp{ TypedBinOp: tb, }, } retVal = append(retVal, fn) } return } func generateGenericVecVecCmp(f io.Writer, ak Kinds) { gen := makeGenericVecVecCmps(typedCmps) for _, g := range gen { g.Write(f) g.RetSame = true } for _, g := range gen { if isBoolRepr(g.Kind()) { g.Write(f) } g.RetSame = false g.Iter = true } for _, g := range gen { g.Write(f) g.RetSame = true } for _, g := range gen { if isBoolRepr(g.Kind()) { g.Write(f) } } } func generateGenericMixedCmp(f io.Writer, ak Kinds) { gen := makeGenericMixedCmps(typedCmps) for _, g := range gen { g.Write(f) g.RetSame = true } for _, g := range gen { if isBoolRepr(g.Kind()) { g.Write(f) } g.RetSame = false g.Iter = true } for _, g := range gen { g.Write(f) g.RetSame = true } for _, g := range gen { if 
isBoolRepr(g.Kind()) { g.Write(f) } g.LeftVec = true g.RetSame = false g.Iter = false } // VS for _, g := range gen { g.Write(f) g.RetSame = true } for _, g := range gen { if isBoolRepr(g.Kind()) { g.Write(f) } g.RetSame = false g.Iter = true } for _, g := range gen { g.Write(f) g.RetSame = true } for _, g := range gen { if isBoolRepr(g.Kind()) { g.Write(f) } } } /* OTHER */ // element wise Min/Max const genericElMinMaxRaw = `func VecMin{{short . | title}}(a, b []{{asType .}}) { a = a[:len(a)] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSV{{short . | title}}(a {{asType .}}, b []{{asType .}}){ for i := range b { if a < b[i]{ b[i] = a } } } func MinVS{{short . | title}}(a []{{asType .}}, b {{asType .}}){ for i := range a { if b < a[i]{ a[i] = b } } } func VecMax{{short . | title}}(a, b []{{asType .}}) { a = a[:len(a)] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSV{{short . | title}}(a {{asType .}}, b []{{asType .}}){ for i := range b { if a > b[i]{ b[i] = a } } } func MaxVS{{short . | title}}(a []{{asType .}}, b {{asType .}}){ for i := range a { if b > a[i]{ a[i] = b } } } ` // Iter Min/Max const genericIterMinMaxRaw = `func MinIterSV{{short . | title}}(a {{asType .}}, b []{{asType .}}, bit Iterator) (err error){ var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil{ err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVS{{short . | title}}(a []{{asType .}}, b {{asType .}}, ait Iterator) (err error){ var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil{ err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIter{{short . | title}}(a , b []{{asType .}}, ait, bit Iterator) (err error){ var i,j int var validi ,validj bool for { if i, validi, err = ait.NextValidity(); err != nil{ err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil{ err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSV{{short . | title}}(a {{asType .}}, b []{{asType .}}, bit Iterator) (err error){ var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil{ err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVS{{short . | title}}(a []{{asType .}}, b {{asType .}}, ait Iterator) (err error){ var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil{ err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIter{{short . 
| title}}(a , b []{{asType .}}, ait, bit Iterator) (err error){ var i,j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil{ err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil{ err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } ` // scalar Min/Max const genericScalarMinMaxRaw = `func Min{{short .}}(a, b {{asType .}}) (c {{asType .}}) {if a < b { return a } return b } func Max{{short .}}(a, b {{asType .}}) (c {{asType .}}) {if a > b { return a } return b } ` var ( genericElMinMax *template.Template genericMinMax *template.Template genericElMinMaxIter *template.Template ) func init() { genericElMinMax = template.Must(template.New("genericVecVecMinMax").Funcs(funcs).Parse(genericElMinMaxRaw)) genericMinMax = template.Must(template.New("genericMinMax").Funcs(funcs).Parse(genericScalarMinMaxRaw)) genericElMinMaxIter = template.Must(template.New("genericIterMinMax").Funcs(funcs).Parse(genericIterMinMaxRaw)) } func generateMinMax(f io.Writer, ak Kinds) { for _, k := range filter(ak.Kinds, isOrd) { genericElMinMax.Execute(f, k) } for _, k := range filter(ak.Kinds, isOrd) { genericMinMax.Execute(f, k) } for _, k := range filter(ak.Kinds, isOrd) { genericElMinMaxIter.Execute(f, k) } } tensor-0.9.24/genlib2/generic_map.go000066400000000000000000000075331426512615100172550ustar00rootroot00000000000000package main import ( "io" "reflect" "text/template" ) const ( fnErrSet = `if {{.Range}}[i], err = {{template "callFunc" .}}; handleNoOp(err) != nil { return }` fnErrIncr = `var x {{asType .Kind}} if x, err = {{template "callFunc" .}}; err != nil { if err = handleNoOp(err); err != nil { return } } {{.Range}}[i] += x ` simpleUnaryCallFunc = `{{template "symbol" .}}({{.Left}}[{{.Index0}}])` ) type Map struct { k reflect.Kind Iter bool Incr bool Err bool } func (fn *Map) Name() string { switch { case fn.Iter && fn.Incr && fn.Err: return "MapIterIncrErr" case fn.Iter && fn.Incr && !fn.Err: return "MapIterIncr" case fn.Iter && !fn.Incr && fn.Err: return "MapIterErr" case fn.Iter && !fn.Incr && !fn.Err: return "MapIter" case !fn.Iter && fn.Incr && fn.Err: return "MapIncrErr" case !fn.Iter && fn.Incr && !fn.Err: return "MapIncr" case !fn.Iter && !fn.Incr && fn.Err: return "MapErr" default: return "Map" } } func (fn *Map) Arity() int { return 1 } func (fn *Map) SymbolTemplate() string { return "fn" } func (fn *Map) TypeClass() TypeClass { return nil } func (fn *Map) IsFunc() bool { return true } func (fn *Map) Kind() reflect.Kind { return fn.k } func (fn *Map) Signature() *Signature { var retErr bool paramNames := []string{"fn", "a"} paramTemplates := []*template.Template{unaryFuncType, sliceType} if fn.Iter { paramNames = append(paramNames, "ait") paramTemplates = append(paramTemplates, iteratorType) retErr = true } if fn.Err { paramTemplates[0] = unaryFuncErrType retErr = true } return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind(), Err: retErr, } } func (fn *Map) WriteBody(w io.Writer) { Range := "a" Left := "a" var T *template.Template var IterName0 string if fn.Iter { T = template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericUnaryIterLoopRaw)) IterName0 = "ait" } else { T = template.Must(template.New(fn.Name()).Funcs(funcs).Parse(genericLoopRaw)) } switch { case fn.Incr && fn.Err: template.Must(T.New("loopbody").Funcs(funcs).Parse(fnErrIncr)) case fn.Incr && !fn.Err:
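// the err-less incr variants reuse the shared basicIncr loop body, which accumulates the function's result into the destination (mirroring the += in fnErrIncr above)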
template.Must(T.New("loopbody").Funcs(funcs).Parse(basicIncr)) case !fn.Incr && fn.Err: template.Must(T.New("loopbody").Funcs(funcs).Parse(fnErrSet)) default: template.Must(T.New("loopbody").Funcs(funcs).Parse(basicSet)) } template.Must(T.New("callFunc").Funcs(funcs).Parse(simpleUnaryCallFunc)) template.Must(T.New("symbol").Funcs(funcs).Parse("fn")) template.Must(T.New("opDo").Funcs(funcs).Parse("")) template.Must(T.New("check").Funcs(funcs).Parse("")) lb := LoopBody{ TypedOp: fn, Range: Range, Left: Left, Index0: "i", IterName0: IterName0, } T.Execute(w, lb) } func (fn *Map) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("\nreturn \n")) w.Write([]byte("}\n\n")) } func makeGenericMaps(incr bool) (retVal []*Map) { for _, k := range allKinds { if incr { if !isAddable(k) { continue } } if isParameterized(k) { continue } m := &Map{k: k} if incr { m.Incr = true } retVal = append(retVal, m) } return } func generateGenericMap(f io.Writer, ak Kinds) { gen0 := makeGenericMaps(false) for _, m := range gen0 { m.Write(f) m.Err = true } for _, m := range gen0 { m.Write(f) m.Err = false m.Iter = true } for _, m := range gen0 { m.Write(f) m.Err = true } for _, m := range gen0 { m.Write(f) } gen1 := makeGenericMaps(true) for _, m := range gen1 { m.Write(f) m.Err = true } for _, m := range gen1 { m.Write(f) m.Err = false m.Iter = true } for _, m := range gen1 { m.Write(f) m.Err = true } for _, m := range gen1 { m.Write(f) } } tensor-0.9.24/genlib2/generic_reduce.go000066400000000000000000000130671426512615100177460ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const reflectBasedReduceRaw = `func ReduceRef(f reflect.Value, fnT reflect.Type, def reflect.Value, l *Dense) interface{} { retVal := def if l.len() == 0 { return retVal.Interface() } args := make([]reflect.Value, 0, fnT.NumIn()) for i := 0; i < l.len(); i++ { v := reflect.ValueOf(l.Get(i)) args = append(args, retVal) args = append(args, v) retVal = f.Call(args)[0] args = args[:0] } return retVal.Interface() } ` const genericReduceRaw = `func Reduce{{short .}}(f func(a, b {{asType .}}) {{asType .}}, def {{asType .}}, l ...{{asType .}}) (retVal {{asType .}}){ retVal = def if len(l) == 0 { return } for _, v := range l { retVal = f(retVal, v) } return } ` const genericSumRaw = `func Sum{{short .}}(a []{{asType .}}) {{asType .}}{ var retVal {{asType .}} a = a[:len(a)] for _, v := range a { retVal += v } return retVal } ` const genericProdRaw = `func Prod{{short .}}(a []{{asType .}}) {{asType .}} { if len(a) == 0 { return 0 } var retVal {{asType .}} = 1 a = a[:len(a)] for _, v := range a { retVal *= v } return retVal } ` const genericSliceMinMaxRaw = `func SliceMin{{short .}}(a []{{asType .}}) {{asType .}}{ if len(a) < 1 { panic("Max of empty slice is meaningless") } return Reduce{{short .}}(Min{{short .}}, a[0], a[1:]...) } func SliceMax{{short .}}(a []{{asType .}}) {{asType .}}{ if len(a) < 1 { panic("Max of empty slice is meaningless") } return Reduce{{short .}}(Max{{short .}}, a[0], a[1:]...) 
} ` const genericReduce0Raw = `func reduceFirst{{short .}}(data, retVal []{{asType .}}, split, size int, fn func(a, b []{{asType .}})) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size - 1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirst{{short .}}(data, retVal []{{asType .}}, split, size int, fn func(a, b {{asType .}}){{asType .}} ){ start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size - 1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } ` const genericReduce0ParRaw = `func reduceFirst{{short .}}(data, retVal []{{asType .}}, split, size int, fn func(a, b {{asType .}}){{asType .}}) { copy(retVal[0:split], data[0:split]) var wg sync.WaitGroup // each goroutine owns a single column, so no two goroutines write the same index of retVal for j := 0; j < split; j++ { wg.Add(1) go func(j int) { defer wg.Done() for i := 1; i < size; i++ { retVal[j] = fn(retVal[j], data[i*split+j]) } }(j) } wg.Wait() } ` const genericReduceLastRaw = `func reduceLast{{short .}}(a, retVal []{{asType .}}, dimSize int, defaultValue {{asType .}}, fn func(a []{{asType .}}){{asType .}}) { var at int for start := 0; start <= len(a) - dimSize; start += dimSize { r := fn(a[start:start+dimSize]) retVal[at] = r at++ } } func genericReduceLast{{short .}}(a, retVal []{{asType .}}, dimSize int, defaultValue {{asType .}}, fn func({{asType .}}, {{asType .}}){{asType .}}) { var at int for start := 0; start <= len(a) - dimSize; start += dimSize { r := Reduce{{short .}}(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } ` const genericReduceDefaultRaw = `func reduceDefault{{short .}}(data, retVal []{{asType .}}, dim0, dimSize, outerStride, stride, expected int, fn func(a,b {{asType .}}){{asType .}}) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i * expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k * stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } ` var ( genericReduce *template.Template genericSum *template.Template genericProd *template.Template genericSliceMinMax *template.Template genericReduce0 *template.Template genericReduceLast *template.Template genericReduceDefault *template.Template ) func init() { genericReduce = template.Must(template.New("genericReduce").Funcs(funcs).Parse(genericReduceRaw)) genericSum = template.Must(template.New("genericSum").Funcs(funcs).Parse(genericSumRaw)) genericProd = template.Must(template.New("genericProd").Funcs(funcs).Parse(genericProdRaw)) genericSliceMinMax = template.Must(template.New("genericSliceMinMax").Funcs(funcs).Parse(genericSliceMinMaxRaw)) genericReduce0 = template.Must(template.New("genericReduce0").Funcs(funcs).Parse(genericReduce0Raw)) genericReduceLast = template.Must(template.New("genericReduceLast").Funcs(funcs).Parse(genericReduceLastRaw)) genericReduceDefault = template.Must(template.New("genericReduceDefault").Funcs(funcs).Parse(genericReduceDefaultRaw)) } func generateGenericReduce(f io.Writer, generic Kinds) { // fmt.Fprintln(f, reflectBasedReduceRaw) for _, k := range generic.Kinds { if !isParameterized(k) { genericReduce.Execute(f, k) } } for _, k := range filter(generic.Kinds, isNumber) { genericSum.Execute(f, k) } for _, k := range filter(generic.Kinds, isNumber) { genericProd.Execute(f, k) } fmt.Fprintf(f, "\n") for _, k := range 
filter(generic.Kinds, isOrd) { if isNumber(k) { genericSliceMinMax.Execute(f, k) } } for _, k := range filter(generic.Kinds, isNotParameterized) { genericReduce0.Execute(f, k) } for _, k := range filter(generic.Kinds, isNotParameterized) { genericReduceLast.Execute(f, k) } for _, k := range filter(generic.Kinds, isNotParameterized) { genericReduceDefault.Execute(f, k) } } tensor-0.9.24/genlib2/generic_unary.go000066400000000000000000000105611426512615100176310ustar00rootroot00000000000000package main import ( "io" "text/template" ) type GenericUnary struct { TypedUnaryOp Iter bool Cond bool } func (fn *GenericUnary) Name() string { if fn.Iter { return fn.TypedUnaryOp.Name() + "Iter" } return fn.TypedUnaryOp.Name() } func (fn *GenericUnary) Signature() *Signature { paramNames := []string{"a"} paramTemplates := []*template.Template{sliceType} var err bool if fn.Iter { paramNames = append(paramNames, "ait") paramTemplates = append(paramTemplates, iteratorType) err = true } return &Signature{ Name: fn.Name(), NameTemplate: typeAnnotatedName, ParamNames: paramNames, ParamTemplates: paramTemplates, Kind: fn.Kind(), Err: err, } } func (fn *GenericUnary) WriteBody(w io.Writer) { var IterName0 string T := template.New(fn.Name()).Funcs(funcs) if fn.Iter { T = template.Must(T.Parse(genericUnaryIterLoopRaw)) IterName0 = "ait" } else { T = template.Must(T.Parse(genericLoopRaw)) } if fn.Cond { template.Must(T.New("loopbody").Parse(fn.SymbolTemplate())) } else { template.Must(T.New("loopbody").Parse(basicSet)) template.Must(T.New("symbol").Parse(fn.SymbolTemplate())) } template.Must(T.New("opDo").Parse(unaryOpDo)) template.Must(T.New("callFunc").Parse(unaryOpCallFunc)) template.Must(T.New("check").Parse("")) lb := LoopBody{ TypedOp: fn.TypedUnaryOp, Range: "a", Left: "a", Index0: "i", IterName0: IterName0, } T.Execute(w, lb) } func (fn *GenericUnary) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) if sig.Err { w.Write([]byte("\nreturn\n")) } w.Write([]byte("}\n\n")) } func generateGenericUncondUnary(f io.Writer, ak Kinds) { var gen []*GenericUnary for _, tu := range typedUncondUnaries { if tc := tu.TypeClass(); tc != nil && !tc(tu.Kind()) { continue } fn := &GenericUnary{ TypedUnaryOp: tu, } gen = append(gen, fn) } for _, g := range gen { g.Write(f) g.Iter = true } for _, g := range gen { g.Write(f) } } func generateGenericCondUnary(f io.Writer, ak Kinds) { var gen []*GenericUnary for _, tu := range typedCondUnaries { if tc := tu.TypeClass(); tc != nil && !tc(tu.Kind()) { continue } // special case for cmplx if isComplex(tu.Kind()) { continue } fn := &GenericUnary{ TypedUnaryOp: tu, Cond: true, } gen = append(gen, fn) } for _, g := range gen { g.Write(f) g.Iter = true } for _, g := range gen { g.Write(f) } } /* SPECIAL CASES */ type GenericUnarySpecial struct { *GenericUnary AdditionalParams []string AdditionalParamTemplates []*template.Template } func (fn *GenericUnarySpecial) Signature() *Signature { sig := fn.GenericUnary.Signature() sig.ParamNames = append(sig.ParamNames, fn.AdditionalParams...) sig.ParamTemplates = append(sig.ParamTemplates, fn.AdditionalParamTemplates...) 
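// the extra names come from specialUnaryOp.additionalParams (Clamp, for example, carries its two bound arguments); generateSpecialGenericUnaries renders each one with scalarType, so they surface as scalar parameters of the receiver's kind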
return sig } func (fn *GenericUnarySpecial) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) if sig.Err { w.Write([]byte("\nreturn\n")) } w.Write([]byte("}\n\n")) } func (fn *GenericUnarySpecial) WriteBody(w io.Writer) { var IterName0 string T := template.New(fn.Name()).Funcs(funcs) if fn.Iter { T = template.Must(T.Parse(genericUnaryIterLoopRaw)) IterName0 = "ait" } else { T = template.Must(T.Parse(genericLoopRaw)) } template.Must(T.New("loopbody").Parse(clampBody)) template.Must(T.New("opDo").Parse(unaryOpDo)) template.Must(T.New("callFunc").Parse(unaryOpCallFunc)) template.Must(T.New("check").Parse("")) lb := LoopBody{ TypedOp: fn.TypedUnaryOp, Range: "a", Left: "a", Index0: "i", IterName0: IterName0, } T.Execute(w, lb) } func generateSpecialGenericUnaries(f io.Writer, ak Kinds) { var gen []*GenericUnarySpecial for _, tu := range typedSpecialUnaries { if tc := tu.TypeClass(); tc != nil && !tc(tu.Kind()) { continue } additional := tu.UnaryOp.(specialUnaryOp).additionalParams tmpls := make([]*template.Template, len(additional)) for i := range tmpls { tmpls[i] = scalarType } fn := &GenericUnarySpecial{ GenericUnary: &GenericUnary{ TypedUnaryOp: tu, }, AdditionalParams: additional, AdditionalParamTemplates: tmpls, } gen = append(gen, fn) } for _, fn := range gen { fn.Write(f) fn.Iter = true } for _, fn := range gen { fn.Write(f) } } tensor-0.9.24/genlib2/generic_utils.go000066400000000000000000000054401426512615100176330ustar00rootroot00000000000000package main import ( "io" "text/template" ) const rangeRaw = `// Range creates a ranged array with a given type. It panics if the Dtype is not supported or does not represent a naturally orderable type (strings, pointers etc) // Do note that the range algorithm is very simple, and simply does increments or decrements of 1. This means for floating point types // you're not able to create a range with a 0.1 increment step, and for complex number types, the imaginary part will always be 0i func Range(dt Dtype, start, end int) interface{} { size := end - start incr := true if start > end { incr = false size = start - end } if size < 0 { panic("Cannot create a range that is negative in size") } switch dt.Kind(){ {{range .Kinds -}} {{if isParameterized . -}} {{else -}} {{if isRangeable . -}} case reflect.{{reflectKind .}}: {{if hasPrefix .String "float" -}} return vec{{short . | lower}}.Range(start, end) {{else -}} retVal := make([]{{asType .}}, size) {{if eq .String "complex64" -}} for i, v := 0, complex(float32(start), float32(0.0)); i < size; i++ { {{else if eq .String "complex128" -}} for i, v := 0, complex(float64(start), float64(0.0)); i < size; i++ { {{else -}} for i, v := 0, {{asType .}}(start); i < size; i++ { {{end -}} retVal[i] = v if incr { v++ } else{ v-- } } return retVal {{end -}} {{end -}} {{end -}} {{end -}} default: err := errors.Errorf("Unrangeable Type %v", dt) panic(err) } } ` const randomRaw = `// Random creates an array of random numbers of the given type. // For complex Dtypes, the imaginary component will be 0. // // This function is only useful in cases where the randomness is not vital. func Random(dt Dtype, size int) interface{} { r := rand.New(rand.NewSource(1337)) switch dt.Kind() { {{range .Kinds -}} {{if isNumber . 
-}} case reflect.{{reflectKind .}}: retVal := make([]{{asType .}}, size) for i := range retVal { retVal[i] = {{if hasPrefix .String "int" -}} {{asType .}}(r.Int()) {{else if hasPrefix .String "uint" -}} {{asType .}}(r.Uint32()) {{else if hasPrefix .String "complex64" -}} complex(r.Float32(), float32(0)) {{else if hasPrefix .String "complex128" -}} complex(r.Float64(), float64(0)) {{else if eq .String "float64" -}} rand.NormFloat64() {{else if eq .String "float32" -}} float32(r.NormFloat64()) {{end -}} } return retVal {{end -}} {{end -}} } panic("unreachable") } ` var ( Range *template.Template Random *template.Template ) func init() { Range = template.Must(template.New("Range").Funcs(funcs).Parse(rangeRaw)) Random = template.Must(template.New("Random").Funcs(funcs).Parse(randomRaw)) } func generateUtils(f io.Writer, generic Kinds) { Range.Execute(f, generic) Random.Execute(f, generic) } tensor-0.9.24/genlib2/genlib.go000066400000000000000000000104431426512615100162360ustar00rootroot00000000000000package main import ( "fmt" "reflect" "strings" ) type TypeClass func(a reflect.Kind) bool func isParameterized(a reflect.Kind) bool { for _, v := range parameterizedKinds { if v == a { return true } } return false } func isNotParameterized(a reflect.Kind) bool { return !isParameterized(a) } func isRangeable(a reflect.Kind) bool { for _, v := range rangeable { if v == a { return true } } return false } func isSpecialized(a reflect.Kind) bool { for _, v := range specialized { if v == a { return true } } return false } func isNumber(a reflect.Kind) bool { for _, v := range number { if v == a { return true } } return false } func isSignedNumber(a reflect.Kind) bool { for _, v := range signedNumber { if v == a { return true } } return false } func isNonComplexNumber(a reflect.Kind) bool { for _, v := range nonComplexNumber { if v == a { return true } } return false } func isAddable(a reflect.Kind) bool { if a == reflect.String { return true } return isNumber(a) } func isComplex(a reflect.Kind) bool { if a == reflect.Complex128 || a == reflect.Complex64 { return true } return false } func panicsDiv0(a reflect.Kind) bool { for _, v := range div0panics { if v == a { return true } } return false } func isEq(a reflect.Kind) bool { for _, v := range elEq { if v == a { return true } } return false } func isOrd(a reflect.Kind) bool { for _, v := range elOrd { if v == a { return true } } return false } func isBoolRepr(a reflect.Kind) bool { for _, v := range boolRepr { if v == a { return true } } return false } func mathPkg(a reflect.Kind) string { if a == reflect.Float64 { return "math." } if a == reflect.Float32 { return "math32." } if a == reflect.Complex64 || a == reflect.Complex128 { return "cmplx." } return "" } func vecPkg(a reflect.Kind) string { if a == reflect.Float64 { return "vecf64." } if a == reflect.Float32 { return "vecf32." 
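// gorgonia.org/vecf64 and gorgonia.org/vecf32 hold the specialised float kernels that the float fast path in GenericVecVecArith.Write links against; every other kind returns "" below and falls back to the plain generated loops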
} return "" } func getalias(name string) string { if nice, ok := nameMaps[name]; ok { return nice } return name } func interfaceName(name string) string { switch name { case "Square": return "Squarer" case "Cube": return "Cuber" case "Eq", "ElEq": return "ElEqer" case "Ne", "ElNe": return "ElEqer" default: return name + "er" } } func bitSizeOf(a reflect.Kind) string { switch a { case reflect.Int, reflect.Uint: return "0" case reflect.Int8, reflect.Uint8: return "8" case reflect.Int16, reflect.Uint16: return "16" case reflect.Int32, reflect.Uint32, reflect.Float32: return "32" case reflect.Int64, reflect.Uint64, reflect.Float64: return "64" } return "UNKNOWN BIT SIZE" } func trueValue(a reflect.Kind) string { switch a { case reflect.String: return `"true"` case reflect.Bool: return "true" default: return "1" } } func falseValue(a reflect.Kind) string { switch a { case reflect.String: return `"false"` case reflect.Bool: return "false" default: return "0" } } func isFloat(a reflect.Kind) bool { if a == reflect.Float32 || a == reflect.Float64 { return true } return false } func isFloatCmplx(a reflect.Kind) bool { if a == reflect.Float32 || a == reflect.Float64 || a == reflect.Complex64 || a == reflect.Complex128 { return true } return false } func isntFloat(a reflect.Kind) bool { return !isFloat(a) } func isntComplex(a reflect.Kind) bool { return !isComplex(a) } func short(a reflect.Kind) string { return shortNames[a] } func clean(a string) string { if a == "unsafe.pointer" { return "unsafe.Pointer" } return a } func unexport(a string) string { return strings.ToLower(string(a[0])) + a[1:] } func strip(a string) string { return strings.Replace(a, ".", "", -1) } func reflectKind(a reflect.Kind) string { return strip(strings.Title(a.String())) } func asType(a reflect.Kind) string { return clean(a.String()) } func sliceOf(a reflect.Kind) string { s := fmt.Sprintf("%ss()", strings.Title(a.String())) return strip(clean(s)) } func getOne(a reflect.Kind) string { return fmt.Sprintf("Get%s", short(a)) } func setOne(a reflect.Kind) string { return fmt.Sprintf("Set%s", short(a)) } func filter(a []reflect.Kind, is func(reflect.Kind) bool) (retVal []reflect.Kind) { for _, k := range a { if is(k) { retVal = append(retVal, k) } } return } tensor-0.9.24/genlib2/internaleng.go000066400000000000000000000414771426512615100173170ustar00rootroot00000000000000package main import ( "fmt" "io" "reflect" "strings" "text/template" ) type InternalEngArithMethod struct { BinOp Kinds []reflect.Kind Incr bool Iter bool WithRecv bool } type eLoopBody struct { BinOp Err bool Kinds []reflect.Kind } func (fn *InternalEngArithMethod) Name() string { switch { case fn.Incr && fn.Iter: return fmt.Sprintf("%sIterIncr", fn.BinOp.Name()) case fn.Incr && !fn.Iter: return fmt.Sprintf("%sIncr", fn.BinOp.Name()) case !fn.Incr && fn.Iter: return fmt.Sprintf("%sIter", fn.BinOp.Name()) case fn.WithRecv: return fmt.Sprintf("%sRecv", fn.BinOp.Name()) default: return fn.BinOp.Name() } } func (fn *InternalEngArithMethod) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.Iter && fn.Incr: paramNames = []string{"t", "a", "b", "incr", "ait", "bit", "iit"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, arrayType, iteratorType, iteratorType, iteratorType} case fn.Iter && !fn.Incr: paramNames = []string{"t", "a", "b", "ait", "bit"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, iteratorType, iteratorType} case !fn.Iter && fn.Incr: paramNames = 
[]string{"t", "a", "b", "incr"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, arrayType} case fn.WithRecv: paramNames = []string{"t", "a", "b", "recv"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, arrayType} default: paramNames = []string{"t", "a", "b"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType} } return &Signature{ Name: fn.Name(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: true, } } func (fn *InternalEngArithMethod) WriteBody(w io.Writer) { var T *template.Template switch { case fn.Incr && fn.Iter: T = eArithIterIncr case fn.Incr && !fn.Iter: T = eArithIncr case fn.Iter && !fn.Incr: T = eArithIter case fn.WithRecv: T = eArithRecv default: T = eArith } lb := eLoopBody{ BinOp: fn.BinOp, Kinds: fn.Kinds, } T.Execute(w, lb) } func (fn *InternalEngArithMethod) Write(w io.Writer) { w.Write([]byte("func (e E) ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateEArith(f io.Writer, kinds Kinds) { var methods []*InternalEngArithMethod for _, bo := range arithBinOps { var ks []reflect.Kind for _, k := range kinds.Kinds { if tc := bo.TypeClass(); tc != nil && tc(k) { ks = append(ks, k) } } meth := &InternalEngArithMethod{ BinOp: bo, Kinds: ks, } methods = append(methods, meth) } // write vanilla for _, meth := range methods { meth.Write(f) meth.Incr = true } // write incr for _, meth := range methods { meth.Write(f) meth.Incr = false meth.Iter = true } // write iter for _, meth := range methods { meth.Write(f) meth.Incr = true } // write iter incr for _, meth := range methods { meth.Write(f) meth.Incr = false meth.Iter = false } // write recv for _, meth := range methods { meth.WithRecv = true meth.Write(f) } } /* MAP */ type InternalEngMap struct { Kinds []reflect.Kind Iter bool } func (fn *InternalEngMap) Signature() *Signature { paramNames := []string{"t", "fn", "a", "incr"} paramTemplates := []*template.Template{reflectType, interfaceType, arrayType, boolType} name := "Map" if fn.Iter { paramNames = append(paramNames, "ait") paramTemplates = append(paramTemplates, iteratorType) name += "Iter" } return &Signature{ Name: name, NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: true, } } func (fn *InternalEngMap) WriteBody(w io.Writer) { T := eMap if fn.Iter { T = eMapIter } template.Must(T.New("fntype0").Funcs(funcs).Parse(unaryFuncTypeRaw)) template.Must(T.New("fntype1").Funcs(funcs).Parse(unaryFuncErrTypeRaw)) lb := eLoopBody{ Kinds: fn.Kinds, } T.Execute(w, lb) } func (fn *InternalEngMap) Write(w io.Writer) { w.Write([]byte("func (e E) ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) w.Write([]byte("\nreturn\n}\n\n")) } func generateEMap(f io.Writer, kinds Kinds) { m := new(InternalEngMap) for _, k := range kinds.Kinds { if isParameterized(k) { continue } m.Kinds = append(m.Kinds, k) } m.Write(f) m.Iter = true m.Write(f) } /* Cmp */ // InternalEngCmpMethod is exactly the same structure as the arith one, except it's Same instead of Incr. // Some copy and paste leads to more clarity, rather than reusing the structure. 
type InternalEngCmp struct { BinOp Kinds []reflect.Kind RetSame bool Iter bool } func (fn *InternalEngCmp) Name() string { switch { case fn.Iter && fn.RetSame: return fmt.Sprintf("%sSameIter", fn.BinOp.Name()) case fn.Iter && !fn.RetSame: return fmt.Sprintf("%sIter", fn.BinOp.Name()) case !fn.Iter && fn.RetSame: return fmt.Sprintf("%sSame", fn.BinOp.Name()) default: return fn.BinOp.Name() } } func (fn *InternalEngCmp) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.Iter && fn.RetSame: paramNames = []string{"t", "a", "b", "ait", "bit"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, iteratorType, iteratorType} case fn.Iter && !fn.RetSame: paramNames = []string{"t", "a", "b", "retVal", "ait", "bit", "rit"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, arrayType, iteratorType, iteratorType, iteratorType} case !fn.Iter && fn.RetSame: paramNames = []string{"t", "a", "b"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType} default: paramNames = []string{"t", "a", "b", "retVal"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, arrayType} } return &Signature{ Name: fn.Name(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: true, } } func (fn *InternalEngCmp) WriteBody(w io.Writer) { var T *template.Template switch { case fn.Iter && fn.RetSame: T = eCmpSameIter case fn.Iter && !fn.RetSame: T = eCmpBoolIter case !fn.Iter && fn.RetSame: T = eCmpSame default: T = eCmpBool } lb := eLoopBody{ BinOp: fn.BinOp, Kinds: fn.Kinds, } T.Execute(w, lb) } func (fn *InternalEngCmp) Write(w io.Writer) { w.Write([]byte("func (e E) ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateECmp(f io.Writer, kinds Kinds) { var methods []*InternalEngCmp for _, bo := range cmpBinOps { var ks []reflect.Kind for _, k := range kinds.Kinds { if tc := bo.TypeClass(); tc != nil && tc(k) { ks = append(ks, k) } } meth := &InternalEngCmp{ BinOp: bo, Kinds: ks, } methods = append(methods, meth) } for _, meth := range methods { meth.Write(f) meth.RetSame = true } for _, meth := range methods { meth.Write(f) meth.RetSame = false meth.Iter = true } for _, meth := range methods { meth.Write(f) meth.RetSame = true } for _, meth := range methods { meth.Write(f) } } /* MIN/MAX BETWEEN */ type InternalEngMinMaxBetween struct { BinOp Kinds []reflect.Kind Iter bool } func (fn *InternalEngMinMaxBetween) Name() string { name := fn.BinOp.Name() switch { case fn.Iter: return fmt.Sprintf("%sBetweenIter", name) default: return name + "Between" } } func (fn *InternalEngMinMaxBetween) Signature() *Signature { var paramNames []string var paramTemplates []*template.Template switch { case fn.Iter: paramNames = []string{"t", "a", "b", "ait", "bit"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, iteratorType, iteratorType} default: paramNames = []string{"t", "a", "b"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType} } return &Signature{ Name: fn.Name(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: true, } } func (fn *InternalEngMinMaxBetween) WriteBody(w io.Writer) { var T *template.Template switch { case fn.Iter: T = eMinMaxIter default: T = eMinMaxSame } lb := eLoopBody{ BinOp: fn.BinOp, Kinds: fn.Kinds, } T.Execute(w, lb) } func (fn *InternalEngMinMaxBetween) Write(w io.Writer) { w.Write([]byte("func (e E) ")) 
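// as with the arith and cmp generators, the method is emitted on the internal engine type E, so the output reads like "func (e E) MaxBetween(t reflect.Type, a, b array) (err error) { ... }"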
sig := fn.Signature() sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateEMinMaxBetween(f io.Writer, kinds Kinds) { minmaxOps := []cmpOp{cmpBinOps[0], cmpBinOps[2]} // Gt and Lt minmaxOps[0].name = "Max" minmaxOps[1].name = "Min" var methods []*InternalEngMinMaxBetween for _, bo := range minmaxOps { var ks []reflect.Kind for _, k := range kinds.Kinds { if tc := bo.TypeClass(); tc != nil && tc(k) { ks = append(ks, k) } } meth := &InternalEngMinMaxBetween{ BinOp: bo, Kinds: ks, } methods = append(methods, meth) } for _, meth := range methods { meth.Write(f) meth.Iter = true } for _, meth := range methods { meth.Write(f) } } /* REDUCTION */ type InternalEngReduce struct { Kinds []reflect.Kind Dim int // 0 == first dim, -1 == last dim Flat bool } func (fn *InternalEngReduce) Name() string { switch { case fn.Flat: return "Reduce" case fn.Dim == 0: return "ReduceFirst" case fn.Dim < 0: return "ReduceLast" case fn.Dim > 0: return "ReduceDefault" } panic("unreachable") } func (fn *InternalEngReduce) Signature() *Signature { var paramNames, retVals []string var paramTemplates, retValTemplates []*template.Template switch { case fn.Flat: paramNames = []string{"t", "a", "defaultValue", "fn"} paramTemplates = []*template.Template{reflectType, arrayType, interfaceType, interfaceType} retVals = []string{"retVal"} retValTemplates = []*template.Template{interfaceType} case fn.Dim == 0: paramNames = []string{"t", "data", "retVal", "split", "size", "fn"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, intType, intType, interfaceType} case fn.Dim < 0: paramNames = []string{"t", "data", "retVal", "dimSize", "defaultValue", "fn"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, intType, interfaceType, interfaceType} case fn.Dim > 0: paramNames = []string{"t", "data", "retVal", "dim0", "dimSize", "outerStride", "stride", "expected", "fn"} paramTemplates = []*template.Template{reflectType, arrayType, arrayType, intType, intType, intType, intType, intType, interfaceType} } return &Signature{ Name: fn.Name(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, RetVals: retVals, RetValTemplates: retValTemplates, Err: true, } } func (fn *InternalEngReduce) WriteBody(w io.Writer) { var T *template.Template switch { case fn.Flat: T = eReduce case fn.Dim == 0: T = eReduceFirst case fn.Dim < 0: T = eReduceLast case fn.Dim > 0: T = eReduceDefault } T.Execute(w, fn) } func (fn *InternalEngReduce) Write(w io.Writer) { w.Write([]byte("func (e E) ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateEReduce(f io.Writer, kinds Kinds) { ks := filter(kinds.Kinds, isNotParameterized) fn := &InternalEngReduce{ Kinds: ks, } fn.Write(f) fn.Dim = -1 fn.Write(f) fn.Dim = 1 fn.Write(f) fn.Flat = true fn.Write(f) } /* UNARY */ type InternalEngUnary struct { UnaryOp Kinds []reflect.Kind Iter bool } func (fn *InternalEngUnary) Signature() *Signature { paramNames := []string{"t", "a"} paramTemplates := []*template.Template{reflectType, arrayType} if fn.Iter { paramNames = append(paramNames, "ait") paramTemplates = append(paramTemplates, iteratorType) } if strings.HasPrefix(fn.Name(), "Clamp") { paramNames = append(paramNames, "minVal", "maxVal") paramTemplates = append(paramTemplates, interfaceType, interfaceType) } return &Signature{ Name: fn.Name(), NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, Err: true, } } func (fn 
*InternalEngUnary) Name() string { n := fn.UnaryOp.Name() if fn.Iter { n += "Iter" } return n } func (fn *InternalEngUnary) WriteBody(w io.Writer) { var T *template.Template switch { case fn.Name() == "Clamp": T = eUnaryClamp case fn.Name() == "ClampIter": T = eUnaryClampIter case fn.Iter: T = eUnaryIter default: T = eUnary } T.Execute(w, fn) } func (fn *InternalEngUnary) Write(w io.Writer) { w.Write([]byte("func (e E) ")) sig := fn.Signature() sig.Write(w) w.Write([]byte("{ \n")) fn.WriteBody(w) w.Write([]byte("}\n\n")) } func generateUncondEUnary(f io.Writer, kinds Kinds) { var unaries []*InternalEngUnary for _, u := range unconditionalUnaries { var ks []reflect.Kind for _, k := range kinds.Kinds { if tc := u.TypeClass(); tc != nil && !tc(k) { continue } ks = append(ks, k) } ieu := &InternalEngUnary{ UnaryOp: u, Kinds: ks, } unaries = append(unaries, ieu) } for _, u := range unaries { u.Write(f) u.Iter = true } for _, u := range unaries { u.Write(f) } } func generateCondEUnary(f io.Writer, kinds Kinds) { var unaries []*InternalEngUnary for _, u := range conditionalUnaries { var ks []reflect.Kind for _, k := range kinds.Kinds { if tc := u.TypeClass(); tc != nil && !tc(k) { continue } // special case for complex if isComplex(k) { continue } ks = append(ks, k) } ieu := &InternalEngUnary{ UnaryOp: u, Kinds: ks, } unaries = append(unaries, ieu) } for _, u := range unaries { u.Write(f) u.Iter = true } for _, u := range unaries { u.Write(f) } } func generateSpecialEUnaries(f io.Writer, kinds Kinds) { var unaries []*InternalEngUnary for _, u := range specialUnaries { var ks []reflect.Kind for _, k := range kinds.Kinds { if tc := u.TypeClass(); tc != nil && !tc(k) { continue } ks = append(ks, k) } ieu := &InternalEngUnary{ UnaryOp: u, Kinds: ks, } unaries = append(unaries, ieu) } for _, u := range unaries { u.Write(f) u.Iter = true } for _, u := range unaries { u.Write(f) } } /* Argmethods */ type InternalEngArgMethod struct { Name string Masked bool Flat bool Kinds []reflect.Kind } func (fn *InternalEngArgMethod) Signature() *Signature { var name string var paramNames []string var paramTemplates []*template.Template var retVals []string var retValTemplates []*template.Template var err bool switch { case fn.Masked && fn.Flat: name = fmt.Sprintf("Arg%sFlatMasked", fn.Name) paramNames = []string{"t", "a", "mask"} paramTemplates = []*template.Template{reflectType, arrayType, boolsType} retVals = []string{"retVal"} retValTemplates = []*template.Template{intType} err = false case fn.Masked && !fn.Flat: name = fmt.Sprintf("Arg%sIterMasked", fn.Name) paramNames = []string{"t", "a", "mask", "it", "lastSize"} paramTemplates = []*template.Template{reflectType, arrayType, boolsType, iteratorType, intType} retVals = []string{"indices"} retValTemplates = []*template.Template{intsType} err = true case !fn.Masked && fn.Flat: name = fmt.Sprintf("Arg%sFlat", fn.Name) paramNames = []string{"t", "a"} paramTemplates = []*template.Template{reflectType, arrayType} retVals = []string{"retVal"} retValTemplates = []*template.Template{intType} err = false default: name = fmt.Sprintf("Arg%sIter", fn.Name) paramNames = []string{"t", "a", "it", "lastSize"} paramTemplates = []*template.Template{reflectType, arrayType, iteratorType, intType} retVals = []string{"indices"} retValTemplates = []*template.Template{intsType} err = true } return &Signature{ Name: name, NameTemplate: plainName, ParamNames: paramNames, ParamTemplates: paramTemplates, RetVals: retVals, RetValTemplates: retValTemplates, Err: err, } } func (fn 
*InternalEngArgMethod) WriteBody(w io.Writer) { switch { case fn.Masked && fn.Flat: eArgmaxFlatMasked.Execute(w, fn) case fn.Masked && !fn.Flat: eArgmaxMasked.Execute(w, fn) case !fn.Masked && fn.Flat: eArgmaxFlat.Execute(w, fn) default: eArgmax.Execute(w, fn) } } func (fn *InternalEngArgMethod) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func (e E) ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("}\n")) } func generateInternalEngArgmethods(f io.Writer, ak Kinds) { ks := filter(ak.Kinds, isOrd) meths := []*InternalEngArgMethod{ &InternalEngArgMethod{Name: "max", Kinds: ks}, &InternalEngArgMethod{Name: "min", Kinds: ks}, } // default for _, fn := range meths { fn.Write(f) fn.Masked = true } // masked for _, fn := range meths { fn.Write(f) fn.Flat = true } // flat masked for _, fn := range meths { fn.Write(f) fn.Flat = true fn.Masked = false } // flat for _, fn := range meths { fn.Write(f) } } tensor-0.9.24/genlib2/main.go000066400000000000000000000170521426512615100157250ustar00rootroot00000000000000package main import ( "io" "io/ioutil" "log" "os" "os/exec" "os/user" "path" "path/filepath" "reflect" "runtime" "strings" ) const genmsg = "Code generated by genlib2. DO NOT EDIT." var ( gopath, tensorPkgLoc, nativePkgLoc, execLoc, storageLoc string ) type Kinds struct { Kinds []reflect.Kind } func init() { gopath = os.Getenv("GOPATH") // now that go can have a default gopath, this checks that path if gopath == "" { usr, err := user.Current() if err != nil { log.Fatal(err) } gopath = path.Join(usr.HomeDir, "go") stat, err := os.Stat(gopath) if err != nil { log.Fatal(err) } if !stat.IsDir() { log.Fatal("You need to define a $GOPATH") } } tensorPkgLoc = path.Join(gopath, "src/gorgonia.org/tensor") nativePkgLoc = path.Join(gopath, "src/gorgonia.org/tensor/native") execLoc = path.Join(gopath, "src/gorgonia.org/tensor/internal/execution") storageLoc = path.Join(gopath, "src/gorgonia.org/tensor/internal/storage") } func main() { pregenerate() // storage pipeline(storageLoc, "consts.go", Kinds{allKinds}, generateReflectTypes) pipeline(storageLoc, "getset.go", Kinds{allKinds}, generateHeaderGetSet) pipeline(tensorPkgLoc, "array_getset.go", Kinds{allKinds}, generateArrayMethods) // execution pipeline(execLoc, "generic_arith_vv.go", Kinds{allKinds}, generateGenericVecVecArith) pipeline(execLoc, "generic_arith_mixed.go", Kinds{allKinds}, generateGenericMixedArith) // pipeline(execLoc, "generic_arith.go", Kinds{allKinds}, generateGenericScalarScalarArith) // generate once and manually edit later pipeline(execLoc, "generic_cmp_vv.go", Kinds{allKinds}, generateGenericVecVecCmp) pipeline(execLoc, "generic_cmp_mixed.go", Kinds{allKinds}, generateGenericMixedCmp) pipeline(execLoc, "generic_minmax.go", Kinds{allKinds}, generateMinMax) pipeline(execLoc, "generic_map.go", Kinds{allKinds}, generateGenericMap) pipeline(execLoc, "generic_unary.go", Kinds{allKinds}, generateGenericUncondUnary, generateGenericCondUnary, generateSpecialGenericUnaries) pipeline(execLoc, "generic_reduce.go", Kinds{allKinds}, generateGenericReduce) pipeline(execLoc, "generic_argmethods.go", Kinds{allKinds}, generateGenericArgMethods) pipeline(tensorPkgLoc, "generic_utils.go", Kinds{allKinds}, generateUtils) // level 1 aggregation pipeline(execLoc, "eng_arith.go", Kinds{allKinds}, generateEArith) pipeline(execLoc, "eng_map.go", Kinds{allKinds}, generateEMap) pipeline(execLoc, "eng_cmp.go", Kinds{allKinds}, generateECmp) pipeline(execLoc, "eng_minmaxbetween.go", Kinds{allKinds}, generateEMinMaxBetween) 
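// Every pipeline call in this function follows the same contract: recreate
// the named file in the target package, write its package clause via
// writePkgName, run each generator func over all the kinds, then tidy the
// output with goimports/sed/gofmt (see pipeline below).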
pipeline(execLoc, "eng_reduce.go", Kinds{allKinds}, generateEReduce) pipeline(execLoc, "eng_unary.go", Kinds{allKinds}, generateUncondEUnary, generateCondEUnary, generateSpecialEUnaries) pipeline(execLoc, "reduction_specialization.go", Kinds{allKinds}, generateReductionSpecialization) pipeline(execLoc, "eng_argmethods.go", Kinds{allKinds}, generateInternalEngArgmethods) // level 2 aggregation pipeline(tensorPkgLoc, "defaultengine_arith.go", Kinds{allKinds}, generateStdEngArith) pipeline(tensorPkgLoc, "defaultengine_cmp.go", Kinds{allKinds}, generateStdEngCmp) pipeline(tensorPkgLoc, "defaultengine_unary.go", Kinds{allKinds}, generateStdEngUncondUnary, generateStdEngCondUnary) pipeline(tensorPkgLoc, "defaultengine_minmax.go", Kinds{allKinds}, generateStdEngMinMax) // level 3 aggregation pipeline(tensorPkgLoc, "dense_arith.go", Kinds{allKinds}, generateDenseArith) pipeline(tensorPkgLoc, "dense_cmp.go", Kinds{allKinds}, generateDenseCmp) // generate once, manually edit later // level 4 aggregation pipeline(tensorPkgLoc, "api_unary.go", Kinds{allKinds}, generateUncondUnaryAPI, generateCondUnaryAPI, generateSpecialUnaryAPI) // dense methods (old genlib style) pipeline(tensorPkgLoc, "dense_generated.go", Kinds{allKinds}, generateDenseConstructionFns) pipeline(tensorPkgLoc, "dense_io.go", Kinds{allKinds}, generateDenseIO) pipeline(tensorPkgLoc, "dense_compat.go", Kinds{allKinds}, generateDenseCompat) pipeline(tensorPkgLoc, "dense_maskcmp_methods.go", Kinds{allKinds}, generateDenseMaskedMethods) // tests pipeline(tensorPkgLoc, "test_test.go", Kinds{allKinds}, generateTestUtils) pipeline(tensorPkgLoc, "dense_argmethods_test.go", Kinds{allKinds}, generateArgmethodsTests) pipeline(tensorPkgLoc, "dense_getset_test.go", Kinds{allKinds}, generateDenseGetSetTests) // old-genlib style tests pipeline(tensorPkgLoc, "dense_reduction_test.go", Kinds{allKinds}, generateDenseReductionTests, generateDenseReductionMethodsTests) pipeline(tensorPkgLoc, "dense_compat_test.go", Kinds{allKinds}, generateDenseCompatTests) pipeline(tensorPkgLoc, "dense_generated_test.go", Kinds{allKinds}, generateDenseConsTests) pipeline(tensorPkgLoc, "dense_maskcmp_methods_test.go", Kinds{allKinds}, generateMaskCmpMethodsTests) // qc-style tests pipeline(tensorPkgLoc, "api_arith_generated_test.go", Kinds{allKinds}, generateAPIArithTests, generateAPIArithScalarTests) pipeline(tensorPkgLoc, "dense_arith_test.go", Kinds{allKinds}, generateDenseMethodArithTests, generateDenseMethodScalarTests) pipeline(tensorPkgLoc, "api_unary_generated_test.go", Kinds{allKinds}, generateAPIUnaryTests) pipeline(tensorPkgLoc, "api_cmp_generated_test.go", Kinds{allKinds}, generateAPICmpTests, generateAPICmpMixedTests) pipeline(tensorPkgLoc, "dense_cmp_test.go", Kinds{allKinds}, generateDenseMethodCmpTests, generateDenseMethodCmpMixedTests) // native iterators pipeline(nativePkgLoc, "iterator_native.go", Kinds{allKinds}, generateNativeIterators) pipeline(nativePkgLoc, "iterator_native_test.go", Kinds{allKinds}, generateNativeIteratorTests) pipeline(nativePkgLoc, "iterator_native2.go", Kinds{allKinds}, generateNativeSelect) pipeline(nativePkgLoc, "iterator_native2_test.go", Kinds{allKinds}, generateNativeSelectTests) } func pipeline(pkg, filename string, kinds Kinds, fns ...func(io.Writer, Kinds)) { fullpath := path.Join(pkg, filename) f, err := os.Create(fullpath) if err != nil { log.Printf("fullpath %q", fullpath) log.Fatal(err) } defer f.Close() writePkgName(f, pkg) for _, fn := range fns { fn(f, kinds) } // gofmt and goimports this stuff cmd := 
exec.Command("goimports", "-w", fullpath) if err = cmd.Run(); err != nil { log.Fatalf("Go imports failed with %v for %q", err, fullpath) } // account for differences in the postix from the linux sed if runtime.GOOS == "darwin" || strings.HasSuffix(runtime.GOOS, "bsd") { cmd = exec.Command("sed", "-i", "", `s/github.com\/alecthomas\/assert/github.com\/stretchr\/testify\/assert/g`, fullpath) } else { cmd = exec.Command("sed", "-E", "-i", `s/github.com\/alecthomas\/assert/github.com\/stretchr\/testify\/assert/g`, fullpath) } if err = cmd.Run(); err != nil { if err.Error() != "exit status 4" { // exit status 4 == not found log.Fatalf("sed failed with %v for %q", err.Error(), fullpath) } } cmd = exec.Command("gofmt", "-s", "-w", fullpath) if err = cmd.Run(); err != nil { log.Fatalf("Gofmt failed for %q", fullpath) } } // pregenerate cleans up all files that were previously generated. func pregenerate() error { if err := cleanup(storageLoc); err != nil { return err } if err := cleanup(execLoc); err != nil { return err } if err := cleanup(nativePkgLoc); err != nil { return err } return cleanup(tensorPkgLoc) } func cleanup(loc string) error { pattern := path.Join(loc, "*.go") matches, err := filepath.Glob(pattern) if err != nil { return err } for _, m := range matches { b, err := ioutil.ReadFile(m) if err != nil { return err } s := string(b) if strings.Contains(s, genmsg) { if err := os.Remove(m); err != nil { return err } } } return nil } tensor-0.9.24/genlib2/native_iterator.go000066400000000000000000000112171426512615100201750ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const checkNativeiterable = `func checkNativeIterable(t *Dense, dims int, dt Dtype) error { // checks: if !t.IsNativelyAccessible() { return errors.Errorf("Cannot convert *Dense to *mat.Dense. Data is inaccessible") } if t.Shape().Dims() != dims { return errors.Errorf("Cannot convert *Dense to native iterator. Expected number of dimension: %d, T has got %d dimensions (Shape: %v)", dims, t.Dims(), t.Shape()) } if t.F() || t.RequiresIterator() { return errors.Errorf("Not yet implemented: native matrix for colmajor or unpacked matrices") } if t.Dtype() != dt { return errors.Errorf("Conversion to native iterable only works on %v. Got %v", dt, t.Dtype()) } return nil } ` const nativeIterRaw = `// Vector{{short .}} converts a *Dense into a []{{asType .}} // If the *Dense does not represent a vector of the wanted type, it will return // an error. func Vector{{short .}}(t *Dense) (retVal []{{asType .}}, err error) { if err = checkNativeIterable(t, 1, {{reflectKind .}}); err != nil { return nil, err } return t.{{sliceOf .}}, nil } // Matrix{{short .}} converts a *Dense into a [][]{{asType .}} // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func Matrix{{short .}}(t *Dense) (retVal [][]{{asType .}}, err error) { if err = checkNativeIterable(t, 2, {{reflectKind .}}); err != nil { return nil, err } data := t.{{sliceOf .}} shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]{{asType .}}, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]{{asType .}}, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3{{short .}} converts a *Dense into a [][][]{{asType .}}. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. 
func Tensor3{{short .}}(t *Dense) (retVal [][][]{{asType .}}, err error) { if err = checkNativeIterable(t, 3, {{reflectKind .}}); err != nil { return nil, err } data := t.{{sliceOf .}} shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]{{asType .}}, layers) for i := range retVal { retVal[i] = make([][]{{asType .}}, rows) for j := range retVal[i] { retVal[i][j] = make([]{{asType .}}, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } ` const nativeIterTestRaw = `func Test_Vector{{short .}}(t *testing.T) { assert := assert.New(t) var T *Dense {{if isRangeable . -}} T = New(WithBacking(Range({{reflectKind .}}, 0, 6)), WithShape(6)) {{else -}} T = New(Of({{reflectKind .}}), WithShape(6)) {{end -}} it, err := Vector{{short .}}(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_Matrix{{short .}}(t *testing.T) { assert := assert.New(t) var T *Dense {{if isRangeable . -}} T = New(WithBacking(Range({{reflectKind .}}, 0, 6)), WithShape(2, 3)) {{else -}} T = New(Of({{reflectKind .}}), WithShape(2, 3)) {{end -}} it, err := Matrix{{short .}}(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3{{short .}}(t *testing.T) { assert := assert.New(t) var T *Dense {{if isRangeable . -}} T = New(WithBacking(Range({{reflectKind .}}, 0, 24)), WithShape(2, 3, 4)) {{else -}} T = New(Of({{reflectKind .}}), WithShape(2, 3, 4)) {{end -}} it, err := Tensor3{{short .}}(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } ` var ( NativeIter *template.Template NativeIterTest *template.Template ) func init() { NativeIter = template.Must(template.New("NativeIter").Funcs(funcs).Parse(nativeIterRaw)) NativeIterTest = template.Must(template.New("NativeIterTest").Funcs(funcs).Parse(nativeIterTestRaw)) } func generateNativeIterators(f io.Writer, ak Kinds) { fmt.Fprintf(f, importUnqualifiedTensor) fmt.Fprintf(f, "%v\n", checkNativeiterable) ks := filter(ak.Kinds, isSpecialized) for _, k := range ks { fmt.Fprintf(f, "/* Native Iterables for %v */\n\n", k) NativeIter.Execute(f, k) fmt.Fprint(f, "\n\n") } } func generateNativeIteratorTests(f io.Writer, ak Kinds) { fmt.Fprintf(f, importUnqualifiedTensor) ks := filter(ak.Kinds, isSpecialized) for _, k := range ks { NativeIterTest.Execute(f, k) fmt.Fprint(f, "\n\n") } } tensor-0.9.24/genlib2/native_select.go000066400000000000000000000073651426512615100176340ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const checkNativeSelectable = `func checkNativeSelectable(t *Dense, axis int, dt Dtype) error { if !t.IsNativelyAccessible() { return errors.New("Cannot select on non-natively accessible data") } if axis >= t.Shape().Dims() && !(t.IsScalar() && axis == 0) { return errors.Errorf("Cannot select on axis %d. Shape is %v", axis, t.Shape()) } if t.F() || t.RequiresIterator() { return errors.Errorf("Not yet implemented: native select for colmajor or unpacked matrices") } if t.Dtype() != dt { return errors.Errorf("Native selection only works on %v. Got %v", dt, t.Dtype()) } return nil } ` const nativeSelectRaw = `// Select{{short .}} creates a slice of flat data types. See Example of NativeSelectF64. 
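// The returned rows alias t's backing data rather than copying it. As a
// worked example: for a tensor of shape (2, 3, 4, 5), selecting on axis 1
// yields 2*3 = 6 rows, each stride-of-axis-1 = 4*5 = 20 elements long.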
func Select{{short .}}(t *Dense, axis int) (retVal [][]{{asType .}}, err error) { if err := checkNativeSelectable(t, axis, {{reflectKind .}}); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]{{asType .}}, 1) retVal[0] = t.{{sliceOf .}} case 2: if axis == 0 { return Matrix{{short .}}(t) } fallthrough default: // size := t.Shape()[axis] data := t.{{sliceOf .}} stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]{{asType .}}, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]{{asType .}}, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } ` const nativeSelectTestRaw = `func TestSelect{{short .}}(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]{{asType .}} T = New(Of({{reflectKind .}}), WithShape(2, 3, 4, 5), ) if x, err = Select{{short .}}(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3, 4, 5), ) if x, err = Select{{short .}}(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3, 4, 5), ) if x, err = Select{{short .}}(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3), ) if x, err = Select{{short .}}(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3), ) if x, err = Select{{short .}}(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar({{if eq .String "bool" -}}false{{else if eq .String "string" -}}""{{else -}}{{asType .}}(0) {{end -}} )) if x, err = Select{{short .}}(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = Select{{short .}}(T, 10); err == nil{ t.Fatal("Expected errors") } } ` var ( NativeSelect *template.Template NativeSelectTest *template.Template ) func init() { NativeSelect = template.Must(template.New("NativeSelect").Funcs(funcs).Parse(nativeSelectRaw)) NativeSelectTest = template.Must(template.New("NativeSelectTest").Funcs(funcs).Parse(nativeSelectTestRaw)) } func generateNativeSelect(f io.Writer, ak Kinds) { fmt.Fprintf(f, importUnqualifiedTensor) fmt.Fprintf(f, "%v\n", checkNativeSelectable) ks := filter(ak.Kinds, isSpecialized) for _, k := range ks { fmt.Fprintf(f, "/* Native Select for %v */\n\n", k) NativeSelect.Execute(f, k) fmt.Fprint(f, "\n\n") } } func generateNativeSelectTests(f io.Writer, ak Kinds) { fmt.Fprintf(f, importUnqualifiedTensor) ks := filter(ak.Kinds, isSpecialized) for _, k := range ks { NativeSelectTest.Execute(f, k) fmt.Fprint(f, "\n\n") } } tensor-0.9.24/genlib2/op.go000066400000000000000000000041011426512615100154060ustar00rootroot00000000000000package main import "reflect" type Op interface { Name() string Arity() int SymbolTemplate() string TypeClass() TypeClass IsFunc() bool } type TypedOp interface { Op Kind() reflect.Kind } type BinOp interface { Op IsBinOp() } type UnaryOp interface { Op IsUnaryOp() } type RetSamer interface { RetSame() bool } type basicBinOp struct { symbol string name string isFunc bool is TypeClass } type arithOp struct { basicBinOp TypeClassName string HasIdentity bool Identity int IsInv bool Inv string IsCommutative bool IsInvolutionary bool } type 
cmpOp struct { basicBinOp TypeClassName string Inv string IsTransitive bool IsSymmetric bool } func (op basicBinOp) Name() string { return op.name } func (op basicBinOp) Arity() int { return 2 } func (op basicBinOp) SymbolTemplate() string { return op.symbol } func (op basicBinOp) TypeClass() TypeClass { return op.is } func (op basicBinOp) IsFunc() bool { return op.isFunc } func (op basicBinOp) IsBinOp() {} type TypedBinOp struct { BinOp k reflect.Kind } // IsFunc contains special conditions func (op TypedBinOp) IsFunc() bool { if op.Name() == "Mod" && isFloatCmplx(op.k) { return true } return op.BinOp.IsFunc() } func (op TypedBinOp) Kind() reflect.Kind { return op.k } type unaryOp struct { symbol string name string isfunc bool is TypeClass TypeClassName string // mainly used in tests Inv string } func (op unaryOp) Name() string { return op.name } func (op unaryOp) Arity() int { return 1 } func (op unaryOp) SymbolTemplate() string { return op.symbol } func (op unaryOp) TypeClass() TypeClass { return op.is } func (op unaryOp) IsFunc() bool { return op.isfunc } func (op unaryOp) IsUnaryOp() {} type TypedUnaryOp struct { UnaryOp k reflect.Kind } func (op TypedUnaryOp) Kind() reflect.Kind { return op.k } type specialUnaryOp struct { unaryOp additionalParams []string } type Level int const ( Basic Level = iota InternalE StdEng Dense API ) tensor-0.9.24/genlib2/package.go000066400000000000000000000010321426512615100163650ustar00rootroot00000000000000package main import ( "fmt" "io" ) func writePkgName(f io.Writer, pkg string) { switch pkg { case tensorPkgLoc: fmt.Fprintf(f, "// %s\n\npackage tensor\n\n", genmsg) case nativePkgLoc: fmt.Fprintf(f, "// %s\n\npackage native\n\n", genmsg) case execLoc: fmt.Fprintf(f, "// %s\n\npackage execution\n\n", genmsg) case storageLoc: fmt.Fprintf(f, "// %s\n\npackage storage\n\n", genmsg) default: fmt.Fprintf(f, "// %s\n\npackage unknown\n\n", genmsg) } } const importUnqualifiedTensor = `import . "gorgonia.org/tensor" ` tensor-0.9.24/genlib2/reduction_specialization.go000066400000000000000000000036571426512615100220770ustar00rootroot00000000000000package main import ( "io" "reflect" "text/template" ) type ReductionOp struct { OpName string VecVec string // sum(a, b []T) OpOfVec string // sum([]T) GenericName string // sum(T, T) T Kinds []reflect.Kind Typeclass TypeClass } var reductionOps = []ReductionOp{ {OpName: "Sum", VecVec: "VecAdd", OpOfVec: "Sum", GenericName: "Add", Typeclass: isNumber}, {OpName: "Max", VecVec: "VecMax", OpOfVec: "SliceMax", GenericName: "Max", Typeclass: isNonComplexNumber}, {OpName: "Min", VecVec: "VecMin", OpOfVec: "SliceMin", GenericName: "Min", Typeclass: isNonComplexNumber}, } const reductionSpecializationRaw = `func Monotonic{{.OpName | title}}(t reflect.Type, a *storage.Header) (retVal interface{}, err error) { switch t { {{$opOfVec := .OpOfVec -}} {{range .Kinds -}} {{if isNumber . -}} case {{reflectKind .}}: retVal = {{$opOfVec}}{{short .}}(a.{{sliceOf .}}) return {{end -}} {{end -}} default: err = errors.Errorf("Cannot perform {{.OpName}} on %v", t) return } } func {{.OpName | title}}Methods(t reflect.Type) (firstFn, lastFn, defaultFn interface{}, err error) { {{$vecVec := .VecVec -}} {{$opOfVec := .OpOfVec -}} {{$genericName := .GenericName -}} switch t { {{range .Kinds -}} {{if isNumber . 
-}} case {{reflectKind .}}: return {{$vecVec}}{{short .}}, {{$opOfVec}}{{short .}}, {{$genericName}}{{short .}}, nil {{end -}} {{end -}} default: return nil, nil, nil, errors.Errorf("No methods found for {{.OpName}} for %v", t) } } ` var reductionSpecialization *template.Template func init() { reductionSpecialization = template.Must(template.New("reduction specialization").Funcs(funcs).Parse(reductionSpecializationRaw)) } func generateReductionSpecialization(f io.Writer, ak Kinds) { for _, op := range reductionOps { for _, k := range ak.Kinds { if !op.Typeclass(k) { continue } op.Kinds = append(op.Kinds, k) } reductionSpecialization.Execute(f, op) } } tensor-0.9.24/genlib2/signature.go000066400000000000000000000105641426512615100170030ustar00rootroot00000000000000package main import ( "io" "reflect" "text/template" ) type Signature struct { Name string NameTemplate *template.Template ParamNames []string ParamTemplates []*template.Template RetVals []string RetValTemplates []*template.Template Kind reflect.Kind Err bool } func (s *Signature) Write(w io.Writer) { s.NameTemplate.Execute(w, s) w.Write([]byte("(")) for i, p := range s.ParamTemplates { w.Write([]byte(s.ParamNames[i])) w.Write([]byte(" ")) p.Execute(w, s.Kind) if i < len(s.ParamNames) { w.Write([]byte(", ")) } } w.Write([]byte(")")) if len(s.RetVals) > 0 { w.Write([]byte("(")) for i, r := range s.RetValTemplates { w.Write([]byte(s.RetVals[i])) w.Write([]byte(" ")) r.Execute(w, s.Kind) if i < len(s.RetVals) { w.Write([]byte(", ")) } } if s.Err { w.Write([]byte("err error")) } w.Write([]byte(")")) return } if s.Err { w.Write([]byte("(err error)")) } } const ( golinkPragmaRaw = "//go:linkname {{.Name}}{{short .Kind}} github.com/chewxy/{{vecPkg .Kind}}{{getalias .Name}}\n" typeAnnotatedNameRaw = `{{.Name}}{{short .Kind}}` plainNameRaw = `{{.Name}}` ) const ( scalarTypeRaw = `{{asType .}}` sliceTypeRaw = `[]{{asType .}}` iteratorTypeRaw = `Iterator` interfaceTypeRaw = "interface{}" boolsTypeRaw = `[]bool` boolTypeRaw = `bool` intTypeRaw = `int` intsTypeRaw = `[]int` reflectTypeRaw = `reflect.Type` // arrayTypeRaw = `Array` arrayTypeRaw = `*storage.Header` unaryFuncTypeRaw = `func({{asType .}}){{asType .}} ` unaryFuncErrTypeRaw = `func({{asType .}}) ({{asType .}}, error)` reductionFuncTypeRaw = `func(a, b {{asType .}}) {{asType .}}` reductionFuncTypeErrRaw = `func(a, b {{asType .}}) ({{asType .}}, error)` tensorTypeRaw = `Tensor` splatFuncOptTypeRaw = `...FuncOpt` denseTypeRaw = `*Dense` testingTypeRaw = `*testing.T` ) var ( golinkPragma *template.Template typeAnnotatedName *template.Template plainName *template.Template scalarType *template.Template sliceType *template.Template iteratorType *template.Template interfaceType *template.Template boolsType *template.Template boolType *template.Template intType *template.Template intsType *template.Template reflectType *template.Template arrayType *template.Template unaryFuncType *template.Template unaryFuncErrType *template.Template tensorType *template.Template splatFuncOptType *template.Template denseType *template.Template testingType *template.Template ) func init() { golinkPragma = template.Must(template.New("golinkPragma").Funcs(funcs).Parse(golinkPragmaRaw)) typeAnnotatedName = template.Must(template.New("type annotated name").Funcs(funcs).Parse(typeAnnotatedNameRaw)) plainName = template.Must(template.New("plainName").Funcs(funcs).Parse(plainNameRaw)) scalarType = template.Must(template.New("scalarType").Funcs(funcs).Parse(scalarTypeRaw)) sliceType = 
template.Must(template.New("sliceType").Funcs(funcs).Parse(sliceTypeRaw)) iteratorType = template.Must(template.New("iteratorType").Funcs(funcs).Parse(iteratorTypeRaw)) interfaceType = template.Must(template.New("interfaceType").Funcs(funcs).Parse(interfaceTypeRaw)) boolsType = template.Must(template.New("boolsType").Funcs(funcs).Parse(boolsTypeRaw)) boolType = template.Must(template.New("boolType").Funcs(funcs).Parse(boolTypeRaw)) intType = template.Must(template.New("intTYpe").Funcs(funcs).Parse(intTypeRaw)) intsType = template.Must(template.New("intsType").Funcs(funcs).Parse(intsTypeRaw)) reflectType = template.Must(template.New("reflectType").Funcs(funcs).Parse(reflectTypeRaw)) arrayType = template.Must(template.New("arrayType").Funcs(funcs).Parse(arrayTypeRaw)) unaryFuncType = template.Must(template.New("unaryFuncType").Funcs(funcs).Parse(unaryFuncTypeRaw)) unaryFuncErrType = template.Must(template.New("unaryFuncErrType").Funcs(funcs).Parse(unaryFuncErrTypeRaw)) tensorType = template.Must(template.New("tensorType").Funcs(funcs).Parse(tensorTypeRaw)) splatFuncOptType = template.Must(template.New("splatFuncOpt").Funcs(funcs).Parse(splatFuncOptTypeRaw)) denseType = template.Must(template.New("*Dense").Funcs(funcs).Parse(denseTypeRaw)) testingType = template.Must(template.New("*testing.T").Funcs(funcs).Parse(testingTypeRaw)) } tensor-0.9.24/genlib2/testutils.go000066400000000000000000000141741426512615100170430ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const anyToF64sRaw = `func anyToFloat64s(x interface{}) (retVal []float64) { switch xt := x.(type) { {{range .Kinds -}} {{if isNumber . -}} case []{{asType .}}: {{if eq .String "float64" -}} {{else if eq .String "float32" -}} retVal = make([]float64, len(xt)) for i, v := range xt { switch { case math32.IsNaN(v): retVal[i] = math.NaN() case math32.IsInf(v, 1): retVal[i] = math.Inf(1) case math32.IsInf(v, -1): retVal[i] = math.Inf(-1) default: retVal[i] = float64(v) } } {{else if eq .String "complex64" -}} retVal = make([]float64, len(xt)) for i, v := range xt { switch { case cmplx.IsNaN(complex128(v)): retVal[i] = math.NaN() case cmplx.IsInf(complex128(v)): retVal[i] = math.Inf(1) default: retVal[i] = float64(real(v)) } } {{else if eq .String "complex128" -}} retVal = make([]float64, len(xt)) for i, v := range xt { switch { case cmplx.IsNaN(v): retVal[i] = math.NaN() case cmplx.IsInf(v): retVal[i] = math.Inf(1) default: retVal[i] = real(v) } } {{else -}} retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i]= float64(v) } {{end -}} return {{if eq .String "float64"}}xt{{end}} {{end -}} {{end -}} } panic("Unreachable") } ` const qcGenraw = `func randomQC(a Tensor, r *rand.Rand) { switch a.Dtype() { {{range .Kinds -}} {{if isParameterized . -}} {{else -}} case {{reflectKind . 
-}}: s := a.Data().([]{{asType .}}) for i := range s { {{if hasPrefix .String "uint" -}} s[i] = {{asType .}}(r.Uint32()) {{else if hasPrefix .String "int" -}} s[i] = {{asType .}}(r.Int()) {{else if eq .String "float64" -}} s[i] = r.Float64() {{else if eq .String "float32" -}} s[i] = r.Float32() {{else if eq .String "complex64" -}} s[i] = complex(r.Float32(), r.Float32()) {{else if eq .String "complex128" -}} s[i] = complex(r.Float64(), r.Float64()) {{else if eq .String "bool" -}} s[i] = randomBool() {{else if eq .String "string" -}} s[i] = randomString() {{else if eq .String "unsafe.Pointer" -}} s[i] = nil {{end -}} } {{end -}} {{end -}} } } ` const testQCRaw = `type QCDense{{short .}} struct { *Dense } func (*QCDense{{short .}}) Generate(r *rand.Rand, size int) reflect.Value { s := make([]{{asType .}}, size) for i := range s { {{if hasPrefix .String "uint" -}} s[i] = {{asType .}}(r.Uint32()) {{else if hasPrefix .String "int" -}} s[i] = {{asType .}}(r.Int()) {{else if eq .String "float64" -}} s[i] = r.Float64() {{else if eq .String "float32" -}} s[i] = r.Float32() {{else if eq .String "complex64" -}} s[i] = complex(r.Float32(), r.Float32()) {{else if eq .String "complex128" -}} s[i] = complex(r.Float64(), r.Float64()) {{else if eq .String "bool" -}} s[i] = randomBool() {{else if eq .String "string" -}} s[i] = randomString() {{else if eq .String "unsafe.Pointer" -}} s[i] = nil {{end -}} } d := recycledDense({{asType . | title | strip}}, Shape{size}, WithBacking(s)) q := new(QCDense{{short .}}) q.Dense = d return reflect.ValueOf(q) } ` const identityFnsRaw = `func identity{{short .}}(a {{asType .}}) {{asType .}}{return a} ` const mutateFnsRaw = `func mutate{{short .}}(a {{asType . }}){{asType .}} { {{if isNumber . -}}return 1} {{else if eq .String "bool" -}}return true } {{else if eq .String "string" -}}return "Hello World"} {{else if eq .String "uintptr" -}}return 0xdeadbeef} {{else if eq .String "unsafe.Pointer" -}}return unsafe.Pointer(uintptr(0xdeadbeef))} {{end -}} ` const identityValsRaw = `func identityVal(x int, dt Dtype) interface{} { switch dt { {{range .Kinds -}} case {{reflectKind .}}: return {{asType .}}(x) {{end -}} case Complex64: var c complex64 if x == 0 { return c } c = 1 return c case Complex128: var c complex128 if x == 0 { return c } c = 1 return c case Bool: if x == 0 { return false } return true case String: if x == 0 { return "" } return fmt.Sprintf("%v", x) default: return x } }` const threewayEqualityRaw = `func threewayEq(a, b, c interface{}) bool { switch at := a.(type){ {{range .Kinds -}} case []{{asType .}}: bt := b.([]{{asType .}}) ct := c.([]{{asType .}}) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true {{end -}} {{range .Kinds -}} case {{asType .}}: bt := b.({{asType .}}) ct := c.({{asType .}}) if (at == 1 && bt == 1) && ct != 1 { return false } return true {{end -}} } return false } ` var ( anyToF64s *template.Template qcGen *template.Template testQC *template.Template identityFns *template.Template mutateFns *template.Template identityVals *template.Template threewayEquality *template.Template ) func init() { qcGen = template.Must(template.New("QCGen").Funcs(funcs).Parse(qcGenraw)) testQC = template.Must(template.New("testQCs").Funcs(funcs).Parse(testQCRaw)) anyToF64s = template.Must(template.New("anyToF64s").Funcs(funcs).Parse(anyToF64sRaw)) identityFns = template.Must(template.New("identityFn").Funcs(funcs).Parse(identityFnsRaw)) mutateFns = 
template.Must(template.New("mutateFns").Funcs(funcs).Parse(mutateFnsRaw)) identityVals = template.Must(template.New("identityVal").Funcs(funcs).Parse(identityValsRaw)) threewayEquality = template.Must(template.New("threeway").Funcs(funcs).Parse(threewayEqualityRaw)) } func generateTestUtils(f io.Writer, ak Kinds) { anyToF64s.Execute(f, ak) fmt.Fprintf(f, "\n") ak2 := Kinds{Kinds: filter(ak.Kinds, isNonComplexNumber)} identityVals.Execute(f, ak2) fmt.Fprintf(f, "\n") ak3 := Kinds{Kinds: filter(ak.Kinds, isNumber)} threewayEquality.Execute(f, ak3) fmt.Fprintf(f, "\n") for _, k := range ak.Kinds { if !isParameterized(k) { identityFns.Execute(f, k) } } for _, k := range ak.Kinds { if !isParameterized(k) { mutateFns.Execute(f, k) } } fmt.Fprintf(f, "\n") // for _, k := range ak.Kinds { // if !isParameterized(k) { // testQC.Execute(f, k) // fmt.Fprint(f, "\n") // } // } } tensor-0.9.24/genlib2/unary_tests.go000066400000000000000000000067761426512615100173740ustar00rootroot00000000000000package main import ( "fmt" "io" "text/template" ) const unaryTestBodyRaw = `invFn := func(q *Dense) bool { a := q.Clone().(*Dense) {{template "funcoptdecl" -}} correct := a.Clone().(*Dense) {{template "funcoptcorrect" -}} we, willFailEq := willerr(a, {{.TypeClassName}}, {{.EqFailTypeClassName}}) _, ok := q.Engine().({{interfaceName .Name}}); we = we || !ok ret, err := {{.Name}}(a {{template "funcoptuse"}}) if err, retEarly := qcErrCheck(t, "{{.Name}}", a, nil, we, err); retEarly{ if err != nil { return false } return true } {{if ne .InvTypeClass "" -}} if err := typeclassCheck(a.Dtype(), {{.InvTypeClass}}); err != nil { return true // uninvertible due to type class implementation issues } {{end -}} {{if eq .FuncOpt "incr" -}} if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()) ; err != nil { t.Errorf("err while subtracting incr: %v", err) return false } {{end -}} {{.Inv}}(ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { return false } {{template "funcoptcheck" -}} return true } if err := quick.Check(invFn, &quick.Config{Rand:newRand(), MaxCount: quickchecks}); err != nil{ t.Errorf("Inv tests for {{.Name}} failed: %v", err) } ` type unaryTest struct { unaryOp FuncOpt string EqFailTypeClassName string InvTypeClass string } func (fn *unaryTest) Name() string { if fn.unaryOp.Name() == "Eq" || fn.unaryOp.Name() == "Ne" { return "El" + fn.unaryOp.Name() } return fn.unaryOp.Name() } func (fn *unaryTest) Signature() *Signature { name := fmt.Sprintf("Test%s", fn.unaryOp.Name()) if fn.FuncOpt != "" { name += "_" + fn.FuncOpt } return &Signature{ Name: name, NameTemplate: plainName, ParamNames: []string{"t"}, ParamTemplates: []*template.Template{testingType}, } } func (fn *unaryTest) WriteBody(w io.Writer) { t := template.Must(template.New("unary test body").Funcs(funcs).Parse(unaryTestBodyRaw)) template.Must(t.New("funcoptdecl").Parse(funcOptDecl[fn.FuncOpt])) template.Must(t.New("funcoptcorrect").Parse(funcOptCorrect[fn.FuncOpt])) template.Must(t.New("funcoptuse").Parse(funcOptUse[fn.FuncOpt])) template.Must(t.New("funcoptcheck").Parse(funcOptCheck[fn.FuncOpt])) t.Execute(w, fn) } func (fn *unaryTest) canWrite() bool { return fn.Inv != "" } func (fn *unaryTest) Write(w io.Writer) { sig := fn.Signature() w.Write([]byte("func ")) sig.Write(w) w.Write([]byte("{\n")) fn.WriteBody(w) w.Write([]byte("}\n")) } func generateAPIUnaryTests(f io.Writer, ak Kinds) { var tests []*unaryTest for _, op := range conditionalUnaries { t := &unaryTest{ unaryOp: op, EqFailTypeClassName: 
"nil", } tests = append(tests, t) } for _, op := range unconditionalUnaries { t := &unaryTest{ unaryOp: op, EqFailTypeClassName: "nil", } switch op.name { case "Square": t.InvTypeClass = "floatcmplxTypes" case "Cube": t.InvTypeClass = "floatTypes" } tests = append(tests, t) } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "unsafe" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "reuse" } for _, fn := range tests { if fn.canWrite() { fn.Write(f) } fn.FuncOpt = "incr" } // for now incr cannot be quickchecked for _, fn := range tests { if fn.canWrite() { fn.Write(f) } } } tensor-0.9.24/go.mod000066400000000000000000000015071426512615100142340ustar00rootroot00000000000000module gorgonia.org/tensor go 1.18 require ( github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc github.com/chewxy/hm v1.0.0 github.com/chewxy/math32 v1.0.8 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.4.3 github.com/google/flatbuffers v1.12.0 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.6.1 go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 gonum.org/v1/gonum v0.8.2 gorgonia.org/vecf32 v0.9.0 gorgonia.org/vecf64 v0.9.0 ) require ( github.com/davecgh/go-spew v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/xtgo/set v1.0.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/protobuf v1.25.0 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) tensor-0.9.24/go.sum000066400000000000000000000373111426512615100142630ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc h1:zvQ6w7KwtQWgMQiewOF9tFtundRMVZFSAksNV6ogzuY= github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc/go.mod h1:c9sxoIT3YgLxH4UhLOCKaBlEojuMhVYpk4Ntv3opUTQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chewxy/hm v1.0.0 h1:zy/TSv3LV2nD3dwUEQL2VhXeoXbb9QkpmdRAVUFiA6k= github.com/chewxy/hm v1.0.0/go.mod h1:qg9YI4q6Fkj/whwHR1D+bOGeF7SniIP40VweVepLjg0= github.com/chewxy/math32 v1.0.0/go.mod h1:Miac6hA1ohdDUTagnvJy/q+aNnEk16qWUdb8ZVhvCN0= github.com/chewxy/math32 v1.0.8 h1:fU5E4Ec4Z+5RtRAi3TovSxUjQPkgRh+HbP7tKB2OFbM= github.com/chewxy/math32 v1.0.8/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fogleman/gg 
v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/xtgo/set v1.0.0 h1:6BCNBRv3ORNDQ7fyoJXRv+tstJz3m1JVFQErfeZz2pY= github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod 
h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f h1:Yv4xsIx7HZOoyUGSJ2ksDyWE2qIBXROsZKt2ny3hCGM= google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200910201057-6591123024b3/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorgonia.org/vecf32 v0.9.0 h1:PClazic1r+JVJ1dEzRXgeiVl4g1/Hf/w+wUSqnco1Xg= gorgonia.org/vecf32 v0.9.0/go.mod h1:NCc+5D2oxddRL11hd+pCB1PEyXWOyiQxfZ/1wwhOXCA= gorgonia.org/vecf64 v0.9.0 h1:bgZDP5x0OzBF64PjMGC3EvTdOoMEcmfAh1VCUnZFm1A= gorgonia.org/vecf64 v0.9.0/go.mod h1:hp7IOWCnRiVQKON73kkC/AUMtEXyf9kGlVrtPQ9ccVA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= tensor-0.9.24/interfaces.go000066400000000000000000000056131426512615100156020ustar00rootroot00000000000000package tensor import ( "reflect" "gorgonia.org/tensor/internal/storage" ) // Dtyper is any type that has a Dtype type Dtyper interface { Dtype() Dtype } // Eq is any type where you can perform an equality test type Eq interface { Eq(interface{}) bool } // Cloner is any type that can clone itself type Cloner interface { Clone() interface{} } // Dataer is any type that returns the data in its original form (typically a Go slice of something) type Dataer interface { Data() interface{} } // Boolable is any type that has a zero and one value, and is able to set itself to either type Boolable interface { Zeroer Oner } // A Zeroer is any type that can set itself to the zeroth value. It's used to implement the arrays. type Zeroer interface { Zero() } // A Oner is any type that can set itself to the equivalent of one. It's used to implement the arrays. type Oner interface { One() } // A MemSetter is any type that can set itself to a value. type MemSetter interface { Memset(interface{}) error } // A Densor is any type that can return a *Dense type Densor interface { Dense() *Dense } // ScalarRep is any Tensor that can represent a scalar type ScalarRep interface { IsScalar() bool ScalarValue() interface{} } // View is any Tensor that can provide a view on memory type View interface { Tensor IsView() bool IsMaterializable() bool Materialize() Tensor } // Slicer is any tensor that can slice type Slicer interface { Slice(...Slice) (View, error) } // DenseTensor is the interface for any Dense tensor. type DenseTensor interface { Tensor Info() *AP IsMatrix() bool IsVector() bool IsRowVec() bool IsColVec() bool // headerer // arrayer unsafeMem setAP(*AP) rtype() reflect.Type reshape(dims ...int) error setDataOrder(o DataOrder) isTransposed() bool ostrides() []int oshape() Shape transposeAxes() []int transposeIndex(i int, transposePat, strides []int) int oldAP() *AP setOldAP(ap *AP) parentTensor() *Dense setParentTensor(*Dense) len() int cap() int // operations Inner(other Tensor) (retVal interface{}, err error) MatMul(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) MatVecMul(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) TensorMul(other Tensor, axesA, axesB []int) (retVal *Dense, err error) stackDense(axis int, others ...DenseTensor) (DenseTensor, error) } // SparseTensor is the interface for any sparse tensor. type SparseTensor interface { Sparse AsCSC() AsCSR() Indices() []int Indptr() []int // headerer } // MaskedTensor is any DenseTensor that supports masking of its values. type MaskedTensor interface { DenseTensor IsMasked() bool SetMask([]bool) Mask() []bool } // Kinder is any type that can return its reflect.Kind.
type Kinder interface { Kind() reflect.Kind } type headerer interface { hdr() *storage.Header } type arrayer interface { arr() array arrPtr() *array } type unsafeMem interface { Set(i int, x interface{}) GetF64(i int) float64 GetF32(i int) float32 Ints() []int Float64s() []float64 Float32s() []float32 Complex64s() []complex64 Complex128s() []complex128 } tensor-0.9.24/internal/000077500000000000000000000000001426512615100147375ustar00rootroot00000000000000tensor-0.9.24/internal/IDLs/000077500000000000000000000000001426512615100155325ustar00rootroot00000000000000tensor-0.9.24/internal/IDLs/generated.fbs000066400000000000000000000012011426512615100201560ustar00rootroot00000000000000// Generated from generated.proto namespace gorgonia.org.tensor.internal.serialization.pb; enum Triangle : int { NOT_TRIANGLE = 0, UPPER = 1, LOWER = 2, SYMMETRIC = 3, } table AP { shape:[int]; strides:[int]; o:uint; t:gorgonia.org.tensor.internal.serialization.pb.Triangle; } table Dense { shape:[int]; strides:[int]; o:uint; t:gorgonia.org.tensor.internal.serialization.pb.Triangle; type:string; data:[ubyte]; } table MaskedDense { shape:[int]; strides:[int]; o:uint; t:gorgonia.org.tensor.internal.serialization.pb.Triangle; type:string; data:[ubyte]; mask:[bool]; mask_is_soft:[bool]; } tensor-0.9.24/internal/IDLs/generated.proto000077500000000000000000000030731426512615100205630ustar00rootroot00000000000000syntax = "proto3"; package gorgonia.org.tensor.internal.serialization.pb; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; option (gogoproto.protosizer_all) = true; option (gogoproto.sizer_all) = false; option go_package = "pb"; message AP { option (gogoproto.goproto_getters) = false; option (gogoproto.typedecl) = false; repeated int32 shape = 1; repeated int32 strides = 2; uint32 o = 3 [(gogoproto.casttype) = "DataOrder"]; gorgonia.org.tensor.internal.serialization.pb.Triangle t = 4; } message Dense { option (gogoproto.goproto_getters) = false; option (gogoproto.typedecl) = false; repeated int32 shape = 1; repeated int32 strides = 2; uint32 o = 3 [(gogoproto.casttype) = "DataOrder"]; gorgonia.org.tensor.internal.serialization.pb.Triangle t = 4; string type = 5; bytes data = 6; } message MaskedDense { option (gogoproto.goproto_getters) = false; option (gogoproto.typedecl) = false; repeated int32 shape = 1; repeated int32 strides = 2; uint32 o = 3 [(gogoproto.casttype) = "DataOrder"]; gorgonia.org.tensor.internal.serialization.pb.Triangle t = 4; string type = 5; bytes data = 6; repeated bool mask = 7; repeated bool mask_is_soft = 8; } enum Triangle { option (gogoproto.enumdecl) = false; option (gogoproto.goproto_enum_prefix) = false; option (gogoproto.goproto_enum_stringer) = false; NOT_TRIANGLE = 0 [(gogoproto.enumvalue_customname) = "NotTriangle"]; UPPER = 1 [(gogoproto.enumvalue_customname) = "Upper"]; LOWER = 2 [(gogoproto.enumvalue_customname) = "Lower"]; SYMMETRIC = 3 [(gogoproto.enumvalue_customname) = "Symmetric"]; } tensor-0.9.24/internal/execution/000077500000000000000000000000001426512615100167425ustar00rootroot00000000000000tensor-0.9.24/internal/execution/e.go000066400000000000000000000030521426512615100175150ustar00rootroot00000000000000package execution // import "gorgonia.org/tensor/internal/execution" import ( "fmt" "reflect" "unsafe" "gorgonia.org/tensor/internal/storage" ) // E is the standard engine. It's to be embedded in package tensor type E struct{} // basic types supported. 
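// These package-level reflect.Type values are what the engine's type
// switches compare against (e.g. switch t { case Float64: ... }) when
// dispatching on a tensor's element type.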
var ( Bool = reflect.TypeOf(true) Int = reflect.TypeOf(int(1)) Int8 = reflect.TypeOf(int8(1)) Int16 = reflect.TypeOf(int16(1)) Int32 = reflect.TypeOf(int32(1)) Int64 = reflect.TypeOf(int64(1)) Uint = reflect.TypeOf(uint(1)) Uint8 = reflect.TypeOf(uint8(1)) Uint16 = reflect.TypeOf(uint16(1)) Uint32 = reflect.TypeOf(uint32(1)) Uint64 = reflect.TypeOf(uint64(1)) Float32 = reflect.TypeOf(float32(1)) Float64 = reflect.TypeOf(float64(1)) Complex64 = reflect.TypeOf(complex64(1)) Complex128 = reflect.TypeOf(complex128(1)) String = reflect.TypeOf("") // aliases Byte = Uint8 // extras Uintptr = reflect.TypeOf(uintptr(0)) UnsafePointer = reflect.TypeOf(unsafe.Pointer(&Uintptr)) ) func isScalar(a *storage.Header, t reflect.Type) bool { return a.TypedLen(t) == 1 } type errorIndices []int func (e errorIndices) Indices() []int { return []int(e) } func (e errorIndices) Error() string { return fmt.Sprintf("Error in indices %v", []int(e)) } const ( lenMismatch = `Cannot compare with differing lengths: %d and %d` reductionErrMsg = `Cannot reduce with function of type %T` defaultValueErrMsg = `Expected default value of type %T. Got %v of %T instead` typeMismatch = `TypeMismatch: a %T and b %T` ) tensor-0.9.24/internal/execution/empty.s000066400000000000000000000000001426512615100202520ustar00rootroot00000000000000tensor-0.9.24/internal/execution/eng_argmethods.go000066400000000000000000000606221426512615100222650ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) ArgmaxIter(t reflect.Type, a *storage.Header, it Iterator, lastSize int) (indices []int, err error) { var next int switch t { case Int: data := a.Ints() tmp := make([]int, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxI(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int8: data := a.Int8s() tmp := make([]int8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxI8(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int16: data := a.Int16s() tmp := make([]int16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxI16(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int32: data := a.Int32s() tmp := make([]int32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxI32(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int64: data := a.Int64s() tmp := make([]int64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxI64(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint: data := a.Uints() tmp := make([]uint, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxU(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, 
ok := err.(NoOpError); ok { err = nil } return case Uint8: data := a.Uint8s() tmp := make([]uint8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxU8(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint16: data := a.Uint16s() tmp := make([]uint16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxU16(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint32: data := a.Uint32s() tmp := make([]uint32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxU32(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint64: data := a.Uint64s() tmp := make([]uint64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxU64(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float32: data := a.Float32s() tmp := make([]float32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxF32(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float64: data := a.Float64s() tmp := make([]float64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxF64(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case String: data := a.Strings() tmp := make([]string, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgmaxStr(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return default: return nil, errors.Errorf("Unsupported type %v for Argmax", t) } } func (e E) ArgminIter(t reflect.Type, a *storage.Header, it Iterator, lastSize int) (indices []int, err error) { var next int switch t { case Int: data := a.Ints() tmp := make([]int, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminI(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int8: data := a.Int8s() tmp := make([]int8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminI8(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int16: data := a.Int16s() tmp := make([]int16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminI16(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int32: data := a.Int32s() tmp := make([]int32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = 
append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminI32(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int64: data := a.Int64s() tmp := make([]int64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminI64(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint: data := a.Uints() tmp := make([]uint, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminU(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint8: data := a.Uint8s() tmp := make([]uint8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminU8(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint16: data := a.Uint16s() tmp := make([]uint16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminU16(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint32: data := a.Uint32s() tmp := make([]uint32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminU32(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint64: data := a.Uint64s() tmp := make([]uint64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminU64(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float32: data := a.Float32s() tmp := make([]float32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminF32(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float64: data := a.Float64s() tmp := make([]float64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminF64(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case String: data := a.Strings() tmp := make([]string, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) if len(tmp) == lastSize { am := ArgminStr(tmp) indices = append(indices, am) // reset tmp = tmp[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return default: return nil, errors.Errorf("Unsupported type %v for Argmin", t) } } func (e E) ArgmaxIterMasked(t reflect.Type, a *storage.Header, mask []bool, it Iterator, lastSize int) (indices []int, err error) { newMask := make([]bool, 0, lastSize) var next int switch t { case Int: data := a.Ints() tmp := make([]int, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedI(tmp, mask) 
indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int8: data := a.Int8s() tmp := make([]int8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedI8(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int16: data := a.Int16s() tmp := make([]int16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedI16(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int32: data := a.Int32s() tmp := make([]int32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedI32(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int64: data := a.Int64s() tmp := make([]int64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedI64(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint: data := a.Uints() tmp := make([]uint, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedU(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint8: data := a.Uint8s() tmp := make([]uint8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedU8(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint16: data := a.Uint16s() tmp := make([]uint16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedU16(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint32: data := a.Uint32s() tmp := make([]uint32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedU32(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint64: data := a.Uint64s() tmp := make([]uint64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedU64(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = 
newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float32: data := a.Float32s() tmp := make([]float32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedF32(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float64: data := a.Float64s() tmp := make([]float64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedF64(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case String: data := a.Strings() tmp := make([]string, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgmaxMaskedStr(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return default: return nil, errors.Errorf("Unsupported type %v for Argmax", t) } } func (e E) ArgminIterMasked(t reflect.Type, a *storage.Header, mask []bool, it Iterator, lastSize int) (indices []int, err error) { newMask := make([]bool, 0, lastSize) var next int switch t { case Int: data := a.Ints() tmp := make([]int, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedI(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int8: data := a.Int8s() tmp := make([]int8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedI8(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int16: data := a.Int16s() tmp := make([]int16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedI16(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int32: data := a.Int32s() tmp := make([]int32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedI32(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Int64: data := a.Int64s() tmp := make([]int64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedI64(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint: data := a.Uints() tmp := make([]uint, 0, lastSize) for next, err = it.Next(); err == nil; next, err = 
it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedU(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint8: data := a.Uint8s() tmp := make([]uint8, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedU8(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint16: data := a.Uint16s() tmp := make([]uint16, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedU16(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint32: data := a.Uint32s() tmp := make([]uint32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedU32(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Uint64: data := a.Uint64s() tmp := make([]uint64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedU64(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float32: data := a.Float32s() tmp := make([]float32, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedF32(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case Float64: data := a.Float64s() tmp := make([]float64, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedF64(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return case String: data := a.Strings() tmp := make([]string, 0, lastSize) for next, err = it.Next(); err == nil; next, err = it.Next() { tmp = append(tmp, data[next]) newMask = append(newMask, mask[next]) if len(tmp) == lastSize { am := ArgminMaskedStr(tmp, mask) indices = append(indices, am) // reset tmp = tmp[:0] newMask = newMask[:0] } } if _, ok := err.(NoOpError); ok { err = nil } return default: return nil, errors.Errorf("Unsupported type %v for Argmin", t) } } func (e E) ArgmaxFlatMasked(t reflect.Type, a *storage.Header, mask []bool) (retVal int) { switch t { case Int: return ArgmaxMaskedI(a.Ints(), mask) case Int8: return ArgmaxMaskedI8(a.Int8s(), mask) case Int16: return ArgmaxMaskedI16(a.Int16s(), mask) case Int32: return ArgmaxMaskedI32(a.Int32s(), mask) case Int64: return ArgmaxMaskedI64(a.Int64s(), mask) case Uint: return ArgmaxMaskedU(a.Uints(), mask) case Uint8: return ArgmaxMaskedU8(a.Uint8s(), mask) case Uint16: 
return ArgmaxMaskedU16(a.Uint16s(), mask) case Uint32: return ArgmaxMaskedU32(a.Uint32s(), mask) case Uint64: return ArgmaxMaskedU64(a.Uint64s(), mask) case Float32: return ArgmaxMaskedF32(a.Float32s(), mask) case Float64: return ArgmaxMaskedF64(a.Float64s(), mask) case String: return ArgmaxMaskedStr(a.Strings(), mask) default: return -1 } } func (e E) ArgminFlatMasked(t reflect.Type, a *storage.Header, mask []bool) (retVal int) { switch t { case Int: return ArgminMaskedI(a.Ints(), mask) case Int8: return ArgminMaskedI8(a.Int8s(), mask) case Int16: return ArgminMaskedI16(a.Int16s(), mask) case Int32: return ArgminMaskedI32(a.Int32s(), mask) case Int64: return ArgminMaskedI64(a.Int64s(), mask) case Uint: return ArgminMaskedU(a.Uints(), mask) case Uint8: return ArgminMaskedU8(a.Uint8s(), mask) case Uint16: return ArgminMaskedU16(a.Uint16s(), mask) case Uint32: return ArgminMaskedU32(a.Uint32s(), mask) case Uint64: return ArgminMaskedU64(a.Uint64s(), mask) case Float32: return ArgminMaskedF32(a.Float32s(), mask) case Float64: return ArgminMaskedF64(a.Float64s(), mask) case String: return ArgminMaskedStr(a.Strings(), mask) default: return -1 } } func (e E) ArgmaxFlat(t reflect.Type, a *storage.Header) (retVal int) { switch t { case Int: return ArgmaxI(a.Ints()) case Int8: return ArgmaxI8(a.Int8s()) case Int16: return ArgmaxI16(a.Int16s()) case Int32: return ArgmaxI32(a.Int32s()) case Int64: return ArgmaxI64(a.Int64s()) case Uint: return ArgmaxU(a.Uints()) case Uint8: return ArgmaxU8(a.Uint8s()) case Uint16: return ArgmaxU16(a.Uint16s()) case Uint32: return ArgmaxU32(a.Uint32s()) case Uint64: return ArgmaxU64(a.Uint64s()) case Float32: return ArgmaxF32(a.Float32s()) case Float64: return ArgmaxF64(a.Float64s()) case String: return ArgmaxStr(a.Strings()) default: return -1 } } func (e E) ArgminFlat(t reflect.Type, a *storage.Header) (retVal int) { switch t { case Int: return ArgminI(a.Ints()) case Int8: return ArgminI8(a.Int8s()) case Int16: return ArgminI16(a.Int16s()) case Int32: return ArgminI32(a.Int32s()) case Int64: return ArgminI64(a.Int64s()) case Uint: return ArgminU(a.Uints()) case Uint8: return ArgminU8(a.Uint8s()) case Uint16: return ArgminU16(a.Uint16s()) case Uint32: return ArgminU32(a.Uint32s()) case Uint64: return ArgminU64(a.Uint64s()) case Float32: return ArgminF32(a.Float32s()) case Float64: return ArgminF64(a.Float64s()) case String: return ArgminStr(a.Strings()) default: return -1 } } tensor-0.9.24/internal/execution/eng_arith.go000066400000000000000000003263121426512615100212400ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
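// Every exported method below follows the same dispatch shape: switch on the
// element type t, then on whether each operand holds a single element (as/bs,
// via isScalar), selecting a scalar-vector kernel (e.g. AddSVI), a
// vector-scalar kernel (AddVSI), or a vector-vector kernel (VecAddI). A
// minimal sketch of that shape for one type and one operation; addVV, addSV
// and addVS here are hypothetical stand-ins, not kernels from this package:
//
//	func add(a, b []int, as, bs bool) {
//		switch {
//		case as && !bs:
//			addSV(a[0], b) // broadcast the scalar a[0] across b
//		case !as && bs:
//			addVS(a, b[0]) // broadcast the scalar b[0] across a
//		default:
//			addVV(a, b) // elementwise; also covers the scalar-scalar case
//		}
//	}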
package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) Add(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecAddI(at, bt) case as && !bs: AddSVI(at[0], bt) case !as && bs: AddVSI(at, bt[0]) default: VecAddI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecAddI8(at, bt) case as && !bs: AddSVI8(at[0], bt) case !as && bs: AddVSI8(at, bt[0]) default: VecAddI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecAddI16(at, bt) case as && !bs: AddSVI16(at[0], bt) case !as && bs: AddVSI16(at, bt[0]) default: VecAddI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecAddI32(at, bt) case as && !bs: AddSVI32(at[0], bt) case !as && bs: AddVSI32(at, bt[0]) default: VecAddI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecAddI64(at, bt) case as && !bs: AddSVI64(at[0], bt) case !as && bs: AddVSI64(at, bt[0]) default: VecAddI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecAddU(at, bt) case as && !bs: AddSVU(at[0], bt) case !as && bs: AddVSU(at, bt[0]) default: VecAddU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecAddU8(at, bt) case as && !bs: AddSVU8(at[0], bt) case !as && bs: AddVSU8(at, bt[0]) default: VecAddU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecAddU16(at, bt) case as && !bs: AddSVU16(at[0], bt) case !as && bs: AddVSU16(at, bt[0]) default: VecAddU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecAddU32(at, bt) case as && !bs: AddSVU32(at[0], bt) case !as && bs: AddVSU32(at, bt[0]) default: VecAddU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecAddU64(at, bt) case as && !bs: AddSVU64(at[0], bt) case !as && bs: AddVSU64(at, bt[0]) default: VecAddU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecAddF32(at, bt) case as && !bs: AddSVF32(at[0], bt) case !as && bs: AddVSF32(at, bt[0]) default: VecAddF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecAddF64(at, bt) case as && !bs: AddSVF64(at[0], bt) case !as && bs: AddVSF64(at, bt[0]) default: VecAddF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecAddC64(at, bt) case as && !bs: AddSVC64(at[0], bt) case !as && bs: AddVSC64(at, bt[0]) default: VecAddC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecAddC128(at, bt) case as && !bs: AddSVC128(at[0], bt) case !as && bs: AddVSC128(at, bt[0]) default: VecAddC128(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecAddStr(at, bt) case as && !bs: AddSVStr(at[0], bt) case !as && bs: AddVSStr(at, bt[0]) default: VecAddStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Add", t) } } func (e E) Sub(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecSubI(at, bt) case as && !bs: SubSVI(at[0], bt) case !as && bs: SubVSI(at, bt[0]) 
default: VecSubI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecSubI8(at, bt) case as && !bs: SubSVI8(at[0], bt) case !as && bs: SubVSI8(at, bt[0]) default: VecSubI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecSubI16(at, bt) case as && !bs: SubSVI16(at[0], bt) case !as && bs: SubVSI16(at, bt[0]) default: VecSubI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecSubI32(at, bt) case as && !bs: SubSVI32(at[0], bt) case !as && bs: SubVSI32(at, bt[0]) default: VecSubI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecSubI64(at, bt) case as && !bs: SubSVI64(at[0], bt) case !as && bs: SubVSI64(at, bt[0]) default: VecSubI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecSubU(at, bt) case as && !bs: SubSVU(at[0], bt) case !as && bs: SubVSU(at, bt[0]) default: VecSubU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecSubU8(at, bt) case as && !bs: SubSVU8(at[0], bt) case !as && bs: SubVSU8(at, bt[0]) default: VecSubU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecSubU16(at, bt) case as && !bs: SubSVU16(at[0], bt) case !as && bs: SubVSU16(at, bt[0]) default: VecSubU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecSubU32(at, bt) case as && !bs: SubSVU32(at[0], bt) case !as && bs: SubVSU32(at, bt[0]) default: VecSubU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecSubU64(at, bt) case as && !bs: SubSVU64(at[0], bt) case !as && bs: SubVSU64(at, bt[0]) default: VecSubU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecSubF32(at, bt) case as && !bs: SubSVF32(at[0], bt) case !as && bs: SubVSF32(at, bt[0]) default: VecSubF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecSubF64(at, bt) case as && !bs: SubSVF64(at[0], bt) case !as && bs: SubVSF64(at, bt[0]) default: VecSubF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecSubC64(at, bt) case as && !bs: SubSVC64(at[0], bt) case !as && bs: SubVSC64(at, bt[0]) default: VecSubC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecSubC128(at, bt) case as && !bs: SubSVC128(at[0], bt) case !as && bs: SubVSC128(at, bt[0]) default: VecSubC128(at, bt) } return default: return errors.Errorf("Unsupported type %v for Sub", t) } } func (e E) Mul(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecMulI(at, bt) case as && !bs: MulSVI(at[0], bt) case !as && bs: MulVSI(at, bt[0]) default: VecMulI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecMulI8(at, bt) case as && !bs: MulSVI8(at[0], bt) case !as && bs: MulVSI8(at, bt[0]) default: VecMulI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecMulI16(at, bt) case as && !bs: MulSVI16(at[0], bt) case !as && bs: MulVSI16(at, bt[0]) default: VecMulI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecMulI32(at, bt) case as && !bs: MulSVI32(at[0], bt) case !as && bs: MulVSI32(at, bt[0]) default: 
VecMulI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecMulI64(at, bt) case as && !bs: MulSVI64(at[0], bt) case !as && bs: MulVSI64(at, bt[0]) default: VecMulI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecMulU(at, bt) case as && !bs: MulSVU(at[0], bt) case !as && bs: MulVSU(at, bt[0]) default: VecMulU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecMulU8(at, bt) case as && !bs: MulSVU8(at[0], bt) case !as && bs: MulVSU8(at, bt[0]) default: VecMulU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecMulU16(at, bt) case as && !bs: MulSVU16(at[0], bt) case !as && bs: MulVSU16(at, bt[0]) default: VecMulU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecMulU32(at, bt) case as && !bs: MulSVU32(at[0], bt) case !as && bs: MulVSU32(at, bt[0]) default: VecMulU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecMulU64(at, bt) case as && !bs: MulSVU64(at[0], bt) case !as && bs: MulVSU64(at, bt[0]) default: VecMulU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecMulF32(at, bt) case as && !bs: MulSVF32(at[0], bt) case !as && bs: MulVSF32(at, bt[0]) default: VecMulF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecMulF64(at, bt) case as && !bs: MulSVF64(at[0], bt) case !as && bs: MulVSF64(at, bt[0]) default: VecMulF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecMulC64(at, bt) case as && !bs: MulSVC64(at[0], bt) case !as && bs: MulVSC64(at, bt[0]) default: VecMulC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecMulC128(at, bt) case as && !bs: MulSVC128(at[0], bt) case !as && bs: MulVSC128(at, bt[0]) default: VecMulC128(at, bt) } return default: return errors.Errorf("Unsupported type %v for Mul", t) } } func (e E) Div(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecDivI(at, bt) case as && !bs: err = DivSVI(at[0], bt) case !as && bs: err = DivVSI(at, bt[0]) default: err = VecDivI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecDivI8(at, bt) case as && !bs: err = DivSVI8(at[0], bt) case !as && bs: err = DivVSI8(at, bt[0]) default: err = VecDivI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecDivI16(at, bt) case as && !bs: err = DivSVI16(at[0], bt) case !as && bs: err = DivVSI16(at, bt[0]) default: err = VecDivI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecDivI32(at, bt) case as && !bs: err = DivSVI32(at[0], bt) case !as && bs: err = DivVSI32(at, bt[0]) default: err = VecDivI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecDivI64(at, bt) case as && !bs: err = DivSVI64(at[0], bt) case !as && bs: err = DivVSI64(at, bt[0]) default: err = VecDivI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecDivU(at, bt) case as && !bs: err = DivSVU(at[0], bt) case !as && bs: err = DivVSU(at, bt[0]) default: err = VecDivU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: 
VecDivU8(at, bt) case as && !bs: err = DivSVU8(at[0], bt) case !as && bs: err = DivVSU8(at, bt[0]) default: err = VecDivU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecDivU16(at, bt) case as && !bs: err = DivSVU16(at[0], bt) case !as && bs: err = DivVSU16(at, bt[0]) default: err = VecDivU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecDivU32(at, bt) case as && !bs: err = DivSVU32(at[0], bt) case !as && bs: err = DivVSU32(at, bt[0]) default: err = VecDivU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecDivU64(at, bt) case as && !bs: err = DivSVU64(at[0], bt) case !as && bs: err = DivVSU64(at, bt[0]) default: err = VecDivU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecDivF32(at, bt) case as && !bs: DivSVF32(at[0], bt) case !as && bs: DivVSF32(at, bt[0]) default: VecDivF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecDivF64(at, bt) case as && !bs: DivSVF64(at[0], bt) case !as && bs: DivVSF64(at, bt[0]) default: VecDivF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecDivC64(at, bt) case as && !bs: DivSVC64(at[0], bt) case !as && bs: DivVSC64(at, bt[0]) default: VecDivC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecDivC128(at, bt) case as && !bs: DivSVC128(at[0], bt) case !as && bs: DivVSC128(at, bt[0]) default: VecDivC128(at, bt) } return default: return errors.Errorf("Unsupported type %v for Div", t) } } func (e E) Pow(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecPowF32(at, bt) case as && !bs: PowSVF32(at[0], bt) case !as && bs: PowVSF32(at, bt[0]) default: VecPowF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecPowF64(at, bt) case as && !bs: PowSVF64(at[0], bt) case !as && bs: PowVSF64(at, bt[0]) default: VecPowF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecPowC64(at, bt) case as && !bs: PowSVC64(at[0], bt) case !as && bs: PowVSC64(at, bt[0]) default: VecPowC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecPowC128(at, bt) case as && !bs: PowSVC128(at[0], bt) case !as && bs: PowVSC128(at, bt[0]) default: VecPowC128(at, bt) } return default: return errors.Errorf("Unsupported type %v for Pow", t) } } func (e E) Mod(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecModI(at, bt) case as && !bs: ModSVI(at[0], bt) case !as && bs: ModVSI(at, bt[0]) default: VecModI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecModI8(at, bt) case as && !bs: ModSVI8(at[0], bt) case !as && bs: ModVSI8(at, bt[0]) default: VecModI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecModI16(at, bt) case as && !bs: ModSVI16(at[0], bt) case !as && bs: ModVSI16(at, bt[0]) default: VecModI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecModI32(at, bt) case as && !bs: ModSVI32(at[0], bt) case !as 
&& bs: ModVSI32(at, bt[0]) default: VecModI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecModI64(at, bt) case as && !bs: ModSVI64(at[0], bt) case !as && bs: ModVSI64(at, bt[0]) default: VecModI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecModU(at, bt) case as && !bs: ModSVU(at[0], bt) case !as && bs: ModVSU(at, bt[0]) default: VecModU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecModU8(at, bt) case as && !bs: ModSVU8(at[0], bt) case !as && bs: ModVSU8(at, bt[0]) default: VecModU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecModU16(at, bt) case as && !bs: ModSVU16(at[0], bt) case !as && bs: ModVSU16(at, bt[0]) default: VecModU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecModU32(at, bt) case as && !bs: ModSVU32(at[0], bt) case !as && bs: ModVSU32(at, bt[0]) default: VecModU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecModU64(at, bt) case as && !bs: ModSVU64(at[0], bt) case !as && bs: ModVSU64(at, bt[0]) default: VecModU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecModF32(at, bt) case as && !bs: ModSVF32(at[0], bt) case !as && bs: ModVSF32(at, bt[0]) default: VecModF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecModF64(at, bt) case as && !bs: ModSVF64(at[0], bt) case !as && bs: ModVSF64(at, bt[0]) default: VecModF64(at, bt) } return default: return errors.Errorf("Unsupported type %v for Mod", t) } } func (e E) AddIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { return errors.Errorf("Cannot increment on scalar increment. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() it := incr.Ints() switch { case as && bs: VecAddI(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVI(at[0], bt, it) case !as && bs: AddIncrVSI(at, bt[0], it) default: AddIncrI(at, bt, it) } return case Int8: at := a.Int8s() bt := b.Int8s() it := incr.Int8s() switch { case as && bs: VecAddI8(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVI8(at[0], bt, it) case !as && bs: AddIncrVSI8(at, bt[0], it) default: AddIncrI8(at, bt, it) } return case Int16: at := a.Int16s() bt := b.Int16s() it := incr.Int16s() switch { case as && bs: VecAddI16(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVI16(at[0], bt, it) case !as && bs: AddIncrVSI16(at, bt[0], it) default: AddIncrI16(at, bt, it) } return case Int32: at := a.Int32s() bt := b.Int32s() it := incr.Int32s() switch { case as && bs: VecAddI32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVI32(at[0], bt, it) case !as && bs: AddIncrVSI32(at, bt[0], it) default: AddIncrI32(at, bt, it) } return case Int64: at := a.Int64s() bt := b.Int64s() it := incr.Int64s() switch { case as && bs: VecAddI64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVI64(at[0], bt, it) case !as && bs: AddIncrVSI64(at, bt[0], it) default: AddIncrI64(at, bt, it) } return case Uint: at := a.Uints() bt := b.Uints() it := incr.Uints() switch { case as && bs: VecAddU(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVU(at[0], bt, it) case !as && bs: AddIncrVSU(at, bt[0], it) default: AddIncrU(at, bt, it) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() it := incr.Uint8s() switch { case as && bs: VecAddU8(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVU8(at[0], bt, it) case !as && bs: AddIncrVSU8(at, bt[0], it) default: AddIncrU8(at, bt, it) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() it := incr.Uint16s() switch { case as && bs: VecAddU16(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVU16(at[0], bt, it) case !as && bs: AddIncrVSU16(at, bt[0], it) default: AddIncrU16(at, bt, it) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() it := incr.Uint32s() switch { case as && bs: VecAddU32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVU32(at[0], bt, it) case !as && bs: AddIncrVSU32(at, bt[0], it) default: AddIncrU32(at, bt, it) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() it := incr.Uint64s() switch { case as && bs: VecAddU64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVU64(at[0], bt, it) case !as && bs: AddIncrVSU64(at, bt[0], it) default: AddIncrU64(at, bt, it) } return case Float32: at := a.Float32s() bt := b.Float32s() it := incr.Float32s() switch { case as && bs: VecAddF32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVF32(at[0], bt, it) case !as && bs: AddIncrVSF32(at, bt[0], it) default: AddIncrF32(at, bt, it) } return case Float64: at := a.Float64s() bt := b.Float64s() it := incr.Float64s() switch { case as && bs: VecAddF64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVF64(at[0], bt, it) case !as && bs: AddIncrVSF64(at, bt[0], it) default: AddIncrF64(at, bt, it) } return case Complex64: at := a.Complex64s() bt := 
b.Complex64s() it := incr.Complex64s() switch { case as && bs: VecAddC64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVC64(at[0], bt, it) case !as && bs: AddIncrVSC64(at, bt[0], it) default: AddIncrC64(at, bt, it) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() it := incr.Complex128s() switch { case as && bs: VecAddC128(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVC128(at[0], bt, it) case !as && bs: AddIncrVSC128(at, bt[0], it) default: AddIncrC128(at, bt, it) } return case String: at := a.Strings() bt := b.Strings() it := incr.Strings() switch { case as && bs: VecAddStr(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: AddIncrSVStr(at[0], bt, it) case !as && bs: AddIncrVSStr(at, bt[0], it) default: AddIncrStr(at, bt, it) } return default: return errors.Errorf("Unsupported type %v for Add", t) } } func (e E) SubIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) is := isScalar(incr, t) if ((as && !bs) || (bs && !as)) && is { return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() it := incr.Ints() switch { case as && bs: VecSubI(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVI(at[0], bt, it) case !as && bs: SubIncrVSI(at, bt[0], it) default: SubIncrI(at, bt, it) } return case Int8: at := a.Int8s() bt := b.Int8s() it := incr.Int8s() switch { case as && bs: VecSubI8(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVI8(at[0], bt, it) case !as && bs: SubIncrVSI8(at, bt[0], it) default: SubIncrI8(at, bt, it) } return case Int16: at := a.Int16s() bt := b.Int16s() it := incr.Int16s() switch { case as && bs: VecSubI16(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVI16(at[0], bt, it) case !as && bs: SubIncrVSI16(at, bt[0], it) default: SubIncrI16(at, bt, it) } return case Int32: at := a.Int32s() bt := b.Int32s() it := incr.Int32s() switch { case as && bs: VecSubI32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVI32(at[0], bt, it) case !as && bs: SubIncrVSI32(at, bt[0], it) default: SubIncrI32(at, bt, it) } return case Int64: at := a.Int64s() bt := b.Int64s() it := incr.Int64s() switch { case as && bs: VecSubI64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVI64(at[0], bt, it) case !as && bs: SubIncrVSI64(at, bt[0], it) default: SubIncrI64(at, bt, it) } return case Uint: at := a.Uints() bt := b.Uints() it := incr.Uints() switch { case as && bs: VecSubU(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVU(at[0], bt, it) case !as && bs: SubIncrVSU(at, bt[0], it) default: SubIncrU(at, bt, it) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() it := incr.Uint8s() switch { case as && bs: VecSubU8(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVU8(at[0], bt, it) case !as && bs: SubIncrVSU8(at, bt[0], it) default: SubIncrU8(at, bt, it) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() it := incr.Uint16s() switch { case as && bs: VecSubU16(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: SubIncrSVU16(at[0], bt, it) case !as && bs: SubIncrVSU16(at, bt[0], it) default: SubIncrU16(at, bt, it) } return case Uint32: at := 
a.Uint32s()
		bt, it := b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecSubU32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			SubIncrSVU32(at[0], bt, it)
		case !as && bs:
			SubIncrVSU32(at, bt[0], it)
		default:
			SubIncrU32(at, bt, it)
		}
		return
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecSubU64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			SubIncrSVU64(at[0], bt, it)
		case !as && bs:
			SubIncrVSU64(at, bt[0], it)
		default:
			SubIncrU64(at, bt, it)
		}
		return
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecSubF32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			SubIncrSVF32(at[0], bt, it)
		case !as && bs:
			SubIncrVSF32(at, bt[0], it)
		default:
			SubIncrF32(at, bt, it)
		}
		return
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecSubF64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			SubIncrSVF64(at[0], bt, it)
		case !as && bs:
			SubIncrVSF64(at, bt[0], it)
		default:
			SubIncrF64(at, bt, it)
		}
		return
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecSubC64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			SubIncrSVC64(at[0], bt, it)
		case !as && bs:
			SubIncrVSC64(at, bt[0], it)
		default:
			SubIncrC64(at, bt, it)
		}
		return
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecSubC128(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			SubIncrSVC128(at[0], bt, it)
		case !as && bs:
			SubIncrVSC128(at, bt[0], it)
		default:
			SubIncrC128(at, bt, it)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Sub", t)
	}
}

// MulIncr performs an elementwise incr += a * b.
func (e E) MulIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b): %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecMulI(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVI(at[0], bt, it)
		case !as && bs:
			MulIncrVSI(at, bt[0], it)
		default:
			MulIncrI(at, bt, it)
		}
		return
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecMulI8(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVI8(at[0], bt, it)
		case !as && bs:
			MulIncrVSI8(at, bt[0], it)
		default:
			MulIncrI8(at, bt, it)
		}
		return
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecMulI16(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVI16(at[0], bt, it)
		case !as && bs:
			MulIncrVSI16(at, bt[0], it)
		default:
			MulIncrI16(at, bt, it)
		}
		return
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecMulI32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVI32(at[0], bt, it)
		case !as && bs:
			MulIncrVSI32(at, bt[0], it)
		default:
			MulIncrI32(at, bt, it)
		}
		return
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecMulI64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVI64(at[0], bt, it)
		case !as && bs:
			MulIncrVSI64(at, bt[0], it)
		default:
			MulIncrI64(at, bt, it)
		}
		return
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecMulU(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVU(at[0], bt, it)
		case !as && bs:
			MulIncrVSU(at, bt[0], it)
		default:
			MulIncrU(at, bt, it)
		}
		return
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecMulU8(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVU8(at[0], bt, it)
		case !as && bs:
			MulIncrVSU8(at, bt[0], it)
		default:
			MulIncrU8(at, bt, it)
		}
		return
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecMulU16(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVU16(at[0], bt, it)
		case !as && bs:
			MulIncrVSU16(at, bt[0], it)
		default:
			MulIncrU16(at, bt, it)
		}
		return
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecMulU32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVU32(at[0], bt, it)
		case !as && bs:
			MulIncrVSU32(at, bt[0], it)
		default:
			MulIncrU32(at, bt, it)
		}
		return
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecMulU64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVU64(at[0], bt, it)
		case !as && bs:
			MulIncrVSU64(at, bt[0], it)
		default:
			MulIncrU64(at, bt, it)
		}
		return
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecMulF32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVF32(at[0], bt, it)
		case !as && bs:
			MulIncrVSF32(at, bt[0], it)
		default:
			MulIncrF32(at, bt, it)
		}
		return
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecMulF64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVF64(at[0], bt, it)
		case !as && bs:
			MulIncrVSF64(at, bt[0], it)
		default:
			MulIncrF64(at, bt, it)
		}
		return
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecMulC64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVC64(at[0], bt, it)
		case !as && bs:
			MulIncrVSC64(at, bt[0], it)
		default:
			MulIncrC64(at, bt, it)
		}
		return
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecMulC128(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			MulIncrSVC128(at[0], bt, it)
		case !as && bs:
			MulIncrVSC128(at, bt[0], it)
		default:
			MulIncrC128(at, bt, it)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Mul", t)
	}
}

// DivIncr performs an elementwise incr += a / b.
func (e E) DivIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b): %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecDivI(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVI(at[0], bt, it)
		case !as && bs:
			DivIncrVSI(at, bt[0], it)
		default:
			DivIncrI(at, bt, it)
		}
		return
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecDivI8(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVI8(at[0], bt, it)
		case !as && bs:
			DivIncrVSI8(at, bt[0], it)
		default:
			DivIncrI8(at, bt, it)
		}
		return
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecDivI16(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVI16(at[0], bt, it)
		case !as && bs:
			DivIncrVSI16(at, bt[0], it)
		default:
			DivIncrI16(at, bt, it)
		}
		return
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecDivI32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVI32(at[0], bt, it)
		case !as && bs:
			DivIncrVSI32(at, bt[0], it)
		default:
			DivIncrI32(at, bt, it)
		}
		return
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecDivI64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVI64(at[0], bt, it)
		case !as && bs:
			DivIncrVSI64(at, bt[0], it)
		default:
			DivIncrI64(at, bt, it)
		}
		return
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecDivU(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVU(at[0], bt, it)
		case !as && bs:
			DivIncrVSU(at, bt[0], it)
		default:
			DivIncrU(at, bt, it)
		}
		return
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecDivU8(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVU8(at[0], bt, it)
		case !as && bs:
			DivIncrVSU8(at, bt[0], it)
		default:
			DivIncrU8(at, bt, it)
		}
		return
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecDivU16(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVU16(at[0], bt, it)
		case !as && bs:
			DivIncrVSU16(at, bt[0], it)
		default:
			DivIncrU16(at, bt, it)
		}
		return
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecDivU32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVU32(at[0], bt, it)
		case !as && bs:
			DivIncrVSU32(at, bt[0], it)
		default:
			DivIncrU32(at, bt, it)
		}
		return
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecDivU64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVU64(at[0], bt, it)
		case !as && bs:
			DivIncrVSU64(at, bt[0], it)
		default:
			DivIncrU64(at, bt, it)
		}
		return
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecDivF32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVF32(at[0], bt, it)
		case !as && bs:
			DivIncrVSF32(at, bt[0], it)
		default:
			DivIncrF32(at, bt, it)
		}
		return
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecDivF64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVF64(at[0], bt, it)
		case !as && bs:
			DivIncrVSF64(at, bt[0], it)
		default:
			DivIncrF64(at, bt, it)
		}
		return
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecDivC64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVC64(at[0], bt, it)
		case !as && bs:
			DivIncrVSC64(at, bt[0], it)
		default:
			DivIncrC64(at, bt, it)
		}
		return
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecDivC128(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			DivIncrSVC128(at[0], bt, it)
		case !as && bs:
			DivIncrVSC128(at, bt[0], it)
		default:
			DivIncrC128(at, bt, it)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Div", t)
	}
}

// PowIncr performs an elementwise incr += a ^ b. Pow is only generated for
// the float and complex types.
func (e E) PowIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b): %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecPowF32(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			PowIncrSVF32(at[0], bt, it)
		case !as && bs:
			PowIncrVSF32(at, bt[0], it)
		default:
			PowIncrF32(at, bt, it)
		}
		return
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecPowF64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			PowIncrSVF64(at[0], bt, it)
		case !as && bs:
			PowIncrVSF64(at, bt[0], it)
		default:
			PowIncrF64(at, bt, it)
		}
		return
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecPowC64(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			PowIncrSVC64(at[0], bt, it)
		case !as && bs:
			PowIncrVSC64(at, bt[0], it)
		default:
			PowIncrC64(at, bt, it)
		}
		return
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecPowC128(at, bt)
			if !is {
				return e.Add(t, incr, a)
			}
			it[0] += at[0]
		case as && !bs:
			PowIncrSVC128(at[0], bt, it)
		case !as && bs:
			PowIncrVSC128(at, bt[0], it)
		default:
			PowIncrC128(at, bt, it)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Pow", t)
	}
}
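
// ModIncr performs an elementwise incr += a % b, dispatching on the concrete
// type t and on whether either operand is a scalar. A minimal usage sketch
// (ha, hb and hincr are hypothetical, pre-filled *storage.Header values
// backed by []int data):
//
//	var e E
//	if err := e.ModIncr(Int, ha, hb, hincr); err != nil {
//		// handle unsupported types or shape mismatches here
//	}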
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() it := incr.Ints() switch { case as && bs: VecModI(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVI(at[0], bt, it) case !as && bs: ModIncrVSI(at, bt[0], it) default: ModIncrI(at, bt, it) } return case Int8: at := a.Int8s() bt := b.Int8s() it := incr.Int8s() switch { case as && bs: VecModI8(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVI8(at[0], bt, it) case !as && bs: ModIncrVSI8(at, bt[0], it) default: ModIncrI8(at, bt, it) } return case Int16: at := a.Int16s() bt := b.Int16s() it := incr.Int16s() switch { case as && bs: VecModI16(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVI16(at[0], bt, it) case !as && bs: ModIncrVSI16(at, bt[0], it) default: ModIncrI16(at, bt, it) } return case Int32: at := a.Int32s() bt := b.Int32s() it := incr.Int32s() switch { case as && bs: VecModI32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVI32(at[0], bt, it) case !as && bs: ModIncrVSI32(at, bt[0], it) default: ModIncrI32(at, bt, it) } return case Int64: at := a.Int64s() bt := b.Int64s() it := incr.Int64s() switch { case as && bs: VecModI64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVI64(at[0], bt, it) case !as && bs: ModIncrVSI64(at, bt[0], it) default: ModIncrI64(at, bt, it) } return case Uint: at := a.Uints() bt := b.Uints() it := incr.Uints() switch { case as && bs: VecModU(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVU(at[0], bt, it) case !as && bs: ModIncrVSU(at, bt[0], it) default: ModIncrU(at, bt, it) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() it := incr.Uint8s() switch { case as && bs: VecModU8(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVU8(at[0], bt, it) case !as && bs: ModIncrVSU8(at, bt[0], it) default: ModIncrU8(at, bt, it) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() it := incr.Uint16s() switch { case as && bs: VecModU16(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVU16(at[0], bt, it) case !as && bs: ModIncrVSU16(at, bt[0], it) default: ModIncrU16(at, bt, it) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() it := incr.Uint32s() switch { case as && bs: VecModU32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVU32(at[0], bt, it) case !as && bs: ModIncrVSU32(at, bt[0], it) default: ModIncrU32(at, bt, it) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() it := incr.Uint64s() switch { case as && bs: VecModU64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVU64(at[0], bt, it) case !as && bs: ModIncrVSU64(at, bt[0], it) default: ModIncrU64(at, bt, it) } return case Float32: at := a.Float32s() bt := b.Float32s() it := incr.Float32s() switch { case as && bs: VecModF32(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVF32(at[0], bt, it) case !as && bs: ModIncrVSF32(at, bt[0], it) default: ModIncrF32(at, bt, it) } return case Float64: at := a.Float64s() bt := b.Float64s() it := incr.Float64s() switch { case as && bs: VecModF64(at, bt) if !is { return e.Add(t, incr, a) } it[0] += at[0] case as && !bs: ModIncrSVF64(at[0], bt, it) case !as && bs: ModIncrVSF64(at, bt[0], it) default: ModIncrF64(at, bt, it) } return default: return errors.Errorf("Unsupported type %v 
for Mod", t) } } func (e E) AddIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecAddI(at, bt) case as && !bs: AddIterSVI(at[0], bt, bit) case !as && bs: AddIterVSI(at, bt[0], ait) default: AddIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecAddI8(at, bt) case as && !bs: AddIterSVI8(at[0], bt, bit) case !as && bs: AddIterVSI8(at, bt[0], ait) default: AddIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecAddI16(at, bt) case as && !bs: AddIterSVI16(at[0], bt, bit) case !as && bs: AddIterVSI16(at, bt[0], ait) default: AddIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecAddI32(at, bt) case as && !bs: AddIterSVI32(at[0], bt, bit) case !as && bs: AddIterVSI32(at, bt[0], ait) default: AddIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecAddI64(at, bt) case as && !bs: AddIterSVI64(at[0], bt, bit) case !as && bs: AddIterVSI64(at, bt[0], ait) default: AddIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecAddU(at, bt) case as && !bs: AddIterSVU(at[0], bt, bit) case !as && bs: AddIterVSU(at, bt[0], ait) default: AddIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecAddU8(at, bt) case as && !bs: AddIterSVU8(at[0], bt, bit) case !as && bs: AddIterVSU8(at, bt[0], ait) default: AddIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecAddU16(at, bt) case as && !bs: AddIterSVU16(at[0], bt, bit) case !as && bs: AddIterVSU16(at, bt[0], ait) default: AddIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecAddU32(at, bt) case as && !bs: AddIterSVU32(at[0], bt, bit) case !as && bs: AddIterVSU32(at, bt[0], ait) default: AddIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecAddU64(at, bt) case as && !bs: AddIterSVU64(at[0], bt, bit) case !as && bs: AddIterVSU64(at, bt[0], ait) default: AddIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecAddF32(at, bt) case as && !bs: AddIterSVF32(at[0], bt, bit) case !as && bs: AddIterVSF32(at, bt[0], ait) default: AddIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecAddF64(at, bt) case as && !bs: AddIterSVF64(at[0], bt, bit) case !as && bs: AddIterVSF64(at, bt[0], ait) default: AddIterF64(at, bt, ait, bit) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecAddC64(at, bt) case as && !bs: AddIterSVC64(at[0], bt, bit) case !as && bs: AddIterVSC64(at, bt[0], ait) default: AddIterC64(at, bt, ait, bit) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecAddC128(at, bt) case as && !bs: AddIterSVC128(at[0], bt, bit) case !as && bs: AddIterVSC128(at, bt[0], ait) default: AddIterC128(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecAddStr(at, bt) case as && !bs: AddIterSVStr(at[0], bt, bit) case !as && bs: AddIterVSStr(at, bt[0], ait) default: 
func (e E) SubIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at, bt := a.Ints(), b.Ints()
		switch {
		case as && bs:
			VecSubI(at, bt)
		case as && !bs:
			SubIterSVI(at[0], bt, bit)
		case !as && bs:
			SubIterVSI(at, bt[0], ait)
		default:
			SubIterI(at, bt, ait, bit)
		}
		return
	case Int8:
		at, bt := a.Int8s(), b.Int8s()
		switch {
		case as && bs:
			VecSubI8(at, bt)
		case as && !bs:
			SubIterSVI8(at[0], bt, bit)
		case !as && bs:
			SubIterVSI8(at, bt[0], ait)
		default:
			SubIterI8(at, bt, ait, bit)
		}
		return
	case Int16:
		at, bt := a.Int16s(), b.Int16s()
		switch {
		case as && bs:
			VecSubI16(at, bt)
		case as && !bs:
			SubIterSVI16(at[0], bt, bit)
		case !as && bs:
			SubIterVSI16(at, bt[0], ait)
		default:
			SubIterI16(at, bt, ait, bit)
		}
		return
	case Int32:
		at, bt := a.Int32s(), b.Int32s()
		switch {
		case as && bs:
			VecSubI32(at, bt)
		case as && !bs:
			SubIterSVI32(at[0], bt, bit)
		case !as && bs:
			SubIterVSI32(at, bt[0], ait)
		default:
			SubIterI32(at, bt, ait, bit)
		}
		return
	case Int64:
		at, bt := a.Int64s(), b.Int64s()
		switch {
		case as && bs:
			VecSubI64(at, bt)
		case as && !bs:
			SubIterSVI64(at[0], bt, bit)
		case !as && bs:
			SubIterVSI64(at, bt[0], ait)
		default:
			SubIterI64(at, bt, ait, bit)
		}
		return
	case Uint:
		at, bt := a.Uints(), b.Uints()
		switch {
		case as && bs:
			VecSubU(at, bt)
		case as && !bs:
			SubIterSVU(at[0], bt, bit)
		case !as && bs:
			SubIterVSU(at, bt[0], ait)
		default:
			SubIterU(at, bt, ait, bit)
		}
		return
	case Uint8:
		at, bt := a.Uint8s(), b.Uint8s()
		switch {
		case as && bs:
			VecSubU8(at, bt)
		case as && !bs:
			SubIterSVU8(at[0], bt, bit)
		case !as && bs:
			SubIterVSU8(at, bt[0], ait)
		default:
			SubIterU8(at, bt, ait, bit)
		}
		return
	case Uint16:
		at, bt := a.Uint16s(), b.Uint16s()
		switch {
		case as && bs:
			VecSubU16(at, bt)
		case as && !bs:
			SubIterSVU16(at[0], bt, bit)
		case !as && bs:
			SubIterVSU16(at, bt[0], ait)
		default:
			SubIterU16(at, bt, ait, bit)
		}
		return
	case Uint32:
		at, bt := a.Uint32s(), b.Uint32s()
		switch {
		case as && bs:
			VecSubU32(at, bt)
		case as && !bs:
			SubIterSVU32(at[0], bt, bit)
		case !as && bs:
			SubIterVSU32(at, bt[0], ait)
		default:
			SubIterU32(at, bt, ait, bit)
		}
		return
	case Uint64:
		at, bt := a.Uint64s(), b.Uint64s()
		switch {
		case as && bs:
			VecSubU64(at, bt)
		case as && !bs:
			SubIterSVU64(at[0], bt, bit)
		case !as && bs:
			SubIterVSU64(at, bt[0], ait)
		default:
			SubIterU64(at, bt, ait, bit)
		}
		return
	case Float32:
		at, bt := a.Float32s(), b.Float32s()
		switch {
		case as && bs:
			VecSubF32(at, bt)
		case as && !bs:
			SubIterSVF32(at[0], bt, bit)
		case !as && bs:
			SubIterVSF32(at, bt[0], ait)
		default:
			SubIterF32(at, bt, ait, bit)
		}
		return
	case Float64:
		at, bt := a.Float64s(), b.Float64s()
		switch {
		case as && bs:
			VecSubF64(at, bt)
		case as && !bs:
			SubIterSVF64(at[0], bt, bit)
		case !as && bs:
			SubIterVSF64(at, bt[0], ait)
		default:
			SubIterF64(at, bt, ait, bit)
		}
		return
	case Complex64:
		at, bt := a.Complex64s(), b.Complex64s()
		switch {
		case as && bs:
			VecSubC64(at, bt)
		case as && !bs:
			SubIterSVC64(at[0], bt, bit)
		case !as && bs:
			SubIterVSC64(at, bt[0], ait)
		default:
			SubIterC64(at, bt, ait, bit)
		}
		return
	case Complex128:
		at, bt := a.Complex128s(), b.Complex128s()
		switch {
		case as && bs:
			VecSubC128(at, bt)
		case as && !bs:
			SubIterSVC128(at[0], bt, bit)
		case !as && bs:
			SubIterVSC128(at, bt[0], ait)
		default:
			SubIterC128(at, bt, ait, bit)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for SubIter", t)
	}
}

func (e E) MulIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at, bt := a.Ints(), b.Ints()
		switch {
		case as && bs:
			VecMulI(at, bt)
		case as && !bs:
			MulIterSVI(at[0], bt, bit)
		case !as && bs:
			MulIterVSI(at, bt[0], ait)
		default:
			MulIterI(at, bt, ait, bit)
		}
		return
	case Int8:
		at, bt := a.Int8s(), b.Int8s()
		switch {
		case as && bs:
			VecMulI8(at, bt)
		case as && !bs:
			MulIterSVI8(at[0], bt, bit)
		case !as && bs:
			MulIterVSI8(at, bt[0], ait)
		default:
			MulIterI8(at, bt, ait, bit)
		}
		return
	case Int16:
		at, bt := a.Int16s(), b.Int16s()
		switch {
		case as && bs:
			VecMulI16(at, bt)
		case as && !bs:
			MulIterSVI16(at[0], bt, bit)
		case !as && bs:
			MulIterVSI16(at, bt[0], ait)
		default:
			MulIterI16(at, bt, ait, bit)
		}
		return
	case Int32:
		at, bt := a.Int32s(), b.Int32s()
		switch {
		case as && bs:
			VecMulI32(at, bt)
		case as && !bs:
			MulIterSVI32(at[0], bt, bit)
		case !as && bs:
			MulIterVSI32(at, bt[0], ait)
		default:
			MulIterI32(at, bt, ait, bit)
		}
		return
	case Int64:
		at, bt := a.Int64s(), b.Int64s()
		switch {
		case as && bs:
			VecMulI64(at, bt)
		case as && !bs:
			MulIterSVI64(at[0], bt, bit)
		case !as && bs:
			MulIterVSI64(at, bt[0], ait)
		default:
			MulIterI64(at, bt, ait, bit)
		}
		return
	case Uint:
		at, bt := a.Uints(), b.Uints()
		switch {
		case as && bs:
			VecMulU(at, bt)
		case as && !bs:
			MulIterSVU(at[0], bt, bit)
		case !as && bs:
			MulIterVSU(at, bt[0], ait)
		default:
			MulIterU(at, bt, ait, bit)
		}
		return
	case Uint8:
		at, bt := a.Uint8s(), b.Uint8s()
		switch {
		case as && bs:
			VecMulU8(at, bt)
		case as && !bs:
			MulIterSVU8(at[0], bt, bit)
		case !as && bs:
			MulIterVSU8(at, bt[0], ait)
		default:
			MulIterU8(at, bt, ait, bit)
		}
		return
	case Uint16:
		at, bt := a.Uint16s(), b.Uint16s()
		switch {
		case as && bs:
			VecMulU16(at, bt)
		case as && !bs:
			MulIterSVU16(at[0], bt, bit)
		case !as && bs:
			MulIterVSU16(at, bt[0], ait)
		default:
			MulIterU16(at, bt, ait, bit)
		}
		return
	case Uint32:
		at, bt := a.Uint32s(), b.Uint32s()
		switch {
		case as && bs:
			VecMulU32(at, bt)
		case as && !bs:
			MulIterSVU32(at[0], bt, bit)
		case !as && bs:
			MulIterVSU32(at, bt[0], ait)
		default:
			MulIterU32(at, bt, ait, bit)
		}
		return
	case Uint64:
		at, bt := a.Uint64s(), b.Uint64s()
		switch {
		case as && bs:
			VecMulU64(at, bt)
		case as && !bs:
			MulIterSVU64(at[0], bt, bit)
		case !as && bs:
			MulIterVSU64(at, bt[0], ait)
		default:
			MulIterU64(at, bt, ait, bit)
		}
		return
	case Float32:
		at, bt := a.Float32s(), b.Float32s()
		switch {
		case as && bs:
			VecMulF32(at, bt)
		case as && !bs:
			MulIterSVF32(at[0], bt, bit)
		case !as && bs:
			MulIterVSF32(at, bt[0], ait)
		default:
			MulIterF32(at, bt, ait, bit)
		}
		return
	case Float64:
		at, bt := a.Float64s(), b.Float64s()
		switch {
		case as && bs:
			VecMulF64(at, bt)
		case as && !bs:
			MulIterSVF64(at[0], bt, bit)
		case !as && bs:
			MulIterVSF64(at, bt[0], ait)
		default:
			MulIterF64(at, bt, ait, bit)
		}
		return
	case Complex64:
		at, bt := a.Complex64s(), b.Complex64s()
		switch {
		case as && bs:
			VecMulC64(at, bt)
		case as && !bs:
			MulIterSVC64(at[0], bt, bit)
		case !as && bs:
			MulIterVSC64(at, bt[0], ait)
		default:
			MulIterC64(at, bt, ait, bit)
		}
		return
	case Complex128:
		at, bt := a.Complex128s(), b.Complex128s()
		switch {
		case as && bs:
			VecMulC128(at, bt)
		case as && !bs:
			MulIterSVC128(at[0], bt, bit)
		case !as && bs:
			MulIterVSC128(at, bt[0], ait)
		default:
			MulIterC128(at, bt, ait, bit)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for MulIter", t)
	}
}
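
// DivIter performs an elementwise a /= b under the access patterns described
// by ait and bit. As with the other Iter variants, a scalar operand is routed
// to the SV/VS specialisation, which only needs the iterator of the vector
// side.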
func (e E) DivIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at, bt := a.Ints(), b.Ints()
		switch {
		case as && bs:
			VecDivI(at, bt)
		case as && !bs:
			DivIterSVI(at[0], bt, bit)
		case !as && bs:
			DivIterVSI(at, bt[0], ait)
		default:
			DivIterI(at, bt, ait, bit)
		}
		return
	case Int8:
		at, bt := a.Int8s(), b.Int8s()
		switch {
		case as && bs:
			VecDivI8(at, bt)
		case as && !bs:
			DivIterSVI8(at[0], bt, bit)
		case !as && bs:
			DivIterVSI8(at, bt[0], ait)
		default:
			DivIterI8(at, bt, ait, bit)
		}
		return
	case Int16:
		at, bt := a.Int16s(), b.Int16s()
		switch {
		case as && bs:
			VecDivI16(at, bt)
		case as && !bs:
			DivIterSVI16(at[0], bt, bit)
		case !as && bs:
			DivIterVSI16(at, bt[0], ait)
		default:
			DivIterI16(at, bt, ait, bit)
		}
		return
	case Int32:
		at, bt := a.Int32s(), b.Int32s()
		switch {
		case as && bs:
			VecDivI32(at, bt)
		case as && !bs:
			DivIterSVI32(at[0], bt, bit)
		case !as && bs:
			DivIterVSI32(at, bt[0], ait)
		default:
			DivIterI32(at, bt, ait, bit)
		}
		return
	case Int64:
		at, bt := a.Int64s(), b.Int64s()
		switch {
		case as && bs:
			VecDivI64(at, bt)
		case as && !bs:
			DivIterSVI64(at[0], bt, bit)
		case !as && bs:
			DivIterVSI64(at, bt[0], ait)
		default:
			DivIterI64(at, bt, ait, bit)
		}
		return
	case Uint:
		at, bt := a.Uints(), b.Uints()
		switch {
		case as && bs:
			VecDivU(at, bt)
		case as && !bs:
			DivIterSVU(at[0], bt, bit)
		case !as && bs:
			DivIterVSU(at, bt[0], ait)
		default:
			DivIterU(at, bt, ait, bit)
		}
		return
	case Uint8:
		at, bt := a.Uint8s(), b.Uint8s()
		switch {
		case as && bs:
			VecDivU8(at, bt)
		case as && !bs:
			DivIterSVU8(at[0], bt, bit)
		case !as && bs:
			DivIterVSU8(at, bt[0], ait)
		default:
			DivIterU8(at, bt, ait, bit)
		}
		return
	case Uint16:
		at, bt := a.Uint16s(), b.Uint16s()
		switch {
		case as && bs:
			VecDivU16(at, bt)
		case as && !bs:
			DivIterSVU16(at[0], bt, bit)
		case !as && bs:
			DivIterVSU16(at, bt[0], ait)
		default:
			DivIterU16(at, bt, ait, bit)
		}
		return
	case Uint32:
		at, bt := a.Uint32s(), b.Uint32s()
		switch {
		case as && bs:
			VecDivU32(at, bt)
		case as && !bs:
			DivIterSVU32(at[0], bt, bit)
		case !as && bs:
			DivIterVSU32(at, bt[0], ait)
		default:
			DivIterU32(at, bt, ait, bit)
		}
		return
	case Uint64:
		at, bt := a.Uint64s(), b.Uint64s()
		switch {
		case as && bs:
			VecDivU64(at, bt)
		case as && !bs:
			DivIterSVU64(at[0], bt, bit)
		case !as && bs:
			DivIterVSU64(at, bt[0], ait)
		default:
			DivIterU64(at, bt, ait, bit)
		}
		return
	case Float32:
		at, bt := a.Float32s(), b.Float32s()
		switch {
		case as && bs:
			VecDivF32(at, bt)
		case as && !bs:
			DivIterSVF32(at[0], bt, bit)
		case !as && bs:
			DivIterVSF32(at, bt[0], ait)
		default:
			DivIterF32(at, bt, ait, bit)
		}
		return
	case Float64:
		at, bt := a.Float64s(), b.Float64s()
		switch {
		case as && bs:
			VecDivF64(at, bt)
		case as && !bs:
			DivIterSVF64(at[0], bt, bit)
		case !as && bs:
			DivIterVSF64(at, bt[0], ait)
		default:
			DivIterF64(at, bt, ait, bit)
		}
		return
	case Complex64:
		at, bt := a.Complex64s(), b.Complex64s()
		switch {
		case as && bs:
			VecDivC64(at, bt)
		case as && !bs:
			DivIterSVC64(at[0], bt, bit)
		case !as && bs:
			DivIterVSC64(at, bt[0], ait)
		default:
			DivIterC64(at, bt, ait, bit)
		}
		return
	case Complex128:
		at, bt := a.Complex128s(), b.Complex128s()
		switch {
		case as && bs:
			VecDivC128(at, bt)
		case as && !bs:
			DivIterSVC128(at[0], bt, bit)
		case !as && bs:
			DivIterVSC128(at, bt[0], ait)
		default:
			DivIterC128(at, bt, ait, bit)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for DivIter", t)
	}
}

func (e E) PowIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Float32:
		at, bt := a.Float32s(), b.Float32s()
		switch {
		case as && bs:
			VecPowF32(at, bt)
		case as && !bs:
			PowIterSVF32(at[0], bt, bit)
		case !as && bs:
			PowIterVSF32(at, bt[0], ait)
		default:
			PowIterF32(at, bt, ait, bit)
		}
		return
	case Float64:
		at, bt := a.Float64s(), b.Float64s()
		switch {
		case as && bs:
			VecPowF64(at, bt)
		case as && !bs:
			PowIterSVF64(at[0], bt, bit)
		case !as && bs:
			PowIterVSF64(at, bt[0], ait)
		default:
			PowIterF64(at, bt, ait, bit)
		}
		return
	case Complex64:
		at, bt := a.Complex64s(), b.Complex64s()
		switch {
		case as && bs:
			VecPowC64(at, bt)
		case as && !bs:
			PowIterSVC64(at[0], bt, bit)
		case !as && bs:
			PowIterVSC64(at, bt[0], ait)
		default:
			PowIterC64(at, bt, ait, bit)
		}
		return
	case Complex128:
		at, bt := a.Complex128s(), b.Complex128s()
		switch {
		case as && bs:
			VecPowC128(at, bt)
		case as && !bs:
			PowIterSVC128(at[0], bt, bit)
		case !as && bs:
			PowIterVSC128(at, bt[0], ait)
		default:
			PowIterC128(at, bt, ait, bit)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for PowIter", t)
	}
}

func (e E) ModIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at, bt := a.Ints(), b.Ints()
		switch {
		case as && bs:
			VecModI(at, bt)
		case as && !bs:
			ModIterSVI(at[0], bt, bit)
		case !as && bs:
			ModIterVSI(at, bt[0], ait)
		default:
			ModIterI(at, bt, ait, bit)
		}
		return
	case Int8:
		at, bt := a.Int8s(), b.Int8s()
		switch {
		case as && bs:
			VecModI8(at, bt)
		case as && !bs:
			ModIterSVI8(at[0], bt, bit)
		case !as && bs:
			ModIterVSI8(at, bt[0], ait)
		default:
			ModIterI8(at, bt, ait, bit)
		}
		return
	case Int16:
		at, bt := a.Int16s(), b.Int16s()
		switch {
		case as && bs:
			VecModI16(at, bt)
		case as && !bs:
			ModIterSVI16(at[0], bt, bit)
		case !as && bs:
			ModIterVSI16(at, bt[0], ait)
		default:
			ModIterI16(at, bt, ait, bit)
		}
		return
	case Int32:
		at, bt := a.Int32s(), b.Int32s()
		switch {
		case as && bs:
			VecModI32(at, bt)
		case as && !bs:
			ModIterSVI32(at[0], bt, bit)
		case !as && bs:
			ModIterVSI32(at, bt[0], ait)
		default:
			ModIterI32(at, bt, ait, bit)
		}
		return
	case Int64:
		at, bt := a.Int64s(), b.Int64s()
		switch {
		case as && bs:
			VecModI64(at, bt)
		case as && !bs:
			ModIterSVI64(at[0], bt, bit)
		case !as && bs:
			ModIterVSI64(at, bt[0], ait)
		default:
			ModIterI64(at, bt, ait, bit)
		}
		return
	case Uint:
		at, bt := a.Uints(), b.Uints()
		switch {
		case as && bs:
			VecModU(at, bt)
		case as && !bs:
			ModIterSVU(at[0], bt, bit)
		case !as && bs:
			ModIterVSU(at, bt[0], ait)
		default:
			ModIterU(at, bt, ait, bit)
		}
		return
	case Uint8:
		at, bt := a.Uint8s(), b.Uint8s()
		switch {
		case as && bs:
			VecModU8(at, bt)
		case as && !bs:
			ModIterSVU8(at[0], bt, bit)
		case !as && bs:
			ModIterVSU8(at, bt[0], ait)
		default:
			ModIterU8(at, bt, ait, bit)
		}
		return
	case Uint16:
		at, bt := a.Uint16s(), b.Uint16s()
		switch {
		case as && bs:
			VecModU16(at, bt)
		case as && !bs:
			ModIterSVU16(at[0], bt, bit)
		case !as && bs:
			ModIterVSU16(at, bt[0], ait)
		default:
			ModIterU16(at, bt, ait, bit)
		}
		return
	case Uint32:
		at, bt := a.Uint32s(), b.Uint32s()
		switch {
		case as && bs:
			VecModU32(at, bt)
		case as && !bs:
			ModIterSVU32(at[0], bt, bit)
		case !as && bs:
			ModIterVSU32(at, bt[0], ait)
		default:
			ModIterU32(at, bt, ait, bit)
		}
		return
	case Uint64:
		at, bt := a.Uint64s(), b.Uint64s()
		switch {
		case as && bs:
			VecModU64(at, bt)
		case as && !bs:
			ModIterSVU64(at[0], bt, bit)
		case !as && bs:
			ModIterVSU64(at, bt[0], ait)
		default:
			ModIterU64(at, bt, ait, bit)
		}
		return
	case Float32:
		at, bt := a.Float32s(), b.Float32s()
		switch {
		case as && bs:
			VecModF32(at, bt)
		case as && !bs:
			ModIterSVF32(at[0], bt, bit)
		case !as && bs:
			ModIterVSF32(at, bt[0], ait)
		default:
			ModIterF32(at, bt, ait, bit)
		}
		return
	case Float64:
		at, bt := a.Float64s(), b.Float64s()
		switch {
		case as && bs:
			VecModF64(at, bt)
		case as && !bs:
			ModIterSVF64(at[0], bt, bit)
		case !as && bs:
			ModIterVSF64(at, bt[0], ait)
		default:
			ModIterF64(at, bt, ait, bit)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for ModIter", t)
	}
}
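
// AddIterIncr performs incr += a + b elementwise, with ait, bit and iit
// describing the traversal order of a, b and incr respectively. When both
// operands are scalar, the result is folded into incr directly, or delegated
// to AddIter when incr itself is not scalar.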
func (e E) AddIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecAddI(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVI(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSI(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrI(at, bt, it, ait, bit, iit)
		}
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecAddI8(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVI8(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSI8(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrI8(at, bt, it, ait, bit, iit)
		}
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecAddI16(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVI16(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSI16(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrI16(at, bt, it, ait, bit, iit)
		}
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecAddI32(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVI32(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSI32(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrI32(at, bt, it, ait, bit, iit)
		}
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecAddI64(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVI64(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSI64(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrI64(at, bt, it, ait, bit, iit)
		}
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecAddU(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVU(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSU(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrU(at, bt, it, ait, bit, iit)
		}
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecAddU8(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVU8(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSU8(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrU8(at, bt, it, ait, bit, iit)
		}
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecAddU16(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVU16(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSU16(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrU16(at, bt, it, ait, bit, iit)
		}
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecAddU32(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVU32(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSU32(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrU32(at, bt, it, ait, bit, iit)
		}
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecAddU64(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVU64(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSU64(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrU64(at, bt, it, ait, bit, iit)
		}
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecAddF32(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVF32(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSF32(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrF32(at, bt, it, ait, bit, iit)
		}
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecAddF64(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVF64(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSF64(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrF64(at, bt, it, ait, bit, iit)
		}
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecAddC64(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVC64(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSC64(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrC64(at, bt, it, ait, bit, iit)
		}
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecAddC128(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVC128(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSC128(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrC128(at, bt, it, ait, bit, iit)
		}
	case String:
		at, bt, it := a.Strings(), b.Strings(), incr.Strings()
		switch {
		case as && bs:
			VecAddStr(at, bt)
			if !is {
				return e.AddIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return AddIterIncrSVStr(at[0], bt, it, bit, iit)
		case !as && bs:
			return AddIterIncrVSStr(at, bt[0], it, ait, iit)
		default:
			return AddIterIncrStr(at, bt, it, ait, bit, iit)
		}
	default:
		return errors.Errorf("Unsupported type %v for AddIterIncr", t)
	}
}
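
// SubIterIncr performs incr += a - b elementwise, using the supplied
// iterators for traversal.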
func (e E) SubIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecSubI(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVI(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSI(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrI(at, bt, it, ait, bit, iit)
		}
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecSubI8(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVI8(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSI8(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrI8(at, bt, it, ait, bit, iit)
		}
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecSubI16(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVI16(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSI16(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrI16(at, bt, it, ait, bit, iit)
		}
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecSubI32(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVI32(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSI32(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrI32(at, bt, it, ait, bit, iit)
		}
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecSubI64(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVI64(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSI64(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrI64(at, bt, it, ait, bit, iit)
		}
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecSubU(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVU(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSU(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrU(at, bt, it, ait, bit, iit)
		}
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecSubU8(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVU8(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSU8(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrU8(at, bt, it, ait, bit, iit)
		}
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecSubU16(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVU16(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSU16(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrU16(at, bt, it, ait, bit, iit)
		}
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecSubU32(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVU32(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSU32(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrU32(at, bt, it, ait, bit, iit)
		}
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecSubU64(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVU64(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSU64(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrU64(at, bt, it, ait, bit, iit)
		}
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecSubF32(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVF32(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSF32(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrF32(at, bt, it, ait, bit, iit)
		}
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecSubF64(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVF64(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSF64(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrF64(at, bt, it, ait, bit, iit)
		}
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecSubC64(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVC64(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSC64(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrC64(at, bt, it, ait, bit, iit)
		}
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecSubC128(at, bt)
			if !is {
				return e.SubIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return SubIterIncrSVC128(at[0], bt, it, bit, iit)
		case !as && bs:
			return SubIterIncrVSC128(at, bt[0], it, ait, iit)
		default:
			return SubIterIncrC128(at, bt, it, ait, bit, iit)
		}
	default:
		return errors.Errorf("Unsupported type %v for SubIterIncr", t)
	}
}
func (e E) MulIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecMulI(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVI(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSI(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrI(at, bt, it, ait, bit, iit)
		}
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecMulI8(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVI8(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSI8(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrI8(at, bt, it, ait, bit, iit)
		}
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecMulI16(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVI16(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSI16(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrI16(at, bt, it, ait, bit, iit)
		}
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecMulI32(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVI32(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSI32(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrI32(at, bt, it, ait, bit, iit)
		}
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecMulI64(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVI64(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSI64(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrI64(at, bt, it, ait, bit, iit)
		}
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecMulU(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVU(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSU(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrU(at, bt, it, ait, bit, iit)
		}
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecMulU8(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVU8(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSU8(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrU8(at, bt, it, ait, bit, iit)
		}
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecMulU16(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVU16(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSU16(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrU16(at, bt, it, ait, bit, iit)
		}
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecMulU32(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVU32(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSU32(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrU32(at, bt, it, ait, bit, iit)
		}
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecMulU64(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVU64(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSU64(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrU64(at, bt, it, ait, bit, iit)
		}
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecMulF32(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVF32(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSF32(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrF32(at, bt, it, ait, bit, iit)
		}
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecMulF64(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVF64(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSF64(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrF64(at, bt, it, ait, bit, iit)
		}
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecMulC64(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVC64(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSC64(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrC64(at, bt, it, ait, bit, iit)
		}
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecMulC128(at, bt)
			if !is {
				return e.MulIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return MulIterIncrSVC128(at[0], bt, it, bit, iit)
		case !as && bs:
			return MulIterIncrVSC128(at, bt[0], it, ait, iit)
		default:
			return MulIterIncrC128(at, bt, it, ait, bit, iit)
		}
	default:
		return errors.Errorf("Unsupported type %v for MulIterIncr", t)
	}
}
func (e E) DivIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecDivI(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVI(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSI(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrI(at, bt, it, ait, bit, iit)
		}
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecDivI8(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVI8(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSI8(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrI8(at, bt, it, ait, bit, iit)
		}
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecDivI16(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVI16(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSI16(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrI16(at, bt, it, ait, bit, iit)
		}
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecDivI32(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVI32(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSI32(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrI32(at, bt, it, ait, bit, iit)
		}
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecDivI64(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVI64(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSI64(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrI64(at, bt, it, ait, bit, iit)
		}
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecDivU(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVU(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSU(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrU(at, bt, it, ait, bit, iit)
		}
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecDivU8(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVU8(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSU8(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrU8(at, bt, it, ait, bit, iit)
		}
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecDivU16(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVU16(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSU16(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrU16(at, bt, it, ait, bit, iit)
		}
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecDivU32(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVU32(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSU32(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrU32(at, bt, it, ait, bit, iit)
		}
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecDivU64(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVU64(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSU64(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrU64(at, bt, it, ait, bit, iit)
		}
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecDivF32(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVF32(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSF32(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrF32(at, bt, it, ait, bit, iit)
		}
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecDivF64(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVF64(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSF64(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrF64(at, bt, it, ait, bit, iit)
		}
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecDivC64(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVC64(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSC64(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrC64(at, bt, it, ait, bit, iit)
		}
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecDivC128(at, bt)
			if !is {
				return e.DivIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return DivIterIncrSVC128(at[0], bt, it, bit, iit)
		case !as && bs:
			return DivIterIncrVSC128(at, bt[0], it, ait, iit)
		default:
			return DivIterIncrC128(at, bt, it, ait, bit, iit)
		}
	default:
		return errors.Errorf("Unsupported type %v for DivIterIncr", t)
	}
}
func (e E) PowIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecPowF32(at, bt)
			if !is {
				return e.PowIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return PowIterIncrSVF32(at[0], bt, it, bit, iit)
		case !as && bs:
			return PowIterIncrVSF32(at, bt[0], it, ait, iit)
		default:
			return PowIterIncrF32(at, bt, it, ait, bit, iit)
		}
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecPowF64(at, bt)
			if !is {
				return e.PowIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return PowIterIncrSVF64(at[0], bt, it, bit, iit)
		case !as && bs:
			return PowIterIncrVSF64(at, bt[0], it, ait, iit)
		default:
			return PowIterIncrF64(at, bt, it, ait, bit, iit)
		}
	case Complex64:
		at, bt, it := a.Complex64s(), b.Complex64s(), incr.Complex64s()
		switch {
		case as && bs:
			VecPowC64(at, bt)
			if !is {
				return e.PowIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return PowIterIncrSVC64(at[0], bt, it, bit, iit)
		case !as && bs:
			return PowIterIncrVSC64(at, bt[0], it, ait, iit)
		default:
			return PowIterIncrC64(at, bt, it, ait, bit, iit)
		}
	case Complex128:
		at, bt, it := a.Complex128s(), b.Complex128s(), incr.Complex128s()
		switch {
		case as && bs:
			VecPowC128(at, bt)
			if !is {
				return e.PowIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return PowIterIncrSVC128(at[0], bt, it, bit, iit)
		case !as && bs:
			return PowIterIncrVSC128(at, bt[0], it, ait, iit)
		default:
			return PowIterIncrC128(at, bt, it, ait, bit, iit)
		}
	default:
		return errors.Errorf("Unsupported type %v for PowIterIncr", t)
	}
}
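
// ModIterIncr performs incr += a % b elementwise. Mod is not generated for
// the complex types.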
func (e E) ModIterIncr(t reflect.Type, a *storage.Header, b *storage.Header, incr *storage.Header, ait Iterator, bit Iterator, iit Iterator) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	is := isScalar(incr, t)
	if ((as && !bs) || (bs && !as)) && is {
		return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, it := a.Ints(), b.Ints(), incr.Ints()
		switch {
		case as && bs:
			VecModI(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVI(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSI(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrI(at, bt, it, ait, bit, iit)
		}
	case Int8:
		at, bt, it := a.Int8s(), b.Int8s(), incr.Int8s()
		switch {
		case as && bs:
			VecModI8(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVI8(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSI8(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrI8(at, bt, it, ait, bit, iit)
		}
	case Int16:
		at, bt, it := a.Int16s(), b.Int16s(), incr.Int16s()
		switch {
		case as && bs:
			VecModI16(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVI16(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSI16(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrI16(at, bt, it, ait, bit, iit)
		}
	case Int32:
		at, bt, it := a.Int32s(), b.Int32s(), incr.Int32s()
		switch {
		case as && bs:
			VecModI32(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVI32(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSI32(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrI32(at, bt, it, ait, bit, iit)
		}
	case Int64:
		at, bt, it := a.Int64s(), b.Int64s(), incr.Int64s()
		switch {
		case as && bs:
			VecModI64(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVI64(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSI64(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrI64(at, bt, it, ait, bit, iit)
		}
	case Uint:
		at, bt, it := a.Uints(), b.Uints(), incr.Uints()
		switch {
		case as && bs:
			VecModU(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVU(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSU(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrU(at, bt, it, ait, bit, iit)
		}
	case Uint8:
		at, bt, it := a.Uint8s(), b.Uint8s(), incr.Uint8s()
		switch {
		case as && bs:
			VecModU8(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVU8(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSU8(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrU8(at, bt, it, ait, bit, iit)
		}
	case Uint16:
		at, bt, it := a.Uint16s(), b.Uint16s(), incr.Uint16s()
		switch {
		case as && bs:
			VecModU16(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVU16(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSU16(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrU16(at, bt, it, ait, bit, iit)
		}
	case Uint32:
		at, bt, it := a.Uint32s(), b.Uint32s(), incr.Uint32s()
		switch {
		case as && bs:
			VecModU32(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVU32(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSU32(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrU32(at, bt, it, ait, bit, iit)
		}
	case Uint64:
		at, bt, it := a.Uint64s(), b.Uint64s(), incr.Uint64s()
		switch {
		case as && bs:
			VecModU64(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVU64(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSU64(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrU64(at, bt, it, ait, bit, iit)
		}
	case Float32:
		at, bt, it := a.Float32s(), b.Float32s(), incr.Float32s()
		switch {
		case as && bs:
			VecModF32(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVF32(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSF32(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrF32(at, bt, it, ait, bit, iit)
		}
	case Float64:
		at, bt, it := a.Float64s(), b.Float64s(), incr.Float64s()
		switch {
		case as && bs:
			VecModF64(at, bt)
			if !is {
				return e.ModIter(t, incr, a, iit, ait)
			}
			it[0] += at[0]
			return
		case as && !bs:
			return ModIterIncrSVF64(at[0], bt, it, bit, iit)
		case !as && bs:
			return ModIterIncrVSF64(at, bt[0], it, ait, iit)
		default:
			return ModIterIncrF64(at, bt, it, ait, bit, iit)
		}
	default:
		return errors.Errorf("Unsupported type %v for ModIterIncr", t)
	}
}

// AddRecv performs an elementwise recv = a + b, writing the result into a
// preallocated receiver.
func (e E) AddRecv(t reflect.Type, a *storage.Header, b *storage.Header, recv *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(recv, t)
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("Cannot use a scalar receiver with mixed scalar-vector operands. len(a): %d, len(b): %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at, bt, rt := a.Ints(), b.Ints(), recv.Ints()
		AddRecvI(at, bt, rt)
		return
	case Int8:
		at, bt, rt := a.Int8s(), b.Int8s(), recv.Int8s()
		AddRecvI8(at, bt, rt)
		return
	case Int16:
		at, bt, rt := a.Int16s(), b.Int16s(), recv.Int16s()
		AddRecvI16(at, bt, rt)
		return
	case Int32:
		at, bt, rt := a.Int32s(), b.Int32s(), recv.Int32s()
		AddRecvI32(at, bt, rt)
		return
	case Int64:
		at, bt, rt := a.Int64s(), b.Int64s(), recv.Int64s()
		AddRecvI64(at, bt, rt)
		return
	case Uint:
		at, bt, rt := a.Uints(), b.Uints(), recv.Uints()
		AddRecvU(at, bt, rt)
		return
	case Uint8:
		at, bt, rt := a.Uint8s(), b.Uint8s(), recv.Uint8s()
		AddRecvU8(at, bt, rt)
		return
	case Uint16:
		at, bt, rt := a.Uint16s(), b.Uint16s(), recv.Uint16s()
		AddRecvU16(at, bt, rt)
		return
	case Uint32:
		at, bt, rt := a.Uint32s(), b.Uint32s(), recv.Uint32s()
		AddRecvU32(at, bt, rt)
		return
	case Uint64:
		at, bt, rt := a.Uint64s(), b.Uint64s(), recv.Uint64s()
		AddRecvU64(at, bt, rt)
		return
	case Float32:
		at, bt, rt := a.Float32s(), b.Float32s(), recv.Float32s()
		AddRecvF32(at, bt, rt)
		return
	case Float64:
		at, bt, rt := a.Float64s(), b.Float64s(), recv.Float64s()
		AddRecvF64(at, bt, rt)
		return
	case Complex64:
		at, bt, rt := a.Complex64s(), b.Complex64s(), recv.Complex64s()
		AddRecvC64(at, bt, rt)
		return
	case Complex128:
		at, bt, rt := a.Complex128s(), b.Complex128s(), recv.Complex128s()
		AddRecvC128(at, bt, rt)
		return
	case String:
		at, bt, rt := a.Strings(), b.Strings(), recv.Strings()
		AddRecvStr(at, bt, rt)
		return
	default:
		return errors.Errorf("Unsupported type %v for AddRecv", t)
	}
}
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() rt := recv.Ints() SubRecvI(at, bt, rt) return case Int8: at := a.Int8s() bt := b.Int8s() rt := recv.Int8s() SubRecvI8(at, bt, rt) return case Int16: at := a.Int16s() bt := b.Int16s() rt := recv.Int16s() SubRecvI16(at, bt, rt) return case Int32: at := a.Int32s() bt := b.Int32s() rt := recv.Int32s() SubRecvI32(at, bt, rt) return case Int64: at := a.Int64s() bt := b.Int64s() rt := recv.Int64s() SubRecvI64(at, bt, rt) return case Uint: at := a.Uints() bt := b.Uints() rt := recv.Uints() SubRecvU(at, bt, rt) return case Uint8: at := a.Uint8s() bt := b.Uint8s() rt := recv.Uint8s() SubRecvU8(at, bt, rt) return case Uint16: at := a.Uint16s() bt := b.Uint16s() rt := recv.Uint16s() SubRecvU16(at, bt, rt) return case Uint32: at := a.Uint32s() bt := b.Uint32s() rt := recv.Uint32s() SubRecvU32(at, bt, rt) return case Uint64: at := a.Uint64s() bt := b.Uint64s() rt := recv.Uint64s() SubRecvU64(at, bt, rt) return case Float32: at := a.Float32s() bt := b.Float32s() rt := recv.Float32s() SubRecvF32(at, bt, rt) return case Float64: at := a.Float64s() bt := b.Float64s() rt := recv.Float64s() SubRecvF64(at, bt, rt) return case Complex64: at := a.Complex64s() bt := b.Complex64s() rt := recv.Complex64s() SubRecvC64(at, bt, rt) return case Complex128: at := a.Complex128s() bt := b.Complex128s() rt := recv.Complex128s() SubRecvC128(at, bt, rt) return default: return errors.Errorf("Unsupported type %v for SubRecv", t) } } func (e E) MulRecv(t reflect.Type, a *storage.Header, b *storage.Header, recv *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(recv, t) if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("Cannot increment on a scalar increment. 
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() rt := recv.Ints() MulRecvI(at, bt, rt) return case Int8: at := a.Int8s() bt := b.Int8s() rt := recv.Int8s() MulRecvI8(at, bt, rt) return case Int16: at := a.Int16s() bt := b.Int16s() rt := recv.Int16s() MulRecvI16(at, bt, rt) return case Int32: at := a.Int32s() bt := b.Int32s() rt := recv.Int32s() MulRecvI32(at, bt, rt) return case Int64: at := a.Int64s() bt := b.Int64s() rt := recv.Int64s() MulRecvI64(at, bt, rt) return case Uint: at := a.Uints() bt := b.Uints() rt := recv.Uints() MulRecvU(at, bt, rt) return case Uint8: at := a.Uint8s() bt := b.Uint8s() rt := recv.Uint8s() MulRecvU8(at, bt, rt) return case Uint16: at := a.Uint16s() bt := b.Uint16s() rt := recv.Uint16s() MulRecvU16(at, bt, rt) return case Uint32: at := a.Uint32s() bt := b.Uint32s() rt := recv.Uint32s() MulRecvU32(at, bt, rt) return case Uint64: at := a.Uint64s() bt := b.Uint64s() rt := recv.Uint64s() MulRecvU64(at, bt, rt) return case Float32: at := a.Float32s() bt := b.Float32s() rt := recv.Float32s() MulRecvF32(at, bt, rt) return case Float64: at := a.Float64s() bt := b.Float64s() rt := recv.Float64s() MulRecvF64(at, bt, rt) return case Complex64: at := a.Complex64s() bt := b.Complex64s() rt := recv.Complex64s() MulRecvC64(at, bt, rt) return case Complex128: at := a.Complex128s() bt := b.Complex128s() rt := recv.Complex128s() MulRecvC128(at, bt, rt) return default: return errors.Errorf("Unsupported type %v for MulRecv", t) } } func (e E) DivRecv(t reflect.Type, a *storage.Header, b *storage.Header, recv *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(recv, t) if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("Cannot increment on a scalar increment. 
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() rt := recv.Ints() DivRecvI(at, bt, rt) return case Int8: at := a.Int8s() bt := b.Int8s() rt := recv.Int8s() DivRecvI8(at, bt, rt) return case Int16: at := a.Int16s() bt := b.Int16s() rt := recv.Int16s() DivRecvI16(at, bt, rt) return case Int32: at := a.Int32s() bt := b.Int32s() rt := recv.Int32s() DivRecvI32(at, bt, rt) return case Int64: at := a.Int64s() bt := b.Int64s() rt := recv.Int64s() DivRecvI64(at, bt, rt) return case Uint: at := a.Uints() bt := b.Uints() rt := recv.Uints() DivRecvU(at, bt, rt) return case Uint8: at := a.Uint8s() bt := b.Uint8s() rt := recv.Uint8s() DivRecvU8(at, bt, rt) return case Uint16: at := a.Uint16s() bt := b.Uint16s() rt := recv.Uint16s() DivRecvU16(at, bt, rt) return case Uint32: at := a.Uint32s() bt := b.Uint32s() rt := recv.Uint32s() DivRecvU32(at, bt, rt) return case Uint64: at := a.Uint64s() bt := b.Uint64s() rt := recv.Uint64s() DivRecvU64(at, bt, rt) return case Float32: at := a.Float32s() bt := b.Float32s() rt := recv.Float32s() DivRecvF32(at, bt, rt) return case Float64: at := a.Float64s() bt := b.Float64s() rt := recv.Float64s() DivRecvF64(at, bt, rt) return case Complex64: at := a.Complex64s() bt := b.Complex64s() rt := recv.Complex64s() DivRecvC64(at, bt, rt) return case Complex128: at := a.Complex128s() bt := b.Complex128s() rt := recv.Complex128s() DivRecvC128(at, bt, rt) return default: return errors.Errorf("Unsupported type %v for DivRecv", t) } } func (e E) PowRecv(t reflect.Type, a *storage.Header, b *storage.Header, recv *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(recv, t) if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Float32: at := a.Float32s() bt := b.Float32s() rt := recv.Float32s() PowRecvF32(at, bt, rt) return case Float64: at := a.Float64s() bt := b.Float64s() rt := recv.Float64s() PowRecvF64(at, bt, rt) return case Complex64: at := a.Complex64s() bt := b.Complex64s() rt := recv.Complex64s() PowRecvC64(at, bt, rt) return case Complex128: at := a.Complex128s() bt := b.Complex128s() rt := recv.Complex128s() PowRecvC128(at, bt, rt) return default: return errors.Errorf("Unsupported type %v for PowRecv", t) } } func (e E) ModRecv(t reflect.Type, a *storage.Header, b *storage.Header, recv *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(recv, t) if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("Cannot increment on a scalar increment. 
len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() rt := recv.Ints() ModRecvI(at, bt, rt) return case Int8: at := a.Int8s() bt := b.Int8s() rt := recv.Int8s() ModRecvI8(at, bt, rt) return case Int16: at := a.Int16s() bt := b.Int16s() rt := recv.Int16s() ModRecvI16(at, bt, rt) return case Int32: at := a.Int32s() bt := b.Int32s() rt := recv.Int32s() ModRecvI32(at, bt, rt) return case Int64: at := a.Int64s() bt := b.Int64s() rt := recv.Int64s() ModRecvI64(at, bt, rt) return case Uint: at := a.Uints() bt := b.Uints() rt := recv.Uints() ModRecvU(at, bt, rt) return case Uint8: at := a.Uint8s() bt := b.Uint8s() rt := recv.Uint8s() ModRecvU8(at, bt, rt) return case Uint16: at := a.Uint16s() bt := b.Uint16s() rt := recv.Uint16s() ModRecvU16(at, bt, rt) return case Uint32: at := a.Uint32s() bt := b.Uint32s() rt := recv.Uint32s() ModRecvU32(at, bt, rt) return case Uint64: at := a.Uint64s() bt := b.Uint64s() rt := recv.Uint64s() ModRecvU64(at, bt, rt) return case Float32: at := a.Float32s() bt := b.Float32s() rt := recv.Float32s() ModRecvF32(at, bt, rt) return case Float64: at := a.Float64s() bt := b.Float64s() rt := recv.Float64s() ModRecvF64(at, bt, rt) return default: return errors.Errorf("Unsupported type %v for ModRecv", t) } } tensor-0.9.24/internal/execution/eng_arith_manual.go000066400000000000000000000076771426512615100226070ustar00rootroot00000000000000package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) AddSliced(t reflect.Type, dataA *storage.Header, dstStart, dstEnd int, dataB *storage.Header, srcStart, srcEnd int) (err error) { ds := dstStart * int(t.Size()) de := dstEnd * int(t.Size()) a := &storage.Header{ Raw: dataA.Raw[ds:de], } ss := srcStart * int(t.Size()) se := srcEnd * int(t.Size()) b := &storage.Header{ Raw: dataB.Raw[ss:se], } as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecAddI(at, bt) case as && !bs: AddSVI(at[0], bt) case !as && bs: AddVSI(at, bt[0]) default: VecAddI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecAddI8(at, bt) case as && !bs: AddSVI8(at[0], bt) case !as && bs: AddVSI8(at, bt[0]) default: VecAddI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecAddI16(at, bt) case as && !bs: AddSVI16(at[0], bt) case !as && bs: AddVSI16(at, bt[0]) default: VecAddI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecAddI32(at, bt) case as && !bs: AddSVI32(at[0], bt) case !as && bs: AddVSI32(at, bt[0]) default: VecAddI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecAddI64(at, bt) case as && !bs: AddSVI64(at[0], bt) case !as && bs: AddVSI64(at, bt[0]) default: VecAddI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecAddU(at, bt) case as && !bs: AddSVU(at[0], bt) case !as && bs: AddVSU(at, bt[0]) default: VecAddU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecAddU8(at, bt) case as && !bs: AddSVU8(at[0], bt) case !as && bs: AddVSU8(at, bt[0]) default: VecAddU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecAddU16(at, bt) case as && !bs: AddSVU16(at[0], bt) case !as && bs: AddVSU16(at, bt[0]) default: VecAddU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: 
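// Editor's note: with both sliced windows scalar-sized, the vectorized
// kernel below just adds the two length-1 uint32 slices elementwise; no
// SV/VS specialization is needed for a single element.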
VecAddU32(at, bt) case as && !bs: AddSVU32(at[0], bt) case !as && bs: AddVSU32(at, bt[0]) default: VecAddU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecAddU64(at, bt) case as && !bs: AddSVU64(at[0], bt) case !as && bs: AddVSU64(at, bt[0]) default: VecAddU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecAddF32(at, bt) case as && !bs: AddSVF32(at[0], bt) case !as && bs: AddVSF32(at, bt[0]) default: VecAddF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecAddF64(at, bt) case as && !bs: AddSVF64(at[0], bt) case !as && bs: AddVSF64(at, bt[0]) default: VecAddF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: VecAddC64(at, bt) case as && !bs: AddSVC64(at[0], bt) case !as && bs: AddVSC64(at, bt[0]) default: VecAddC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: VecAddC128(at, bt) case as && !bs: AddSVC128(at[0], bt) case !as && bs: AddVSC128(at, bt[0]) default: VecAddC128(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecAddStr(at, bt) case as && !bs: AddSVStr(at[0], bt) case !as && bs: AddVSStr(at, bt[0]) default: VecAddStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Add", t) } } tensor-0.9.24/internal/execution/eng_cmp.go000066400000000000000000003013511426512615100207040ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) Gt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GtI(at, bt, rt) case as && !bs: GtSVI(at[0], bt, rt) case !as && bs: GtVSI(at, bt[0], rt) default: GtI(at, bt, rt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GtI8(at, bt, rt) case as && !bs: GtSVI8(at[0], bt, rt) case !as && bs: GtVSI8(at, bt[0], rt) default: GtI8(at, bt, rt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GtI16(at, bt, rt) case as && !bs: GtSVI16(at[0], bt, rt) case !as && bs: GtVSI16(at, bt[0], rt) default: GtI16(at, bt, rt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GtI32(at, bt, rt) case as && !bs: GtSVI32(at[0], bt, rt) case !as && bs: GtVSI32(at, bt[0], rt) default: GtI32(at, bt, rt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GtI64(at, bt, rt) case as && !bs: GtSVI64(at[0], bt, rt) case !as && bs: GtVSI64(at, bt[0], rt) default: GtI64(at, bt, rt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GtU(at, bt, rt) case as && !bs: GtSVU(at[0], bt, rt) case !as && bs: GtVSU(at, bt[0], rt) default: GtU(at, bt, rt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GtU8(at, bt, rt) case as && !bs: GtSVU8(at[0], bt, rt) case !as && bs: GtVSU8(at, bt[0], rt) default: GtU8(at, bt, rt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GtU16(at, bt, rt) case as && !bs: GtSVU16(at[0], bt, rt) case !as && bs: GtVSU16(at, bt[0], rt) default: GtU16(at, bt, rt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GtU32(at, bt, rt) case as && !bs: GtSVU32(at[0], bt, rt) case !as && bs: GtVSU32(at, bt[0], rt) default: GtU32(at, bt, rt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GtU64(at, bt, rt) case as && !bs: GtSVU64(at[0], bt, rt) case !as && bs: GtVSU64(at, bt[0], rt) default: GtU64(at, bt, rt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GtF32(at, bt, rt) case as && !bs: GtSVF32(at[0], bt, rt) case !as && bs: GtVSF32(at, bt[0], rt) default: GtF32(at, bt, rt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GtF64(at, bt, rt) case as && !bs: GtSVF64(at[0], bt, rt) case !as && bs: GtVSF64(at, bt[0], rt) default: GtF64(at, bt, rt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GtStr(at, bt, rt) case as && !bs: GtSVStr(at[0], bt, rt) case !as && bs: GtVSStr(at, bt[0], rt) default: GtStr(at, bt, rt) } return default: return errors.Errorf("Unsupported type %v for Gt", t) } } func (e E) Gte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GteI(at, bt, rt) case as && !bs: GteSVI(at[0], bt, rt) case !as && bs: GteVSI(at, bt[0], rt) default: GteI(at, bt, rt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GteI8(at, bt, rt) case as && !bs: GteSVI8(at[0], bt, rt) case !as && bs: GteVSI8(at, bt[0], rt) default: GteI8(at, bt, rt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GteI16(at, bt, rt) case as && !bs: GteSVI16(at[0], bt, rt) case !as && bs: GteVSI16(at, bt[0], rt) default: GteI16(at, bt, rt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GteI32(at, bt, rt) case as && !bs: GteSVI32(at[0], bt, rt) case !as && bs: GteVSI32(at, bt[0], rt) default: GteI32(at, bt, rt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GteI64(at, bt, rt) case as && !bs: GteSVI64(at[0], bt, rt) case !as && bs: GteVSI64(at, bt[0], rt) default: GteI64(at, bt, rt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GteU(at, bt, rt) case as && !bs: GteSVU(at[0], bt, rt) case !as && bs: GteVSU(at, bt[0], rt) default: GteU(at, bt, rt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GteU8(at, bt, rt) case as && !bs: GteSVU8(at[0], bt, rt) case !as && bs: GteVSU8(at, bt[0], rt) default: GteU8(at, bt, rt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GteU16(at, bt, rt) case as && !bs: GteSVU16(at[0], bt, rt) case !as && bs: GteVSU16(at, bt[0], rt) default: GteU16(at, bt, rt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GteU32(at, bt, rt) case as && !bs: GteSVU32(at[0], bt, rt) case !as && bs: GteVSU32(at, bt[0], rt) default: GteU32(at, bt, rt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GteU64(at, bt, rt) case as && !bs: GteSVU64(at[0], bt, rt) case !as && bs: GteVSU64(at, bt[0], rt) default: GteU64(at, bt, rt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GteF32(at, bt, rt) case as && !bs: GteSVF32(at[0], bt, rt) case !as && bs: GteVSF32(at, bt[0], rt) default: GteF32(at, bt, rt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GteF64(at, bt, rt) case as && !bs: GteSVF64(at[0], bt, rt) case !as && bs: GteVSF64(at, bt[0], rt) default: GteF64(at, bt, rt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GteStr(at, bt, rt) case as && !bs: GteSVStr(at[0], bt, rt) case !as && bs: GteVSStr(at, bt[0], rt) default: GteStr(at, bt, rt) } return default: return errors.Errorf("Unsupported type %v for Gte", t) } } func (e E) Lt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LtI(at, bt, rt) case as && !bs: LtSVI(at[0], bt, rt) case !as && bs: LtVSI(at, bt[0], rt) default: LtI(at, bt, rt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LtI8(at, bt, rt) case as && !bs: LtSVI8(at[0], bt, rt) case !as && bs: LtVSI8(at, bt[0], rt) default: LtI8(at, bt, rt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LtI16(at, bt, rt) case as && !bs: LtSVI16(at[0], bt, rt) case !as && bs: LtVSI16(at, bt[0], rt) default: LtI16(at, bt, rt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LtI32(at, bt, rt) case as && !bs: LtSVI32(at[0], bt, rt) case !as && bs: LtVSI32(at, bt[0], rt) default: LtI32(at, bt, rt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LtI64(at, bt, rt) case as && !bs: LtSVI64(at[0], bt, rt) case !as && bs: LtVSI64(at, bt[0], rt) default: LtI64(at, bt, rt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LtU(at, bt, rt) case as && !bs: LtSVU(at[0], bt, rt) case !as && bs: LtVSU(at, bt[0], rt) default: LtU(at, bt, rt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LtU8(at, bt, rt) case as && !bs: LtSVU8(at[0], bt, rt) case !as && bs: LtVSU8(at, bt[0], rt) default: LtU8(at, bt, rt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LtU16(at, bt, rt) case as && !bs: LtSVU16(at[0], bt, rt) case !as && bs: LtVSU16(at, bt[0], rt) default: LtU16(at, bt, rt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LtU32(at, bt, rt) case as && !bs: LtSVU32(at[0], bt, rt) case !as && bs: LtVSU32(at, bt[0], rt) default: LtU32(at, bt, rt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LtU64(at, bt, rt) case as && !bs: LtSVU64(at[0], bt, rt) case !as && bs: LtVSU64(at, bt[0], rt) default: LtU64(at, bt, rt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LtF32(at, bt, rt) case as && !bs: LtSVF32(at[0], bt, rt) case !as && bs: LtVSF32(at, bt[0], rt) default: LtF32(at, bt, rt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LtF64(at, bt, rt) case as && !bs: LtSVF64(at[0], bt, rt) case !as && bs: LtVSF64(at, bt[0], rt) default: LtF64(at, bt, rt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LtStr(at, bt, rt) case as && !bs: LtSVStr(at[0], bt, rt) case !as && bs: LtVSStr(at, bt[0], rt) default: LtStr(at, bt, rt) } return default: return errors.Errorf("Unsupported type %v for Lt", t) } } func (e E) Lte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LteI(at, bt, rt) case as && !bs: LteSVI(at[0], bt, rt) case !as && bs: LteVSI(at, bt[0], rt) default: LteI(at, bt, rt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LteI8(at, bt, rt) case as && !bs: LteSVI8(at[0], bt, rt) case !as && bs: LteVSI8(at, bt[0], rt) default: LteI8(at, bt, rt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LteI16(at, bt, rt) case as && !bs: LteSVI16(at[0], bt, rt) case !as && bs: LteVSI16(at, bt[0], rt) default: LteI16(at, bt, rt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LteI32(at, bt, rt) case as && !bs: LteSVI32(at[0], bt, rt) case !as && bs: LteVSI32(at, bt[0], rt) default: LteI32(at, bt, rt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LteI64(at, bt, rt) case as && !bs: LteSVI64(at[0], bt, rt) case !as && bs: LteVSI64(at, bt[0], rt) default: LteI64(at, bt, rt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LteU(at, bt, rt) case as && !bs: LteSVU(at[0], bt, rt) case !as && bs: LteVSU(at, bt[0], rt) default: LteU(at, bt, rt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LteU8(at, bt, rt) case as && !bs: LteSVU8(at[0], bt, rt) case !as && bs: LteVSU8(at, bt[0], rt) default: LteU8(at, bt, rt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LteU16(at, bt, rt) case as && !bs: LteSVU16(at[0], bt, rt) case !as && bs: LteVSU16(at, bt[0], rt) default: LteU16(at, bt, rt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LteU32(at, bt, rt) case as && !bs: LteSVU32(at[0], bt, rt) case !as && bs: LteVSU32(at, bt[0], rt) default: LteU32(at, bt, rt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LteU64(at, bt, rt) case as && !bs: LteSVU64(at[0], bt, rt) case !as && bs: LteVSU64(at, bt[0], rt) default: LteU64(at, bt, rt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LteF32(at, bt, rt) case as && !bs: LteSVF32(at[0], bt, rt) case !as && bs: LteVSF32(at, bt[0], rt) default: LteF32(at, bt, rt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LteF64(at, bt, rt) case as && !bs: LteSVF64(at[0], bt, rt) case !as && bs: LteVSF64(at, bt[0], rt) default: LteF64(at, bt, rt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LteStr(at, bt, rt) case as && !bs: LteSVStr(at[0], bt, rt) case !as && bs: LteVSStr(at, bt[0], rt) default: LteStr(at, bt, rt) } return default: return errors.Errorf("Unsupported type %v for Lte", t) } } func (e E) Eq(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. 
a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: EqB(at, bt, rt) case as && !bs: EqSVB(at[0], bt, rt) case !as && bs: EqVSB(at, bt[0], rt) default: EqB(at, bt, rt) } return case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: EqI(at, bt, rt) case as && !bs: EqSVI(at[0], bt, rt) case !as && bs: EqVSI(at, bt[0], rt) default: EqI(at, bt, rt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: EqI8(at, bt, rt) case as && !bs: EqSVI8(at[0], bt, rt) case !as && bs: EqVSI8(at, bt[0], rt) default: EqI8(at, bt, rt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: EqI16(at, bt, rt) case as && !bs: EqSVI16(at[0], bt, rt) case !as && bs: EqVSI16(at, bt[0], rt) default: EqI16(at, bt, rt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: EqI32(at, bt, rt) case as && !bs: EqSVI32(at[0], bt, rt) case !as && bs: EqVSI32(at, bt[0], rt) default: EqI32(at, bt, rt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: EqI64(at, bt, rt) case as && !bs: EqSVI64(at[0], bt, rt) case !as && bs: EqVSI64(at, bt[0], rt) default: EqI64(at, bt, rt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: EqU(at, bt, rt) case as && !bs: EqSVU(at[0], bt, rt) case !as && bs: EqVSU(at, bt[0], rt) default: EqU(at, bt, rt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: EqU8(at, bt, rt) case as && !bs: EqSVU8(at[0], bt, rt) case !as && bs: EqVSU8(at, bt[0], rt) default: EqU8(at, bt, rt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: EqU16(at, bt, rt) case as && !bs: EqSVU16(at[0], bt, rt) case !as && bs: EqVSU16(at, bt[0], rt) default: EqU16(at, bt, rt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: EqU32(at, bt, rt) case as && !bs: EqSVU32(at[0], bt, rt) case !as && bs: EqVSU32(at, bt[0], rt) default: EqU32(at, bt, rt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: EqU64(at, bt, rt) case as && !bs: EqSVU64(at[0], bt, rt) case !as && bs: EqVSU64(at, bt[0], rt) default: EqU64(at, bt, rt) } return case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: EqUintptr(at, bt, rt) case as && !bs: EqSVUintptr(at[0], bt, rt) case !as && bs: EqVSUintptr(at, bt[0], rt) default: EqUintptr(at, bt, rt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: EqF32(at, bt, rt) case as && !bs: EqSVF32(at[0], bt, rt) case !as && bs: EqVSF32(at, bt[0], rt) default: EqF32(at, bt, rt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: EqF64(at, bt, rt) case as && !bs: EqSVF64(at[0], bt, rt) case !as && bs: EqVSF64(at, bt[0], rt) default: EqF64(at, bt, rt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: EqC64(at, bt, rt) case as && !bs: EqSVC64(at[0], bt, rt) case !as && bs: EqVSC64(at, bt[0], rt) default: EqC64(at, bt, rt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: EqC128(at, bt, rt) case as && !bs: EqSVC128(at[0], bt, rt) case !as && bs: EqVSC128(at, bt[0], rt) default: EqC128(at, bt, rt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: EqStr(at, bt, rt) case as && !bs: EqSVStr(at[0], bt, rt) case !as && bs: EqVSStr(at, bt[0], rt) default: EqStr(at, bt, rt) } return case UnsafePointer: at := 
a.UnsafePointers() bt := b.UnsafePointers() switch { case as && bs: EqUnsafePointer(at, bt, rt) case as && !bs: EqSVUnsafePointer(at[0], bt, rt) case !as && bs: EqVSUnsafePointer(at, bt[0], rt) default: EqUnsafePointer(at, bt, rt) } return default: return errors.Errorf("Unsupported type %v for Eq", t) } } func (e E) Ne(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: NeB(at, bt, rt) case as && !bs: NeSVB(at[0], bt, rt) case !as && bs: NeVSB(at, bt[0], rt) default: NeB(at, bt, rt) } return case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: NeI(at, bt, rt) case as && !bs: NeSVI(at[0], bt, rt) case !as && bs: NeVSI(at, bt[0], rt) default: NeI(at, bt, rt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: NeI8(at, bt, rt) case as && !bs: NeSVI8(at[0], bt, rt) case !as && bs: NeVSI8(at, bt[0], rt) default: NeI8(at, bt, rt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: NeI16(at, bt, rt) case as && !bs: NeSVI16(at[0], bt, rt) case !as && bs: NeVSI16(at, bt[0], rt) default: NeI16(at, bt, rt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: NeI32(at, bt, rt) case as && !bs: NeSVI32(at[0], bt, rt) case !as && bs: NeVSI32(at, bt[0], rt) default: NeI32(at, bt, rt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: NeI64(at, bt, rt) case as && !bs: NeSVI64(at[0], bt, rt) case !as && bs: NeVSI64(at, bt[0], rt) default: NeI64(at, bt, rt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: NeU(at, bt, rt) case as && !bs: NeSVU(at[0], bt, rt) case !as && bs: NeVSU(at, bt[0], rt) default: NeU(at, bt, rt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: NeU8(at, bt, rt) case as && !bs: NeSVU8(at[0], bt, rt) case !as && bs: NeVSU8(at, bt[0], rt) default: NeU8(at, bt, rt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: NeU16(at, bt, rt) case as && !bs: NeSVU16(at[0], bt, rt) case !as && bs: NeVSU16(at, bt[0], rt) default: NeU16(at, bt, rt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: NeU32(at, bt, rt) case as && !bs: NeSVU32(at[0], bt, rt) case !as && bs: NeVSU32(at, bt[0], rt) default: NeU32(at, bt, rt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: NeU64(at, bt, rt) case as && !bs: NeSVU64(at[0], bt, rt) case !as && bs: NeVSU64(at, bt[0], rt) default: NeU64(at, bt, rt) } return case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: NeUintptr(at, bt, rt) case as && !bs: NeSVUintptr(at[0], bt, rt) case !as && bs: NeVSUintptr(at, bt[0], rt) default: NeUintptr(at, bt, rt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: NeF32(at, bt, rt) case as && !bs: NeSVF32(at[0], bt, rt) case !as && bs: NeVSF32(at, bt[0], rt) default: NeF32(at, bt, rt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: NeF64(at, bt, rt) case as && !bs: NeSVF64(at[0], bt, rt) case !as && bs: NeVSF64(at, bt[0], rt) default: NeF64(at, bt, rt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: 
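// Editor's note: both operands are scalar here, so the plain elementwise
// kernel is reused and writes the single comparison result into rt[0].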
NeC64(at, bt, rt) case as && !bs: NeSVC64(at[0], bt, rt) case !as && bs: NeVSC64(at, bt[0], rt) default: NeC64(at, bt, rt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: NeC128(at, bt, rt) case as && !bs: NeSVC128(at[0], bt, rt) case !as && bs: NeVSC128(at, bt[0], rt) default: NeC128(at, bt, rt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: NeStr(at, bt, rt) case as && !bs: NeSVStr(at[0], bt, rt) case !as && bs: NeVSStr(at, bt[0], rt) default: NeStr(at, bt, rt) } return case UnsafePointer: at := a.UnsafePointers() bt := b.UnsafePointers() switch { case as && bs: NeUnsafePointer(at, bt, rt) case as && !bs: NeSVUnsafePointer(at[0], bt, rt) case !as && bs: NeVSUnsafePointer(at, bt[0], rt) default: NeUnsafePointer(at, bt, rt) } return default: return errors.Errorf("Unsupported type %v for Ne", t) } } func (e E) GtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GtSameI(at, bt) case as && !bs: GtSameSVI(at[0], bt) case !as && bs: GtSameVSI(at, bt[0]) default: GtSameI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GtSameI8(at, bt) case as && !bs: GtSameSVI8(at[0], bt) case !as && bs: GtSameVSI8(at, bt[0]) default: GtSameI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GtSameI16(at, bt) case as && !bs: GtSameSVI16(at[0], bt) case !as && bs: GtSameVSI16(at, bt[0]) default: GtSameI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GtSameI32(at, bt) case as && !bs: GtSameSVI32(at[0], bt) case !as && bs: GtSameVSI32(at, bt[0]) default: GtSameI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GtSameI64(at, bt) case as && !bs: GtSameSVI64(at[0], bt) case !as && bs: GtSameVSI64(at, bt[0]) default: GtSameI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GtSameU(at, bt) case as && !bs: GtSameSVU(at[0], bt) case !as && bs: GtSameVSU(at, bt[0]) default: GtSameU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GtSameU8(at, bt) case as && !bs: GtSameSVU8(at[0], bt) case !as && bs: GtSameVSU8(at, bt[0]) default: GtSameU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GtSameU16(at, bt) case as && !bs: GtSameSVU16(at[0], bt) case !as && bs: GtSameVSU16(at, bt[0]) default: GtSameU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GtSameU32(at, bt) case as && !bs: GtSameSVU32(at[0], bt) case !as && bs: GtSameVSU32(at, bt[0]) default: GtSameU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GtSameU64(at, bt) case as && !bs: GtSameSVU64(at[0], bt) case !as && bs: GtSameVSU64(at, bt[0]) default: GtSameU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GtSameF32(at, bt) case as && !bs: GtSameSVF32(at[0], bt) case !as && bs: GtSameVSF32(at, bt[0]) default: GtSameF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GtSameF64(at, bt) case as && !bs: GtSameSVF64(at[0], bt) case !as && bs: GtSameVSF64(at, bt[0]) default: GtSameF64(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GtSameStr(at, bt) case as && !bs: 
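// Editor's note: the *Same comparison variants store their results back
// into an operand in the source dtype (here string) instead of filling a
// separate []bool, which is why no rt argument appears below.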
GtSameSVStr(at[0], bt) case !as && bs: GtSameVSStr(at, bt[0]) default: GtSameStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Gt", t) } } func (e E) GteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GteSameI(at, bt) case as && !bs: GteSameSVI(at[0], bt) case !as && bs: GteSameVSI(at, bt[0]) default: GteSameI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GteSameI8(at, bt) case as && !bs: GteSameSVI8(at[0], bt) case !as && bs: GteSameVSI8(at, bt[0]) default: GteSameI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GteSameI16(at, bt) case as && !bs: GteSameSVI16(at[0], bt) case !as && bs: GteSameVSI16(at, bt[0]) default: GteSameI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GteSameI32(at, bt) case as && !bs: GteSameSVI32(at[0], bt) case !as && bs: GteSameVSI32(at, bt[0]) default: GteSameI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GteSameI64(at, bt) case as && !bs: GteSameSVI64(at[0], bt) case !as && bs: GteSameVSI64(at, bt[0]) default: GteSameI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GteSameU(at, bt) case as && !bs: GteSameSVU(at[0], bt) case !as && bs: GteSameVSU(at, bt[0]) default: GteSameU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GteSameU8(at, bt) case as && !bs: GteSameSVU8(at[0], bt) case !as && bs: GteSameVSU8(at, bt[0]) default: GteSameU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GteSameU16(at, bt) case as && !bs: GteSameSVU16(at[0], bt) case !as && bs: GteSameVSU16(at, bt[0]) default: GteSameU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GteSameU32(at, bt) case as && !bs: GteSameSVU32(at[0], bt) case !as && bs: GteSameVSU32(at, bt[0]) default: GteSameU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GteSameU64(at, bt) case as && !bs: GteSameSVU64(at[0], bt) case !as && bs: GteSameVSU64(at, bt[0]) default: GteSameU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GteSameF32(at, bt) case as && !bs: GteSameSVF32(at[0], bt) case !as && bs: GteSameVSF32(at, bt[0]) default: GteSameF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GteSameF64(at, bt) case as && !bs: GteSameSVF64(at[0], bt) case !as && bs: GteSameVSF64(at, bt[0]) default: GteSameF64(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GteSameStr(at, bt) case as && !bs: GteSameSVStr(at[0], bt) case !as && bs: GteSameVSStr(at, bt[0]) default: GteSameStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Gte", t) } } func (e E) LtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LtSameI(at, bt) case as && !bs: LtSameSVI(at[0], bt) case !as && bs: LtSameVSI(at, bt[0]) default: LtSameI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LtSameI8(at, bt) case as && !bs: LtSameSVI8(at[0], bt) case !as && bs: LtSameVSI8(at, bt[0]) default: LtSameI8(at, bt) } 
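// Editor's sketch (assumption-laden, not part of the generated file): these
// *Same entry points are normally reached through the public tensor API with
// a func-opt requesting same-typed output, rather than called directly, e.g.
//
//	a := tensor.New(tensor.WithBacking([]int8{1, 2, 3}))
//	b := tensor.New(tensor.WithBacking([]int8{3, 2, 1}))
//	r, _ := tensor.Lt(a, b, tensor.AsSameType()) // r backed by []int8, not []bool
//
// tensor.AsSameType is believed to be the v0.9.x spelling of that opt;
// verify against the package before relying on this sketch.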
return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LtSameI16(at, bt) case as && !bs: LtSameSVI16(at[0], bt) case !as && bs: LtSameVSI16(at, bt[0]) default: LtSameI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LtSameI32(at, bt) case as && !bs: LtSameSVI32(at[0], bt) case !as && bs: LtSameVSI32(at, bt[0]) default: LtSameI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LtSameI64(at, bt) case as && !bs: LtSameSVI64(at[0], bt) case !as && bs: LtSameVSI64(at, bt[0]) default: LtSameI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LtSameU(at, bt) case as && !bs: LtSameSVU(at[0], bt) case !as && bs: LtSameVSU(at, bt[0]) default: LtSameU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LtSameU8(at, bt) case as && !bs: LtSameSVU8(at[0], bt) case !as && bs: LtSameVSU8(at, bt[0]) default: LtSameU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LtSameU16(at, bt) case as && !bs: LtSameSVU16(at[0], bt) case !as && bs: LtSameVSU16(at, bt[0]) default: LtSameU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LtSameU32(at, bt) case as && !bs: LtSameSVU32(at[0], bt) case !as && bs: LtSameVSU32(at, bt[0]) default: LtSameU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LtSameU64(at, bt) case as && !bs: LtSameSVU64(at[0], bt) case !as && bs: LtSameVSU64(at, bt[0]) default: LtSameU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LtSameF32(at, bt) case as && !bs: LtSameSVF32(at[0], bt) case !as && bs: LtSameVSF32(at, bt[0]) default: LtSameF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LtSameF64(at, bt) case as && !bs: LtSameSVF64(at[0], bt) case !as && bs: LtSameVSF64(at, bt[0]) default: LtSameF64(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LtSameStr(at, bt) case as && !bs: LtSameSVStr(at[0], bt) case !as && bs: LtSameVSStr(at, bt[0]) default: LtSameStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Lt", t) } } func (e E) LteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LteSameI(at, bt) case as && !bs: LteSameSVI(at[0], bt) case !as && bs: LteSameVSI(at, bt[0]) default: LteSameI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LteSameI8(at, bt) case as && !bs: LteSameSVI8(at[0], bt) case !as && bs: LteSameVSI8(at, bt[0]) default: LteSameI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LteSameI16(at, bt) case as && !bs: LteSameSVI16(at[0], bt) case !as && bs: LteSameVSI16(at, bt[0]) default: LteSameI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LteSameI32(at, bt) case as && !bs: LteSameSVI32(at[0], bt) case !as && bs: LteSameVSI32(at, bt[0]) default: LteSameI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LteSameI64(at, bt) case as && !bs: LteSameSVI64(at[0], bt) case !as && bs: LteSameVSI64(at, bt[0]) default: LteSameI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LteSameU(at, bt) case as && !bs: 
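// Editor's note: scalar-vector path; at[0] is compared against each element
// of bt and the same-typed results (1 for true, 0 for false) overwrite bt.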
LteSameSVU(at[0], bt) case !as && bs: LteSameVSU(at, bt[0]) default: LteSameU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LteSameU8(at, bt) case as && !bs: LteSameSVU8(at[0], bt) case !as && bs: LteSameVSU8(at, bt[0]) default: LteSameU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LteSameU16(at, bt) case as && !bs: LteSameSVU16(at[0], bt) case !as && bs: LteSameVSU16(at, bt[0]) default: LteSameU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LteSameU32(at, bt) case as && !bs: LteSameSVU32(at[0], bt) case !as && bs: LteSameVSU32(at, bt[0]) default: LteSameU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LteSameU64(at, bt) case as && !bs: LteSameSVU64(at[0], bt) case !as && bs: LteSameVSU64(at, bt[0]) default: LteSameU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LteSameF32(at, bt) case as && !bs: LteSameSVF32(at[0], bt) case !as && bs: LteSameVSF32(at, bt[0]) default: LteSameF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LteSameF64(at, bt) case as && !bs: LteSameSVF64(at[0], bt) case !as && bs: LteSameVSF64(at, bt[0]) default: LteSameF64(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LteSameStr(at, bt) case as && !bs: LteSameSVStr(at[0], bt) case !as && bs: LteSameVSStr(at, bt[0]) default: LteSameStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Lte", t) } } func (e E) EqSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: EqSameB(at, bt) case as && !bs: EqSameSVB(at[0], bt) case !as && bs: EqSameVSB(at, bt[0]) default: EqSameB(at, bt) } return case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: EqSameI(at, bt) case as && !bs: EqSameSVI(at[0], bt) case !as && bs: EqSameVSI(at, bt[0]) default: EqSameI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: EqSameI8(at, bt) case as && !bs: EqSameSVI8(at[0], bt) case !as && bs: EqSameVSI8(at, bt[0]) default: EqSameI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: EqSameI16(at, bt) case as && !bs: EqSameSVI16(at[0], bt) case !as && bs: EqSameVSI16(at, bt[0]) default: EqSameI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: EqSameI32(at, bt) case as && !bs: EqSameSVI32(at[0], bt) case !as && bs: EqSameVSI32(at, bt[0]) default: EqSameI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: EqSameI64(at, bt) case as && !bs: EqSameSVI64(at[0], bt) case !as && bs: EqSameVSI64(at, bt[0]) default: EqSameI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: EqSameU(at, bt) case as && !bs: EqSameSVU(at[0], bt) case !as && bs: EqSameVSU(at, bt[0]) default: EqSameU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: EqSameU8(at, bt) case as && !bs: EqSameSVU8(at[0], bt) case !as && bs: EqSameVSU8(at, bt[0]) default: EqSameU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: EqSameU16(at, bt) case as && !bs: EqSameSVU16(at[0], bt) case !as && bs: EqSameVSU16(at, bt[0]) default: EqSameU16(at, bt) } return case Uint32: at 
:= a.Uint32s() bt := b.Uint32s() switch { case as && bs: EqSameU32(at, bt) case as && !bs: EqSameSVU32(at[0], bt) case !as && bs: EqSameVSU32(at, bt[0]) default: EqSameU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: EqSameU64(at, bt) case as && !bs: EqSameSVU64(at[0], bt) case !as && bs: EqSameVSU64(at, bt[0]) default: EqSameU64(at, bt) } return case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: EqSameUintptr(at, bt) case as && !bs: EqSameSVUintptr(at[0], bt) case !as && bs: EqSameVSUintptr(at, bt[0]) default: EqSameUintptr(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: EqSameF32(at, bt) case as && !bs: EqSameSVF32(at[0], bt) case !as && bs: EqSameVSF32(at, bt[0]) default: EqSameF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: EqSameF64(at, bt) case as && !bs: EqSameSVF64(at[0], bt) case !as && bs: EqSameVSF64(at, bt[0]) default: EqSameF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: EqSameC64(at, bt) case as && !bs: EqSameSVC64(at[0], bt) case !as && bs: EqSameVSC64(at, bt[0]) default: EqSameC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: EqSameC128(at, bt) case as && !bs: EqSameSVC128(at[0], bt) case !as && bs: EqSameVSC128(at, bt[0]) default: EqSameC128(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: EqSameStr(at, bt) case as && !bs: EqSameSVStr(at[0], bt) case !as && bs: EqSameVSStr(at, bt[0]) default: EqSameStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Eq", t) } } func (e E) NeSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: NeSameB(at, bt) case as && !bs: NeSameSVB(at[0], bt) case !as && bs: NeSameVSB(at, bt[0]) default: NeSameB(at, bt) } return case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: NeSameI(at, bt) case as && !bs: NeSameSVI(at[0], bt) case !as && bs: NeSameVSI(at, bt[0]) default: NeSameI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: NeSameI8(at, bt) case as && !bs: NeSameSVI8(at[0], bt) case !as && bs: NeSameVSI8(at, bt[0]) default: NeSameI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: NeSameI16(at, bt) case as && !bs: NeSameSVI16(at[0], bt) case !as && bs: NeSameVSI16(at, bt[0]) default: NeSameI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: NeSameI32(at, bt) case as && !bs: NeSameSVI32(at[0], bt) case !as && bs: NeSameVSI32(at, bt[0]) default: NeSameI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: NeSameI64(at, bt) case as && !bs: NeSameSVI64(at[0], bt) case !as && bs: NeSameVSI64(at, bt[0]) default: NeSameI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: NeSameU(at, bt) case as && !bs: NeSameSVU(at[0], bt) case !as && bs: NeSameVSU(at, bt[0]) default: NeSameU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: NeSameU8(at, bt) case as && !bs: NeSameSVU8(at[0], bt) case !as && bs: NeSameVSU8(at, bt[0]) default: NeSameU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: NeSameU16(at, bt) case as && 
!bs: NeSameSVU16(at[0], bt) case !as && bs: NeSameVSU16(at, bt[0]) default: NeSameU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: NeSameU32(at, bt) case as && !bs: NeSameSVU32(at[0], bt) case !as && bs: NeSameVSU32(at, bt[0]) default: NeSameU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: NeSameU64(at, bt) case as && !bs: NeSameSVU64(at[0], bt) case !as && bs: NeSameVSU64(at, bt[0]) default: NeSameU64(at, bt) } return case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: NeSameUintptr(at, bt) case as && !bs: NeSameSVUintptr(at[0], bt) case !as && bs: NeSameVSUintptr(at, bt[0]) default: NeSameUintptr(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: NeSameF32(at, bt) case as && !bs: NeSameSVF32(at[0], bt) case !as && bs: NeSameVSF32(at, bt[0]) default: NeSameF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: NeSameF64(at, bt) case as && !bs: NeSameSVF64(at[0], bt) case !as && bs: NeSameVSF64(at, bt[0]) default: NeSameF64(at, bt) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: NeSameC64(at, bt) case as && !bs: NeSameSVC64(at[0], bt) case !as && bs: NeSameVSC64(at, bt[0]) default: NeSameC64(at, bt) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: NeSameC128(at, bt) case as && !bs: NeSameSVC128(at[0], bt) case !as && bs: NeSameVSC128(at, bt[0]) default: NeSameC128(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: NeSameStr(at, bt) case as && !bs: NeSameSVStr(at[0], bt) case !as && bs: NeSameVSStr(at, bt[0]) default: NeSameStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Ne", t) } } func (e E) GtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GtI(at, bt, rt) return case as && !bs: return GtIterSVI(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSI(at, bt[0], rt, ait, rit) default: return GtIterI(at, bt, rt, ait, bit, rit) } case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GtI8(at, bt, rt) return case as && !bs: return GtIterSVI8(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSI8(at, bt[0], rt, ait, rit) default: return GtIterI8(at, bt, rt, ait, bit, rit) } case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GtI16(at, bt, rt) return case as && !bs: return GtIterSVI16(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSI16(at, bt[0], rt, ait, rit) default: return GtIterI16(at, bt, rt, ait, bit, rit) } case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GtI32(at, bt, rt) return case as && !bs: return GtIterSVI32(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSI32(at, bt[0], rt, ait, rit) default: return GtIterI32(at, bt, rt, ait, bit, rit) } case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GtI64(at, bt, rt) return case as && !bs: return GtIterSVI64(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSI64(at, bt[0], rt, ait, rit) default: return 
GtIterI64(at, bt, rt, ait, bit, rit) } case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GtU(at, bt, rt) return case as && !bs: return GtIterSVU(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSU(at, bt[0], rt, ait, rit) default: return GtIterU(at, bt, rt, ait, bit, rit) } case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GtU8(at, bt, rt) return case as && !bs: return GtIterSVU8(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSU8(at, bt[0], rt, ait, rit) default: return GtIterU8(at, bt, rt, ait, bit, rit) } case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GtU16(at, bt, rt) return case as && !bs: return GtIterSVU16(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSU16(at, bt[0], rt, ait, rit) default: return GtIterU16(at, bt, rt, ait, bit, rit) } case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GtU32(at, bt, rt) return case as && !bs: return GtIterSVU32(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSU32(at, bt[0], rt, ait, rit) default: return GtIterU32(at, bt, rt, ait, bit, rit) } case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GtU64(at, bt, rt) return case as && !bs: return GtIterSVU64(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSU64(at, bt[0], rt, ait, rit) default: return GtIterU64(at, bt, rt, ait, bit, rit) } case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GtF32(at, bt, rt) return case as && !bs: return GtIterSVF32(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSF32(at, bt[0], rt, ait, rit) default: return GtIterF32(at, bt, rt, ait, bit, rit) } case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GtF64(at, bt, rt) return case as && !bs: return GtIterSVF64(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSF64(at, bt[0], rt, ait, rit) default: return GtIterF64(at, bt, rt, ait, bit, rit) } case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GtStr(at, bt, rt) return case as && !bs: return GtIterSVStr(at[0], bt, rt, bit, rit) case !as && bs: return GtIterVSStr(at, bt[0], rt, ait, rit) default: return GtIterStr(at, bt, rt, ait, bit, rit) } default: return errors.Errorf("Unsupported type %v for Gt", t) } } func (e E) GteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GteI(at, bt, rt) return case as && !bs: return GteIterSVI(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSI(at, bt[0], rt, ait, rit) default: return GteIterI(at, bt, rt, ait, bit, rit) } case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GteI8(at, bt, rt) return case as && !bs: return GteIterSVI8(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSI8(at, bt[0], rt, ait, rit) default: return GteIterI8(at, bt, rt, ait, bit, rit) } case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GteI16(at, bt, rt) return case as && !bs: return GteIterSVI16(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSI16(at, bt[0], rt, ait, rit) default: return GteIterI16(at, bt, rt, ait, bit, rit) } case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GteI32(at, 
bt, rt) return case as && !bs: return GteIterSVI32(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSI32(at, bt[0], rt, ait, rit) default: return GteIterI32(at, bt, rt, ait, bit, rit) } case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GteI64(at, bt, rt) return case as && !bs: return GteIterSVI64(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSI64(at, bt[0], rt, ait, rit) default: return GteIterI64(at, bt, rt, ait, bit, rit) } case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GteU(at, bt, rt) return case as && !bs: return GteIterSVU(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSU(at, bt[0], rt, ait, rit) default: return GteIterU(at, bt, rt, ait, bit, rit) } case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GteU8(at, bt, rt) return case as && !bs: return GteIterSVU8(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSU8(at, bt[0], rt, ait, rit) default: return GteIterU8(at, bt, rt, ait, bit, rit) } case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GteU16(at, bt, rt) return case as && !bs: return GteIterSVU16(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSU16(at, bt[0], rt, ait, rit) default: return GteIterU16(at, bt, rt, ait, bit, rit) } case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GteU32(at, bt, rt) return case as && !bs: return GteIterSVU32(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSU32(at, bt[0], rt, ait, rit) default: return GteIterU32(at, bt, rt, ait, bit, rit) } case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GteU64(at, bt, rt) return case as && !bs: return GteIterSVU64(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSU64(at, bt[0], rt, ait, rit) default: return GteIterU64(at, bt, rt, ait, bit, rit) } case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GteF32(at, bt, rt) return case as && !bs: return GteIterSVF32(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSF32(at, bt[0], rt, ait, rit) default: return GteIterF32(at, bt, rt, ait, bit, rit) } case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GteF64(at, bt, rt) return case as && !bs: return GteIterSVF64(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSF64(at, bt[0], rt, ait, rit) default: return GteIterF64(at, bt, rt, ait, bit, rit) } case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GteStr(at, bt, rt) return case as && !bs: return GteIterSVStr(at[0], bt, rt, bit, rit) case !as && bs: return GteIterVSStr(at, bt[0], rt, ait, rit) default: return GteIterStr(at, bt, rt, ait, bit, rit) } default: return errors.Errorf("Unsupported type %v for Gte", t) } } func (e E) LtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LtI(at, bt, rt) return case as && !bs: return LtIterSVI(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSI(at, bt[0], rt, ait, rit) default: return LtIterI(at, bt, rt, ait, bit, rit) } case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LtI8(at, bt, rt) return case as && !bs: return LtIterSVI8(at[0], bt, rt, bit, rit) case !as && bs: 
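// Naming note for the generated dispatch branches above and below: SV means scalar a
// against vector b, VS means vector a against scalar b. Only the vector operand needs
// an iterator, so the SV variants take (bit, rit) and the VS variants take (ait, rit);
// the scalar side's iterator argument is dropped.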
return LtIterVSI8(at, bt[0], rt, ait, rit) default: return LtIterI8(at, bt, rt, ait, bit, rit) } case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LtI16(at, bt, rt) return case as && !bs: return LtIterSVI16(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSI16(at, bt[0], rt, ait, rit) default: return LtIterI16(at, bt, rt, ait, bit, rit) } case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LtI32(at, bt, rt) return case as && !bs: return LtIterSVI32(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSI32(at, bt[0], rt, ait, rit) default: return LtIterI32(at, bt, rt, ait, bit, rit) } case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LtI64(at, bt, rt) return case as && !bs: return LtIterSVI64(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSI64(at, bt[0], rt, ait, rit) default: return LtIterI64(at, bt, rt, ait, bit, rit) } case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LtU(at, bt, rt) return case as && !bs: return LtIterSVU(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSU(at, bt[0], rt, ait, rit) default: return LtIterU(at, bt, rt, ait, bit, rit) } case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LtU8(at, bt, rt) return case as && !bs: return LtIterSVU8(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSU8(at, bt[0], rt, ait, rit) default: return LtIterU8(at, bt, rt, ait, bit, rit) } case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LtU16(at, bt, rt) return case as && !bs: return LtIterSVU16(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSU16(at, bt[0], rt, ait, rit) default: return LtIterU16(at, bt, rt, ait, bit, rit) } case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LtU32(at, bt, rt) return case as && !bs: return LtIterSVU32(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSU32(at, bt[0], rt, ait, rit) default: return LtIterU32(at, bt, rt, ait, bit, rit) } case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LtU64(at, bt, rt) return case as && !bs: return LtIterSVU64(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSU64(at, bt[0], rt, ait, rit) default: return LtIterU64(at, bt, rt, ait, bit, rit) } case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LtF32(at, bt, rt) return case as && !bs: return LtIterSVF32(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSF32(at, bt[0], rt, ait, rit) default: return LtIterF32(at, bt, rt, ait, bit, rit) } case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LtF64(at, bt, rt) return case as && !bs: return LtIterSVF64(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSF64(at, bt[0], rt, ait, rit) default: return LtIterF64(at, bt, rt, ait, bit, rit) } case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LtStr(at, bt, rt) return case as && !bs: return LtIterSVStr(at[0], bt, rt, bit, rit) case !as && bs: return LtIterVSStr(at, bt[0], rt, ait, rit) default: return LtIterStr(at, bt, rt, ait, bit, rit) } default: return errors.Errorf("Unsupported type %v for Lt", t) } } func (e E) LteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Int: at 
:= a.Ints() bt := b.Ints() switch { case as && bs: LteI(at, bt, rt) return case as && !bs: return LteIterSVI(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSI(at, bt[0], rt, ait, rit) default: return LteIterI(at, bt, rt, ait, bit, rit) } case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LteI8(at, bt, rt) return case as && !bs: return LteIterSVI8(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSI8(at, bt[0], rt, ait, rit) default: return LteIterI8(at, bt, rt, ait, bit, rit) } case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LteI16(at, bt, rt) return case as && !bs: return LteIterSVI16(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSI16(at, bt[0], rt, ait, rit) default: return LteIterI16(at, bt, rt, ait, bit, rit) } case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LteI32(at, bt, rt) return case as && !bs: return LteIterSVI32(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSI32(at, bt[0], rt, ait, rit) default: return LteIterI32(at, bt, rt, ait, bit, rit) } case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LteI64(at, bt, rt) return case as && !bs: return LteIterSVI64(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSI64(at, bt[0], rt, ait, rit) default: return LteIterI64(at, bt, rt, ait, bit, rit) } case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LteU(at, bt, rt) return case as && !bs: return LteIterSVU(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSU(at, bt[0], rt, ait, rit) default: return LteIterU(at, bt, rt, ait, bit, rit) } case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LteU8(at, bt, rt) return case as && !bs: return LteIterSVU8(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSU8(at, bt[0], rt, ait, rit) default: return LteIterU8(at, bt, rt, ait, bit, rit) } case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LteU16(at, bt, rt) return case as && !bs: return LteIterSVU16(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSU16(at, bt[0], rt, ait, rit) default: return LteIterU16(at, bt, rt, ait, bit, rit) } case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LteU32(at, bt, rt) return case as && !bs: return LteIterSVU32(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSU32(at, bt[0], rt, ait, rit) default: return LteIterU32(at, bt, rt, ait, bit, rit) } case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LteU64(at, bt, rt) return case as && !bs: return LteIterSVU64(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSU64(at, bt[0], rt, ait, rit) default: return LteIterU64(at, bt, rt, ait, bit, rit) } case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LteF32(at, bt, rt) return case as && !bs: return LteIterSVF32(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSF32(at, bt[0], rt, ait, rit) default: return LteIterF32(at, bt, rt, ait, bit, rit) } case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LteF64(at, bt, rt) return case as && !bs: return LteIterSVF64(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSF64(at, bt[0], rt, ait, rit) default: return LteIterF64(at, bt, rt, ait, bit, rit) } case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LteStr(at, bt, rt) return case as && !bs: return LteIterSVStr(at[0], bt, rt, bit, rit) case !as && bs: return LteIterVSStr(at, bt[0], rt, ait, rit) default: return LteIterStr(at, bt, rt, ait, bit, rit) } default: return 
errors.Errorf("Unsupported type %v for Lte", t) } } func (e E) EqIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: EqB(at, bt, rt) return case as && !bs: return EqIterSVB(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSB(at, bt[0], rt, ait, rit) default: return EqIterB(at, bt, rt, ait, bit, rit) } case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: EqI(at, bt, rt) return case as && !bs: return EqIterSVI(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSI(at, bt[0], rt, ait, rit) default: return EqIterI(at, bt, rt, ait, bit, rit) } case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: EqI8(at, bt, rt) return case as && !bs: return EqIterSVI8(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSI8(at, bt[0], rt, ait, rit) default: return EqIterI8(at, bt, rt, ait, bit, rit) } case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: EqI16(at, bt, rt) return case as && !bs: return EqIterSVI16(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSI16(at, bt[0], rt, ait, rit) default: return EqIterI16(at, bt, rt, ait, bit, rit) } case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: EqI32(at, bt, rt) return case as && !bs: return EqIterSVI32(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSI32(at, bt[0], rt, ait, rit) default: return EqIterI32(at, bt, rt, ait, bit, rit) } case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: EqI64(at, bt, rt) return case as && !bs: return EqIterSVI64(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSI64(at, bt[0], rt, ait, rit) default: return EqIterI64(at, bt, rt, ait, bit, rit) } case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: EqU(at, bt, rt) return case as && !bs: return EqIterSVU(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSU(at, bt[0], rt, ait, rit) default: return EqIterU(at, bt, rt, ait, bit, rit) } case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: EqU8(at, bt, rt) return case as && !bs: return EqIterSVU8(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSU8(at, bt[0], rt, ait, rit) default: return EqIterU8(at, bt, rt, ait, bit, rit) } case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: EqU16(at, bt, rt) return case as && !bs: return EqIterSVU16(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSU16(at, bt[0], rt, ait, rit) default: return EqIterU16(at, bt, rt, ait, bit, rit) } case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: EqU32(at, bt, rt) return case as && !bs: return EqIterSVU32(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSU32(at, bt[0], rt, ait, rit) default: return EqIterU32(at, bt, rt, ait, bit, rit) } case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: EqU64(at, bt, rt) return case as && !bs: return EqIterSVU64(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSU64(at, bt[0], rt, ait, rit) default: return EqIterU64(at, bt, rt, ait, bit, rit) } case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: EqUintptr(at, bt, rt) return case as && !bs: return EqIterSVUintptr(at[0], bt, rt, bit, rit) case 
!as && bs: return EqIterVSUintptr(at, bt[0], rt, ait, rit) default: return EqIterUintptr(at, bt, rt, ait, bit, rit) } case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: EqF32(at, bt, rt) return case as && !bs: return EqIterSVF32(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSF32(at, bt[0], rt, ait, rit) default: return EqIterF32(at, bt, rt, ait, bit, rit) } case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: EqF64(at, bt, rt) return case as && !bs: return EqIterSVF64(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSF64(at, bt[0], rt, ait, rit) default: return EqIterF64(at, bt, rt, ait, bit, rit) } case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: EqC64(at, bt, rt) return case as && !bs: return EqIterSVC64(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSC64(at, bt[0], rt, ait, rit) default: return EqIterC64(at, bt, rt, ait, bit, rit) } case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: EqC128(at, bt, rt) return case as && !bs: return EqIterSVC128(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSC128(at, bt[0], rt, ait, rit) default: return EqIterC128(at, bt, rt, ait, bit, rit) } case String: at := a.Strings() bt := b.Strings() switch { case as && bs: EqStr(at, bt, rt) return case as && !bs: return EqIterSVStr(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSStr(at, bt[0], rt, ait, rit) default: return EqIterStr(at, bt, rt, ait, bit, rit) } case UnsafePointer: at := a.UnsafePointers() bt := b.UnsafePointers() switch { case as && bs: EqUnsafePointer(at, bt, rt) return case as && !bs: return EqIterSVUnsafePointer(at[0], bt, rt, bit, rit) case !as && bs: return EqIterVSUnsafePointer(at, bt[0], rt, ait, rit) default: return EqIterUnsafePointer(at, bt, rt, ait, bit, rit) } default: return errors.Errorf("Unsupported type %v for Eq", t) } } func (e E) NeIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) rs := isScalar(retVal, Bool) rt := retVal.Bools() if ((as && !bs) || (bs && !as)) && rs { return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t)) } switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: NeB(at, bt, rt) return case as && !bs: return NeIterSVB(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSB(at, bt[0], rt, ait, rit) default: return NeIterB(at, bt, rt, ait, bit, rit) } case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: NeI(at, bt, rt) return case as && !bs: return NeIterSVI(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSI(at, bt[0], rt, ait, rit) default: return NeIterI(at, bt, rt, ait, bit, rit) } case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: NeI8(at, bt, rt) return case as && !bs: return NeIterSVI8(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSI8(at, bt[0], rt, ait, rit) default: return NeIterI8(at, bt, rt, ait, bit, rit) } case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: NeI16(at, bt, rt) return case as && !bs: return NeIterSVI16(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSI16(at, bt[0], rt, ait, rit) default: return NeIterI16(at, bt, rt, ait, bit, rit) } case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: NeI32(at, bt, rt) return case as && !bs: return NeIterSVI32(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSI32(at, 
bt[0], rt, ait, rit) default: return NeIterI32(at, bt, rt, ait, bit, rit) } case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: NeI64(at, bt, rt) return case as && !bs: return NeIterSVI64(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSI64(at, bt[0], rt, ait, rit) default: return NeIterI64(at, bt, rt, ait, bit, rit) } case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: NeU(at, bt, rt) return case as && !bs: return NeIterSVU(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSU(at, bt[0], rt, ait, rit) default: return NeIterU(at, bt, rt, ait, bit, rit) } case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: NeU8(at, bt, rt) return case as && !bs: return NeIterSVU8(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSU8(at, bt[0], rt, ait, rit) default: return NeIterU8(at, bt, rt, ait, bit, rit) } case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: NeU16(at, bt, rt) return case as && !bs: return NeIterSVU16(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSU16(at, bt[0], rt, ait, rit) default: return NeIterU16(at, bt, rt, ait, bit, rit) } case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: NeU32(at, bt, rt) return case as && !bs: return NeIterSVU32(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSU32(at, bt[0], rt, ait, rit) default: return NeIterU32(at, bt, rt, ait, bit, rit) } case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: NeU64(at, bt, rt) return case as && !bs: return NeIterSVU64(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSU64(at, bt[0], rt, ait, rit) default: return NeIterU64(at, bt, rt, ait, bit, rit) } case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: NeUintptr(at, bt, rt) return case as && !bs: return NeIterSVUintptr(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSUintptr(at, bt[0], rt, ait, rit) default: return NeIterUintptr(at, bt, rt, ait, bit, rit) } case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: NeF32(at, bt, rt) return case as && !bs: return NeIterSVF32(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSF32(at, bt[0], rt, ait, rit) default: return NeIterF32(at, bt, rt, ait, bit, rit) } case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: NeF64(at, bt, rt) return case as && !bs: return NeIterSVF64(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSF64(at, bt[0], rt, ait, rit) default: return NeIterF64(at, bt, rt, ait, bit, rit) } case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: NeC64(at, bt, rt) return case as && !bs: return NeIterSVC64(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSC64(at, bt[0], rt, ait, rit) default: return NeIterC64(at, bt, rt, ait, bit, rit) } case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: NeC128(at, bt, rt) return case as && !bs: return NeIterSVC128(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSC128(at, bt[0], rt, ait, rit) default: return NeIterC128(at, bt, rt, ait, bit, rit) } case String: at := a.Strings() bt := b.Strings() switch { case as && bs: NeStr(at, bt, rt) return case as && !bs: return NeIterSVStr(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSStr(at, bt[0], rt, ait, rit) default: return NeIterStr(at, bt, rt, ait, bit, rit) } case UnsafePointer: at := a.UnsafePointers() bt := b.UnsafePointers() switch { case as && bs: NeUnsafePointer(at, bt, rt) return case as && !bs: return 
NeIterSVUnsafePointer(at[0], bt, rt, bit, rit) case !as && bs: return NeIterVSUnsafePointer(at, bt[0], rt, ait, rit) default: return NeIterUnsafePointer(at, bt, rt, ait, bit, rit) } default: return errors.Errorf("Unsupported type %v for Ne", t) } } func (e E) GtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GtSameI(at, bt) case as && !bs: GtSameIterSVI(at[0], bt, bit) case !as && bs: GtSameIterVSI(at, bt[0], ait) default: GtSameIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GtSameI8(at, bt) case as && !bs: GtSameIterSVI8(at[0], bt, bit) case !as && bs: GtSameIterVSI8(at, bt[0], ait) default: GtSameIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GtSameI16(at, bt) case as && !bs: GtSameIterSVI16(at[0], bt, bit) case !as && bs: GtSameIterVSI16(at, bt[0], ait) default: GtSameIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GtSameI32(at, bt) case as && !bs: GtSameIterSVI32(at[0], bt, bit) case !as && bs: GtSameIterVSI32(at, bt[0], ait) default: GtSameIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GtSameI64(at, bt) case as && !bs: GtSameIterSVI64(at[0], bt, bit) case !as && bs: GtSameIterVSI64(at, bt[0], ait) default: GtSameIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GtSameU(at, bt) case as && !bs: GtSameIterSVU(at[0], bt, bit) case !as && bs: GtSameIterVSU(at, bt[0], ait) default: GtSameIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GtSameU8(at, bt) case as && !bs: GtSameIterSVU8(at[0], bt, bit) case !as && bs: GtSameIterVSU8(at, bt[0], ait) default: GtSameIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GtSameU16(at, bt) case as && !bs: GtSameIterSVU16(at[0], bt, bit) case !as && bs: GtSameIterVSU16(at, bt[0], ait) default: GtSameIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GtSameU32(at, bt) case as && !bs: GtSameIterSVU32(at[0], bt, bit) case !as && bs: GtSameIterVSU32(at, bt[0], ait) default: GtSameIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GtSameU64(at, bt) case as && !bs: GtSameIterSVU64(at[0], bt, bit) case !as && bs: GtSameIterVSU64(at, bt[0], ait) default: GtSameIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GtSameF32(at, bt) case as && !bs: GtSameIterSVF32(at[0], bt, bit) case !as && bs: GtSameIterVSF32(at, bt[0], ait) default: GtSameIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GtSameF64(at, bt) case as && !bs: GtSameIterSVF64(at[0], bt, bit) case !as && bs: GtSameIterVSF64(at, bt[0], ait) default: GtSameIterF64(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GtSameStr(at, bt) case as && !bs: GtSameIterSVStr(at[0], bt, bit) case !as && bs: GtSameIterVSStr(at, bt[0], ait) default: GtSameIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Gt", t) } } func (e E) GteSameIter(t 
reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: GteSameI(at, bt) case as && !bs: GteSameIterSVI(at[0], bt, bit) case !as && bs: GteSameIterVSI(at, bt[0], ait) default: GteSameIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: GteSameI8(at, bt) case as && !bs: GteSameIterSVI8(at[0], bt, bit) case !as && bs: GteSameIterVSI8(at, bt[0], ait) default: GteSameIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: GteSameI16(at, bt) case as && !bs: GteSameIterSVI16(at[0], bt, bit) case !as && bs: GteSameIterVSI16(at, bt[0], ait) default: GteSameIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: GteSameI32(at, bt) case as && !bs: GteSameIterSVI32(at[0], bt, bit) case !as && bs: GteSameIterVSI32(at, bt[0], ait) default: GteSameIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: GteSameI64(at, bt) case as && !bs: GteSameIterSVI64(at[0], bt, bit) case !as && bs: GteSameIterVSI64(at, bt[0], ait) default: GteSameIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: GteSameU(at, bt) case as && !bs: GteSameIterSVU(at[0], bt, bit) case !as && bs: GteSameIterVSU(at, bt[0], ait) default: GteSameIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: GteSameU8(at, bt) case as && !bs: GteSameIterSVU8(at[0], bt, bit) case !as && bs: GteSameIterVSU8(at, bt[0], ait) default: GteSameIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: GteSameU16(at, bt) case as && !bs: GteSameIterSVU16(at[0], bt, bit) case !as && bs: GteSameIterVSU16(at, bt[0], ait) default: GteSameIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: GteSameU32(at, bt) case as && !bs: GteSameIterSVU32(at[0], bt, bit) case !as && bs: GteSameIterVSU32(at, bt[0], ait) default: GteSameIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: GteSameU64(at, bt) case as && !bs: GteSameIterSVU64(at[0], bt, bit) case !as && bs: GteSameIterVSU64(at, bt[0], ait) default: GteSameIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: GteSameF32(at, bt) case as && !bs: GteSameIterSVF32(at[0], bt, bit) case !as && bs: GteSameIterVSF32(at, bt[0], ait) default: GteSameIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: GteSameF64(at, bt) case as && !bs: GteSameIterSVF64(at[0], bt, bit) case !as && bs: GteSameIterVSF64(at, bt[0], ait) default: GteSameIterF64(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: GteSameStr(at, bt) case as && !bs: GteSameIterSVStr(at[0], bt, bit) case !as && bs: GteSameIterVSStr(at, bt[0], ait) default: GteSameIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Gte", t) } } func (e E) LtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LtSameI(at, bt) case 
as && !bs: LtSameIterSVI(at[0], bt, bit) case !as && bs: LtSameIterVSI(at, bt[0], ait) default: LtSameIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LtSameI8(at, bt) case as && !bs: LtSameIterSVI8(at[0], bt, bit) case !as && bs: LtSameIterVSI8(at, bt[0], ait) default: LtSameIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LtSameI16(at, bt) case as && !bs: LtSameIterSVI16(at[0], bt, bit) case !as && bs: LtSameIterVSI16(at, bt[0], ait) default: LtSameIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LtSameI32(at, bt) case as && !bs: LtSameIterSVI32(at[0], bt, bit) case !as && bs: LtSameIterVSI32(at, bt[0], ait) default: LtSameIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LtSameI64(at, bt) case as && !bs: LtSameIterSVI64(at[0], bt, bit) case !as && bs: LtSameIterVSI64(at, bt[0], ait) default: LtSameIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LtSameU(at, bt) case as && !bs: LtSameIterSVU(at[0], bt, bit) case !as && bs: LtSameIterVSU(at, bt[0], ait) default: LtSameIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LtSameU8(at, bt) case as && !bs: LtSameIterSVU8(at[0], bt, bit) case !as && bs: LtSameIterVSU8(at, bt[0], ait) default: LtSameIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LtSameU16(at, bt) case as && !bs: LtSameIterSVU16(at[0], bt, bit) case !as && bs: LtSameIterVSU16(at, bt[0], ait) default: LtSameIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LtSameU32(at, bt) case as && !bs: LtSameIterSVU32(at[0], bt, bit) case !as && bs: LtSameIterVSU32(at, bt[0], ait) default: LtSameIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LtSameU64(at, bt) case as && !bs: LtSameIterSVU64(at[0], bt, bit) case !as && bs: LtSameIterVSU64(at, bt[0], ait) default: LtSameIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LtSameF32(at, bt) case as && !bs: LtSameIterSVF32(at[0], bt, bit) case !as && bs: LtSameIterVSF32(at, bt[0], ait) default: LtSameIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LtSameF64(at, bt) case as && !bs: LtSameIterSVF64(at[0], bt, bit) case !as && bs: LtSameIterVSF64(at, bt[0], ait) default: LtSameIterF64(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LtSameStr(at, bt) case as && !bs: LtSameIterSVStr(at[0], bt, bit) case !as && bs: LtSameIterVSStr(at, bt[0], ait) default: LtSameIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Lt", t) } } func (e E) LteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: LteSameI(at, bt) case as && !bs: LteSameIterSVI(at[0], bt, bit) case !as && bs: LteSameIterVSI(at, bt[0], ait) default: LteSameIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: LteSameI8(at, bt) case as && !bs: LteSameIterSVI8(at[0], bt, bit) case !as && 
bs: LteSameIterVSI8(at, bt[0], ait) default: LteSameIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: LteSameI16(at, bt) case as && !bs: LteSameIterSVI16(at[0], bt, bit) case !as && bs: LteSameIterVSI16(at, bt[0], ait) default: LteSameIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: LteSameI32(at, bt) case as && !bs: LteSameIterSVI32(at[0], bt, bit) case !as && bs: LteSameIterVSI32(at, bt[0], ait) default: LteSameIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: LteSameI64(at, bt) case as && !bs: LteSameIterSVI64(at[0], bt, bit) case !as && bs: LteSameIterVSI64(at, bt[0], ait) default: LteSameIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: LteSameU(at, bt) case as && !bs: LteSameIterSVU(at[0], bt, bit) case !as && bs: LteSameIterVSU(at, bt[0], ait) default: LteSameIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: LteSameU8(at, bt) case as && !bs: LteSameIterSVU8(at[0], bt, bit) case !as && bs: LteSameIterVSU8(at, bt[0], ait) default: LteSameIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: LteSameU16(at, bt) case as && !bs: LteSameIterSVU16(at[0], bt, bit) case !as && bs: LteSameIterVSU16(at, bt[0], ait) default: LteSameIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: LteSameU32(at, bt) case as && !bs: LteSameIterSVU32(at[0], bt, bit) case !as && bs: LteSameIterVSU32(at, bt[0], ait) default: LteSameIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: LteSameU64(at, bt) case as && !bs: LteSameIterSVU64(at[0], bt, bit) case !as && bs: LteSameIterVSU64(at, bt[0], ait) default: LteSameIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: LteSameF32(at, bt) case as && !bs: LteSameIterSVF32(at[0], bt, bit) case !as && bs: LteSameIterVSF32(at, bt[0], ait) default: LteSameIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: LteSameF64(at, bt) case as && !bs: LteSameIterSVF64(at[0], bt, bit) case !as && bs: LteSameIterVSF64(at, bt[0], ait) default: LteSameIterF64(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: LteSameStr(at, bt) case as && !bs: LteSameIterSVStr(at[0], bt, bit) case !as && bs: LteSameIterVSStr(at, bt[0], ait) default: LteSameIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Lte", t) } } func (e E) EqSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: EqSameB(at, bt) case as && !bs: EqSameIterSVB(at[0], bt, bit) case !as && bs: EqSameIterVSB(at, bt[0], ait) default: EqSameIterB(at, bt, ait, bit) } return case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: EqSameI(at, bt) case as && !bs: EqSameIterSVI(at[0], bt, bit) case !as && bs: EqSameIterVSI(at, bt[0], ait) default: EqSameIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: EqSameI8(at, bt) case as && !bs: EqSameIterSVI8(at[0], bt, bit) case !as && bs: 
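// Note the missing retVal/rit arguments in the *Same* variants: instead of producing a
// separate []bool result, they write the comparison outcome back into the vector operand
// in place, keeping the operand's own element type.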
EqSameIterVSI8(at, bt[0], ait) default: EqSameIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: EqSameI16(at, bt) case as && !bs: EqSameIterSVI16(at[0], bt, bit) case !as && bs: EqSameIterVSI16(at, bt[0], ait) default: EqSameIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: EqSameI32(at, bt) case as && !bs: EqSameIterSVI32(at[0], bt, bit) case !as && bs: EqSameIterVSI32(at, bt[0], ait) default: EqSameIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: EqSameI64(at, bt) case as && !bs: EqSameIterSVI64(at[0], bt, bit) case !as && bs: EqSameIterVSI64(at, bt[0], ait) default: EqSameIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: EqSameU(at, bt) case as && !bs: EqSameIterSVU(at[0], bt, bit) case !as && bs: EqSameIterVSU(at, bt[0], ait) default: EqSameIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: EqSameU8(at, bt) case as && !bs: EqSameIterSVU8(at[0], bt, bit) case !as && bs: EqSameIterVSU8(at, bt[0], ait) default: EqSameIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: EqSameU16(at, bt) case as && !bs: EqSameIterSVU16(at[0], bt, bit) case !as && bs: EqSameIterVSU16(at, bt[0], ait) default: EqSameIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: EqSameU32(at, bt) case as && !bs: EqSameIterSVU32(at[0], bt, bit) case !as && bs: EqSameIterVSU32(at, bt[0], ait) default: EqSameIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: EqSameU64(at, bt) case as && !bs: EqSameIterSVU64(at[0], bt, bit) case !as && bs: EqSameIterVSU64(at, bt[0], ait) default: EqSameIterU64(at, bt, ait, bit) } return case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: EqSameUintptr(at, bt) case as && !bs: EqSameIterSVUintptr(at[0], bt, bit) case !as && bs: EqSameIterVSUintptr(at, bt[0], ait) default: EqSameIterUintptr(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: EqSameF32(at, bt) case as && !bs: EqSameIterSVF32(at[0], bt, bit) case !as && bs: EqSameIterVSF32(at, bt[0], ait) default: EqSameIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: EqSameF64(at, bt) case as && !bs: EqSameIterSVF64(at[0], bt, bit) case !as && bs: EqSameIterVSF64(at, bt[0], ait) default: EqSameIterF64(at, bt, ait, bit) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: EqSameC64(at, bt) case as && !bs: EqSameIterSVC64(at[0], bt, bit) case !as && bs: EqSameIterVSC64(at, bt[0], ait) default: EqSameIterC64(at, bt, ait, bit) } return case Complex128: at := a.Complex128s() bt := b.Complex128s() switch { case as && bs: EqSameC128(at, bt) case as && !bs: EqSameIterSVC128(at[0], bt, bit) case !as && bs: EqSameIterVSC128(at, bt[0], ait) default: EqSameIterC128(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: EqSameStr(at, bt) case as && !bs: EqSameIterSVStr(at[0], bt, bit) case !as && bs: EqSameIterVSStr(at, bt[0], ait) default: EqSameIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Eq", t) } } func (e E) NeSameIter(t reflect.Type, a *storage.Header, b 
*storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Bool: at := a.Bools() bt := b.Bools() switch { case as && bs: NeSameB(at, bt) case as && !bs: NeSameIterSVB(at[0], bt, bit) case !as && bs: NeSameIterVSB(at, bt[0], ait) default: NeSameIterB(at, bt, ait, bit) } return case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: NeSameI(at, bt) case as && !bs: NeSameIterSVI(at[0], bt, bit) case !as && bs: NeSameIterVSI(at, bt[0], ait) default: NeSameIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: NeSameI8(at, bt) case as && !bs: NeSameIterSVI8(at[0], bt, bit) case !as && bs: NeSameIterVSI8(at, bt[0], ait) default: NeSameIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: NeSameI16(at, bt) case as && !bs: NeSameIterSVI16(at[0], bt, bit) case !as && bs: NeSameIterVSI16(at, bt[0], ait) default: NeSameIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: NeSameI32(at, bt) case as && !bs: NeSameIterSVI32(at[0], bt, bit) case !as && bs: NeSameIterVSI32(at, bt[0], ait) default: NeSameIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: NeSameI64(at, bt) case as && !bs: NeSameIterSVI64(at[0], bt, bit) case !as && bs: NeSameIterVSI64(at, bt[0], ait) default: NeSameIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: NeSameU(at, bt) case as && !bs: NeSameIterSVU(at[0], bt, bit) case !as && bs: NeSameIterVSU(at, bt[0], ait) default: NeSameIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: NeSameU8(at, bt) case as && !bs: NeSameIterSVU8(at[0], bt, bit) case !as && bs: NeSameIterVSU8(at, bt[0], ait) default: NeSameIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: NeSameU16(at, bt) case as && !bs: NeSameIterSVU16(at[0], bt, bit) case !as && bs: NeSameIterVSU16(at, bt[0], ait) default: NeSameIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: NeSameU32(at, bt) case as && !bs: NeSameIterSVU32(at[0], bt, bit) case !as && bs: NeSameIterVSU32(at, bt[0], ait) default: NeSameIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: NeSameU64(at, bt) case as && !bs: NeSameIterSVU64(at[0], bt, bit) case !as && bs: NeSameIterVSU64(at, bt[0], ait) default: NeSameIterU64(at, bt, ait, bit) } return case Uintptr: at := a.Uintptrs() bt := b.Uintptrs() switch { case as && bs: NeSameUintptr(at, bt) case as && !bs: NeSameIterSVUintptr(at[0], bt, bit) case !as && bs: NeSameIterVSUintptr(at, bt[0], ait) default: NeSameIterUintptr(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: NeSameF32(at, bt) case as && !bs: NeSameIterSVF32(at[0], bt, bit) case !as && bs: NeSameIterVSF32(at, bt[0], ait) default: NeSameIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: NeSameF64(at, bt) case as && !bs: NeSameIterSVF64(at[0], bt, bit) case !as && bs: NeSameIterVSF64(at, bt[0], ait) default: NeSameIterF64(at, bt, ait, bit) } return case Complex64: at := a.Complex64s() bt := b.Complex64s() switch { case as && bs: NeSameC64(at, bt) case as && !bs: NeSameIterSVC64(at[0], bt, bit) case !as && bs: 
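// Complex64/Complex128 branches appear only in the Eq*/Ne* dispatchers: Go defines
// == and != for complex values but no ordering, so the Gt/Gte/Lt/Lte tables stop at
// Float64 (plus String, which Go does order lexically).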
			NeSameIterVSC64(at, bt[0], ait)
		default:
			NeSameIterC64(at, bt, ait, bit)
		}
		return
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			NeSameC128(at, bt)
		case as && !bs:
			NeSameIterSVC128(at[0], bt, bit)
		case !as && bs:
			NeSameIterVSC128(at, bt[0], ait)
		default:
			NeSameIterC128(at, bt, ait, bit)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			NeSameStr(at, bt)
		case as && !bs:
			NeSameIterSVStr(at[0], bt, bit)
		case !as && bs:
			NeSameIterVSStr(at, bt[0], ait)
		default:
			NeSameIterStr(at, bt, ait, bit)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Ne", t)
	}
}
tensor-0.9.24/internal/execution/eng_map.go000066400000000000000000000562751426512615100207140ustar00rootroot00000000000000
// Code generated by genlib2. DO NOT EDIT.

package execution

import (
	"reflect"
	"unsafe"

	"github.com/pkg/errors"

	"gorgonia.org/tensor/internal/storage"
)

func (e E) Map(t reflect.Type, fn interface{}, a *storage.Header, incr bool) (err error) {
	as := isScalar(a, t)
	switch t {
	case Bool:
		var f0 func(bool) bool
		var f1 func(bool) (bool, error)
		switch f := fn.(type) {
		case func(bool) bool:
			f0 = f
		case func(bool) (bool, error):
			f1 = f
		default:
			return errors.Errorf("Cannot map fn of %T to array", fn)
		}
		at := a.Bools()
		if incr {
			return errors.Errorf("Cannot perform increment on t of %v", t)
		}
		switch {
		case as && f0 != nil:
			at[0] = f0(at[0])
		case as && f0 == nil:
			at[0], err = f1(at[0])
		case !as && f0 == nil:
			err = MapErrB(f1, at)
		default:
			MapB(f0, at)
		}
	case Int:
		var f0 func(int) int
		var f1 func(int) (int, error)
		switch f := fn.(type) {
		case func(int) int:
			f0 = f
		case func(int) (int, error):
			f1 = f
		default:
			return errors.Errorf("Cannot map fn of %T to array", fn)
		}
		at := a.Ints()
		switch {
		case as && incr && f0 != nil:
			at[0] += f0(at[0])
		case as && incr && f0 == nil:
			var tmp int
			if tmp, err = f1(at[0]); err != nil {
				return
			}
			at[0] += tmp
		case as && !incr && f0 != nil:
			at[0] = f0(at[0])
		case as && !incr && f0 == nil:
			at[0], err = f1(at[0])
		case !as && incr && f0 != nil:
			MapIncrI(f0, at)
		case !as && incr && f0 == nil:
			err = MapIncrErrI(f1, at)
		case !as && !incr && f0 == nil:
			err = MapErrI(f1, at)
		default:
			MapI(f0, at)
		}
	case Int8:
		var f0 func(int8) int8
		var f1 func(int8) (int8, error)
		switch f := fn.(type) {
		case func(int8) int8:
			f0 = f
		case func(int8) (int8, error):
			f1 = f
		default:
			return errors.Errorf("Cannot map fn of %T to array", fn)
		}
		at := a.Int8s()
		switch {
		case as && incr && f0 != nil:
			at[0] += f0(at[0])
		case as && incr && f0 == nil:
			var tmp int8
			if tmp, err = f1(at[0]); err != nil {
				return
			}
			at[0] += tmp
		case as && !incr && f0 != nil:
			at[0] = f0(at[0])
		case as && !incr && f0 == nil:
			at[0], err = f1(at[0])
		case !as && incr && f0 != nil:
			MapIncrI8(f0, at)
		case !as && incr && f0 == nil:
			err = MapIncrErrI8(f1, at)
		case !as && !incr && f0 == nil:
			err = MapErrI8(f1, at)
		default:
			MapI8(f0, at)
		}
	case Int16:
		var f0 func(int16) int16
		var f1 func(int16) (int16, error)
		switch f := fn.(type) {
		case func(int16) int16:
			f0 = f
		case func(int16) (int16, error):
			f1 = f
		default:
			return errors.Errorf("Cannot map fn of %T to array", fn)
		}
		at := a.Int16s()
		switch {
		case as && incr && f0 != nil:
			at[0] += f0(at[0])
		case as && incr && f0 == nil:
			var tmp int16
			if tmp, err = f1(at[0]); err != nil {
				return
			}
			at[0] += tmp
		case as && !incr && f0 != nil:
			at[0] = f0(at[0])
		case as && !incr && f0 == nil:
			at[0], err = f1(at[0])
		case !as && incr && f0 != nil:
			MapIncrI16(f0, at)
		case !as && incr && f0 == nil:
			err = MapIncrErrI16(f1, at)
		case !as && !incr && f0 == nil:
			err =
MapErrI16(f1, at) default: MapI16(f0, at) } case Int32: var f0 func(int32) int32 var f1 func(int32) (int32, error) switch f := fn.(type) { case func(int32) int32: f0 = f case func(int32) (int32, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Int32s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp int32 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrI32(f0, at) case !as && incr && f0 == nil: err = MapIncrErrI32(f1, at) case !as && !incr && f0 == nil: err = MapErrI32(f1, at) default: MapI32(f0, at) } case Int64: var f0 func(int64) int64 var f1 func(int64) (int64, error) switch f := fn.(type) { case func(int64) int64: f0 = f case func(int64) (int64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Int64s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp int64 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrI64(f0, at) case !as && incr && f0 == nil: err = MapIncrErrI64(f1, at) case !as && !incr && f0 == nil: err = MapErrI64(f1, at) default: MapI64(f0, at) } case Uint: var f0 func(uint) uint var f1 func(uint) (uint, error) switch f := fn.(type) { case func(uint) uint: f0 = f case func(uint) (uint, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Uints() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp uint if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrU(f0, at) case !as && incr && f0 == nil: err = MapIncrErrU(f1, at) case !as && !incr && f0 == nil: err = MapErrU(f1, at) default: MapU(f0, at) } case Uint8: var f0 func(uint8) uint8 var f1 func(uint8) (uint8, error) switch f := fn.(type) { case func(uint8) uint8: f0 = f case func(uint8) (uint8, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Uint8s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp uint8 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrU8(f0, at) case !as && incr && f0 == nil: err = MapIncrErrU8(f1, at) case !as && !incr && f0 == nil: err = MapErrU8(f1, at) default: MapU8(f0, at) } case Uint16: var f0 func(uint16) uint16 var f1 func(uint16) (uint16, error) switch f := fn.(type) { case func(uint16) uint16: f0 = f case func(uint16) (uint16, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Uint16s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp uint16 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrU16(f0, at) case !as && incr && f0 == nil: err = MapIncrErrU16(f1, at) case !as && !incr && f0 == nil: err = MapErrU16(f1, at) default: 
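// default: reached only when !as && !incr && f0 != nil, i.e. a full slice with a
// plain (infallible) fn — the simple in-place map over every element.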
MapU16(f0, at) } case Uint32: var f0 func(uint32) uint32 var f1 func(uint32) (uint32, error) switch f := fn.(type) { case func(uint32) uint32: f0 = f case func(uint32) (uint32, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Uint32s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp uint32 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrU32(f0, at) case !as && incr && f0 == nil: err = MapIncrErrU32(f1, at) case !as && !incr && f0 == nil: err = MapErrU32(f1, at) default: MapU32(f0, at) } case Uint64: var f0 func(uint64) uint64 var f1 func(uint64) (uint64, error) switch f := fn.(type) { case func(uint64) uint64: f0 = f case func(uint64) (uint64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Uint64s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp uint64 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrU64(f0, at) case !as && incr && f0 == nil: err = MapIncrErrU64(f1, at) case !as && !incr && f0 == nil: err = MapErrU64(f1, at) default: MapU64(f0, at) } case Uintptr: var f0 func(uintptr) uintptr var f1 func(uintptr) (uintptr, error) switch f := fn.(type) { case func(uintptr) uintptr: f0 = f case func(uintptr) (uintptr, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Uintptrs() if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case as && f0 != nil: at[0] = f0(at[0]) case as && f0 == nil: at[0], err = f1(at[0]) case !as && f0 == nil: err = MapErrUintptr(f1, at) default: MapUintptr(f0, at) } case Float32: var f0 func(float32) float32 var f1 func(float32) (float32, error) switch f := fn.(type) { case func(float32) float32: f0 = f case func(float32) (float32, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Float32s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp float32 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrF32(f0, at) case !as && incr && f0 == nil: err = MapIncrErrF32(f1, at) case !as && !incr && f0 == nil: err = MapErrF32(f1, at) default: MapF32(f0, at) } case Float64: var f0 func(float64) float64 var f1 func(float64) (float64, error) switch f := fn.(type) { case func(float64) float64: f0 = f case func(float64) (float64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Float64s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp float64 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrF64(f0, at) case !as && incr && f0 == nil: err = MapIncrErrF64(f1, at) case !as && !incr && f0 == nil: err = MapErrF64(f1, at) default: MapF64(f0, at) } case Complex64: var f0 func(complex64) complex64 var f1 func(complex64) (complex64, error) switch f := fn.(type) { case 
func(complex64) complex64: f0 = f case func(complex64) (complex64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Complex64s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp complex64 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrC64(f0, at) case !as && incr && f0 == nil: err = MapIncrErrC64(f1, at) case !as && !incr && f0 == nil: err = MapErrC64(f1, at) default: MapC64(f0, at) } case Complex128: var f0 func(complex128) complex128 var f1 func(complex128) (complex128, error) switch f := fn.(type) { case func(complex128) complex128: f0 = f case func(complex128) (complex128, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Complex128s() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp complex128 if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrC128(f0, at) case !as && incr && f0 == nil: err = MapIncrErrC128(f1, at) case !as && !incr && f0 == nil: err = MapErrC128(f1, at) default: MapC128(f0, at) } case String: var f0 func(string) string var f1 func(string) (string, error) switch f := fn.(type) { case func(string) string: f0 = f case func(string) (string, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.Strings() switch { case as && incr && f0 != nil: at[0] += f0(at[0]) case as && incr && f0 == nil: var tmp string if tmp, err = f1(at[0]); err != nil { return } at[0] += tmp case as && !incr && f0 != nil: at[0] = f0(at[0]) case as && !incr && f0 == nil: at[0], err = f1(at[0]) case !as && incr && f0 != nil: MapIncrStr(f0, at) case !as && incr && f0 == nil: err = MapIncrErrStr(f1, at) case !as && !incr && f0 == nil: err = MapErrStr(f1, at) default: MapStr(f0, at) } case UnsafePointer: var f0 func(unsafe.Pointer) unsafe.Pointer var f1 func(unsafe.Pointer) (unsafe.Pointer, error) switch f := fn.(type) { case func(unsafe.Pointer) unsafe.Pointer: f0 = f case func(unsafe.Pointer) (unsafe.Pointer, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } at := a.UnsafePointers() if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case as && f0 != nil: at[0] = f0(at[0]) case as && f0 == nil: at[0], err = f1(at[0]) case !as && f0 == nil: err = MapErrUnsafePointer(f1, at) default: MapUnsafePointer(f0, at) } default: return errors.Errorf("Cannot map t of %v", t) } return } func (e E) MapIter(t reflect.Type, fn interface{}, a *storage.Header, incr bool, ait Iterator) (err error) { switch t { case Bool: at := a.Bools() var f0 func(bool) bool var f1 func(bool) (bool, error) switch f := fn.(type) { case func(bool) bool: f0 = f case func(bool) (bool, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case f0 == nil: err = MapIterErrB(f1, at, ait) default: MapIterB(f0, at, ait) } case Int: at := a.Ints() var f0 func(int) int var f1 func(int) (int, error) switch f := fn.(type) { case func(int) int: f0 = f case func(int) (int, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case 
incr && f0 != nil: MapIterIncrI(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrI(f1, at, ait) case !incr && f0 == nil: err = MapIterErrI(f1, at, ait) default: MapIterI(f0, at, ait) } case Int8: at := a.Int8s() var f0 func(int8) int8 var f1 func(int8) (int8, error) switch f := fn.(type) { case func(int8) int8: f0 = f case func(int8) (int8, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrI8(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrI8(f1, at, ait) case !incr && f0 == nil: err = MapIterErrI8(f1, at, ait) default: MapIterI8(f0, at, ait) } case Int16: at := a.Int16s() var f0 func(int16) int16 var f1 func(int16) (int16, error) switch f := fn.(type) { case func(int16) int16: f0 = f case func(int16) (int16, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrI16(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrI16(f1, at, ait) case !incr && f0 == nil: err = MapIterErrI16(f1, at, ait) default: MapIterI16(f0, at, ait) } case Int32: at := a.Int32s() var f0 func(int32) int32 var f1 func(int32) (int32, error) switch f := fn.(type) { case func(int32) int32: f0 = f case func(int32) (int32, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrI32(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrI32(f1, at, ait) case !incr && f0 == nil: err = MapIterErrI32(f1, at, ait) default: MapIterI32(f0, at, ait) } case Int64: at := a.Int64s() var f0 func(int64) int64 var f1 func(int64) (int64, error) switch f := fn.(type) { case func(int64) int64: f0 = f case func(int64) (int64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrI64(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrI64(f1, at, ait) case !incr && f0 == nil: err = MapIterErrI64(f1, at, ait) default: MapIterI64(f0, at, ait) } case Uint: at := a.Uints() var f0 func(uint) uint var f1 func(uint) (uint, error) switch f := fn.(type) { case func(uint) uint: f0 = f case func(uint) (uint, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrU(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrU(f1, at, ait) case !incr && f0 == nil: err = MapIterErrU(f1, at, ait) default: MapIterU(f0, at, ait) } case Uint8: at := a.Uint8s() var f0 func(uint8) uint8 var f1 func(uint8) (uint8, error) switch f := fn.(type) { case func(uint8) uint8: f0 = f case func(uint8) (uint8, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrU8(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrU8(f1, at, ait) case !incr && f0 == nil: err = MapIterErrU8(f1, at, ait) default: MapIterU8(f0, at, ait) } case Uint16: at := a.Uint16s() var f0 func(uint16) uint16 var f1 func(uint16) (uint16, error) switch f := fn.(type) { case func(uint16) uint16: f0 = f case func(uint16) (uint16, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrU16(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrU16(f1, at, ait) case !incr && f0 == nil: err = MapIterErrU16(f1, at, ait) default: MapIterU16(f0, at, ait) } case Uint32: at := a.Uint32s() var f0 func(uint32) uint32 var f1 func(uint32) (uint32, error) switch f := fn.(type) { case 
func(uint32) uint32: f0 = f case func(uint32) (uint32, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrU32(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrU32(f1, at, ait) case !incr && f0 == nil: err = MapIterErrU32(f1, at, ait) default: MapIterU32(f0, at, ait) } case Uint64: at := a.Uint64s() var f0 func(uint64) uint64 var f1 func(uint64) (uint64, error) switch f := fn.(type) { case func(uint64) uint64: f0 = f case func(uint64) (uint64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrU64(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrU64(f1, at, ait) case !incr && f0 == nil: err = MapIterErrU64(f1, at, ait) default: MapIterU64(f0, at, ait) } case Uintptr: at := a.Uintptrs() var f0 func(uintptr) uintptr var f1 func(uintptr) (uintptr, error) switch f := fn.(type) { case func(uintptr) uintptr: f0 = f case func(uintptr) (uintptr, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case f0 == nil: err = MapIterErrUintptr(f1, at, ait) default: MapIterUintptr(f0, at, ait) } case Float32: at := a.Float32s() var f0 func(float32) float32 var f1 func(float32) (float32, error) switch f := fn.(type) { case func(float32) float32: f0 = f case func(float32) (float32, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrF32(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrF32(f1, at, ait) case !incr && f0 == nil: err = MapIterErrF32(f1, at, ait) default: MapIterF32(f0, at, ait) } case Float64: at := a.Float64s() var f0 func(float64) float64 var f1 func(float64) (float64, error) switch f := fn.(type) { case func(float64) float64: f0 = f case func(float64) (float64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrF64(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrF64(f1, at, ait) case !incr && f0 == nil: err = MapIterErrF64(f1, at, ait) default: MapIterF64(f0, at, ait) } case Complex64: at := a.Complex64s() var f0 func(complex64) complex64 var f1 func(complex64) (complex64, error) switch f := fn.(type) { case func(complex64) complex64: f0 = f case func(complex64) (complex64, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrC64(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrC64(f1, at, ait) case !incr && f0 == nil: err = MapIterErrC64(f1, at, ait) default: MapIterC64(f0, at, ait) } case Complex128: at := a.Complex128s() var f0 func(complex128) complex128 var f1 func(complex128) (complex128, error) switch f := fn.(type) { case func(complex128) complex128: f0 = f case func(complex128) (complex128, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr && f0 != nil: MapIterIncrC128(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrC128(f1, at, ait) case !incr && f0 == nil: err = MapIterErrC128(f1, at, ait) default: MapIterC128(f0, at, ait) } case String: at := a.Strings() var f0 func(string) string var f1 func(string) (string, error) switch f := fn.(type) { case func(string) string: f0 = f case func(string) (string, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } switch { case incr 
&& f0 != nil: MapIterIncrStr(f0, at, ait) case incr && f0 == nil: err = MapIterIncrErrStr(f1, at, ait) case !incr && f0 == nil: err = MapIterErrStr(f1, at, ait) default: MapIterStr(f0, at, ait) } case UnsafePointer: at := a.UnsafePointers() var f0 func(unsafe.Pointer) unsafe.Pointer var f1 func(unsafe.Pointer) (unsafe.Pointer, error) switch f := fn.(type) { case func(unsafe.Pointer) unsafe.Pointer: f0 = f case func(unsafe.Pointer) (unsafe.Pointer, error): f1 = f default: return errors.Errorf("Cannot map fn of %T to array", fn) } if incr { return errors.Errorf("Cannot perform increment on t of %v", t) } switch { case f0 == nil: err = MapIterErrUnsafePointer(f1, at, ait) default: MapIterUnsafePointer(f0, at, ait) } default: return errors.Errorf("Cannot map t of %v", t) } return } tensor-0.9.24/internal/execution/eng_minmaxbetween.go000066400000000000000000000325631426512615100227760ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) MaxBetween(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecMaxI(at, bt) case as && !bs: MaxSVI(at[0], bt) case !as && bs: MaxVSI(at, bt[0]) default: VecMaxI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecMaxI8(at, bt) case as && !bs: MaxSVI8(at[0], bt) case !as && bs: MaxVSI8(at, bt[0]) default: VecMaxI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecMaxI16(at, bt) case as && !bs: MaxSVI16(at[0], bt) case !as && bs: MaxVSI16(at, bt[0]) default: VecMaxI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecMaxI32(at, bt) case as && !bs: MaxSVI32(at[0], bt) case !as && bs: MaxVSI32(at, bt[0]) default: VecMaxI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecMaxI64(at, bt) case as && !bs: MaxSVI64(at[0], bt) case !as && bs: MaxVSI64(at, bt[0]) default: VecMaxI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecMaxU(at, bt) case as && !bs: MaxSVU(at[0], bt) case !as && bs: MaxVSU(at, bt[0]) default: VecMaxU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecMaxU8(at, bt) case as && !bs: MaxSVU8(at[0], bt) case !as && bs: MaxVSU8(at, bt[0]) default: VecMaxU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecMaxU16(at, bt) case as && !bs: MaxSVU16(at[0], bt) case !as && bs: MaxVSU16(at, bt[0]) default: VecMaxU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecMaxU32(at, bt) case as && !bs: MaxSVU32(at[0], bt) case !as && bs: MaxVSU32(at, bt[0]) default: VecMaxU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecMaxU64(at, bt) case as && !bs: MaxSVU64(at[0], bt) case !as && bs: MaxVSU64(at, bt[0]) default: VecMaxU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecMaxF32(at, bt) case as && !bs: MaxSVF32(at[0], bt) case !as && bs: MaxVSF32(at, bt[0]) default: VecMaxF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecMaxF64(at, bt) case as && !bs: MaxSVF64(at[0], bt) case !as && bs: MaxVSF64(at, bt[0]) default: VecMaxF64(at, bt) } return case 
String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecMaxStr(at, bt) case as && !bs: MaxSVStr(at[0], bt) case !as && bs: MaxVSStr(at, bt[0]) default: VecMaxStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Max", t) } } func (e E) MinBetween(t reflect.Type, a *storage.Header, b *storage.Header) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecMinI(at, bt) case as && !bs: MinSVI(at[0], bt) case !as && bs: MinVSI(at, bt[0]) default: VecMinI(at, bt) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecMinI8(at, bt) case as && !bs: MinSVI8(at[0], bt) case !as && bs: MinVSI8(at, bt[0]) default: VecMinI8(at, bt) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecMinI16(at, bt) case as && !bs: MinSVI16(at[0], bt) case !as && bs: MinVSI16(at, bt[0]) default: VecMinI16(at, bt) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecMinI32(at, bt) case as && !bs: MinSVI32(at[0], bt) case !as && bs: MinVSI32(at, bt[0]) default: VecMinI32(at, bt) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecMinI64(at, bt) case as && !bs: MinSVI64(at[0], bt) case !as && bs: MinVSI64(at, bt[0]) default: VecMinI64(at, bt) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecMinU(at, bt) case as && !bs: MinSVU(at[0], bt) case !as && bs: MinVSU(at, bt[0]) default: VecMinU(at, bt) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecMinU8(at, bt) case as && !bs: MinSVU8(at[0], bt) case !as && bs: MinVSU8(at, bt[0]) default: VecMinU8(at, bt) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecMinU16(at, bt) case as && !bs: MinSVU16(at[0], bt) case !as && bs: MinVSU16(at, bt[0]) default: VecMinU16(at, bt) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecMinU32(at, bt) case as && !bs: MinSVU32(at[0], bt) case !as && bs: MinVSU32(at, bt[0]) default: VecMinU32(at, bt) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecMinU64(at, bt) case as && !bs: MinSVU64(at[0], bt) case !as && bs: MinVSU64(at, bt[0]) default: VecMinU64(at, bt) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecMinF32(at, bt) case as && !bs: MinSVF32(at[0], bt) case !as && bs: MinVSF32(at, bt[0]) default: VecMinF32(at, bt) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecMinF64(at, bt) case as && !bs: MinSVF64(at[0], bt) case !as && bs: MinVSF64(at, bt[0]) default: VecMinF64(at, bt) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecMinStr(at, bt) case as && !bs: MinSVStr(at[0], bt) case !as && bs: MinVSStr(at, bt[0]) default: VecMinStr(at, bt) } return default: return errors.Errorf("Unsupported type %v for Min", t) } } func (e E) MaxBetweenIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecMaxI(at, bt) case as && !bs: MaxIterSVI(at[0], bt, bit) case !as && bs: MaxIterVSI(at, bt[0], ait) default: VecMaxIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecMaxI8(at, bt) case as && !bs: MaxIterSVI8(at[0], bt, bit) case !as && bs: 
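// Note on the kernel naming (inferred from the call shapes): the VS arm below pairs the vector a with the scalar bt[0], and the SV arm above is its mirror image. Only the vector operand's iterator is consulted, which is how strided or sliced views are walked without materializing them.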
MaxIterVSI8(at, bt[0], ait) default: VecMaxIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecMaxI16(at, bt) case as && !bs: MaxIterSVI16(at[0], bt, bit) case !as && bs: MaxIterVSI16(at, bt[0], ait) default: VecMaxIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { case as && bs: VecMaxI32(at, bt) case as && !bs: MaxIterSVI32(at[0], bt, bit) case !as && bs: MaxIterVSI32(at, bt[0], ait) default: VecMaxIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecMaxI64(at, bt) case as && !bs: MaxIterSVI64(at[0], bt, bit) case !as && bs: MaxIterVSI64(at, bt[0], ait) default: VecMaxIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecMaxU(at, bt) case as && !bs: MaxIterSVU(at[0], bt, bit) case !as && bs: MaxIterVSU(at, bt[0], ait) default: VecMaxIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecMaxU8(at, bt) case as && !bs: MaxIterSVU8(at[0], bt, bit) case !as && bs: MaxIterVSU8(at, bt[0], ait) default: VecMaxIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecMaxU16(at, bt) case as && !bs: MaxIterSVU16(at[0], bt, bit) case !as && bs: MaxIterVSU16(at, bt[0], ait) default: VecMaxIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecMaxU32(at, bt) case as && !bs: MaxIterSVU32(at[0], bt, bit) case !as && bs: MaxIterVSU32(at, bt[0], ait) default: VecMaxIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecMaxU64(at, bt) case as && !bs: MaxIterSVU64(at[0], bt, bit) case !as && bs: MaxIterVSU64(at, bt[0], ait) default: VecMaxIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecMaxF32(at, bt) case as && !bs: MaxIterSVF32(at[0], bt, bit) case !as && bs: MaxIterVSF32(at, bt[0], ait) default: VecMaxIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecMaxF64(at, bt) case as && !bs: MaxIterSVF64(at[0], bt, bit) case !as && bs: MaxIterVSF64(at, bt[0], ait) default: VecMaxIterF64(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecMaxStr(at, bt) case as && !bs: MaxIterSVStr(at[0], bt, bit) case !as && bs: MaxIterVSStr(at, bt[0], ait) default: VecMaxIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Max", t) } } func (e E) MinBetweenIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) { as := isScalar(a, t) bs := isScalar(b, t) switch t { case Int: at := a.Ints() bt := b.Ints() switch { case as && bs: VecMinI(at, bt) case as && !bs: MinIterSVI(at[0], bt, bit) case !as && bs: MinIterVSI(at, bt[0], ait) default: VecMinIterI(at, bt, ait, bit) } return case Int8: at := a.Int8s() bt := b.Int8s() switch { case as && bs: VecMinI8(at, bt) case as && !bs: MinIterSVI8(at[0], bt, bit) case !as && bs: MinIterVSI8(at, bt[0], ait) default: VecMinIterI8(at, bt, ait, bit) } return case Int16: at := a.Int16s() bt := b.Int16s() switch { case as && bs: VecMinI16(at, bt) case as && !bs: MinIterSVI16(at[0], bt, bit) case !as && bs: MinIterVSI16(at, bt[0], ait) default: VecMinIterI16(at, bt, ait, bit) } return case Int32: at := a.Int32s() bt := b.Int32s() switch { 
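// When both operands are scalar the iterators carry no information, so the plain single-element vector kernel below is reused instead of an Iter variant.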
case as && bs: VecMinI32(at, bt) case as && !bs: MinIterSVI32(at[0], bt, bit) case !as && bs: MinIterVSI32(at, bt[0], ait) default: VecMinIterI32(at, bt, ait, bit) } return case Int64: at := a.Int64s() bt := b.Int64s() switch { case as && bs: VecMinI64(at, bt) case as && !bs: MinIterSVI64(at[0], bt, bit) case !as && bs: MinIterVSI64(at, bt[0], ait) default: VecMinIterI64(at, bt, ait, bit) } return case Uint: at := a.Uints() bt := b.Uints() switch { case as && bs: VecMinU(at, bt) case as && !bs: MinIterSVU(at[0], bt, bit) case !as && bs: MinIterVSU(at, bt[0], ait) default: VecMinIterU(at, bt, ait, bit) } return case Uint8: at := a.Uint8s() bt := b.Uint8s() switch { case as && bs: VecMinU8(at, bt) case as && !bs: MinIterSVU8(at[0], bt, bit) case !as && bs: MinIterVSU8(at, bt[0], ait) default: VecMinIterU8(at, bt, ait, bit) } return case Uint16: at := a.Uint16s() bt := b.Uint16s() switch { case as && bs: VecMinU16(at, bt) case as && !bs: MinIterSVU16(at[0], bt, bit) case !as && bs: MinIterVSU16(at, bt[0], ait) default: VecMinIterU16(at, bt, ait, bit) } return case Uint32: at := a.Uint32s() bt := b.Uint32s() switch { case as && bs: VecMinU32(at, bt) case as && !bs: MinIterSVU32(at[0], bt, bit) case !as && bs: MinIterVSU32(at, bt[0], ait) default: VecMinIterU32(at, bt, ait, bit) } return case Uint64: at := a.Uint64s() bt := b.Uint64s() switch { case as && bs: VecMinU64(at, bt) case as && !bs: MinIterSVU64(at[0], bt, bit) case !as && bs: MinIterVSU64(at, bt[0], ait) default: VecMinIterU64(at, bt, ait, bit) } return case Float32: at := a.Float32s() bt := b.Float32s() switch { case as && bs: VecMinF32(at, bt) case as && !bs: MinIterSVF32(at[0], bt, bit) case !as && bs: MinIterVSF32(at, bt[0], ait) default: VecMinIterF32(at, bt, ait, bit) } return case Float64: at := a.Float64s() bt := b.Float64s() switch { case as && bs: VecMinF64(at, bt) case as && !bs: MinIterSVF64(at[0], bt, bit) case !as && bs: MinIterVSF64(at, bt[0], ait) default: VecMinIterF64(at, bt, ait, bit) } return case String: at := a.Strings() bt := b.Strings() switch { case as && bs: VecMinStr(at, bt) case as && !bs: MinIterSVStr(at[0], bt, bit) case !as && bs: MinIterVSStr(at, bt[0], ait) default: VecMinIterStr(at, bt, ait, bit) } return default: return errors.Errorf("Unsupported type %v for Min", t) } } tensor-0.9.24/internal/execution/eng_reduce.go000066400000000000000000000647501426512615100214050ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
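// The reduction methods below all follow the same dispatch shape: type-switch on the element type t, pull the concretely-typed backing slice out of the header, then type-switch on fn's concrete signature. A minimal sketch of the flat fold, assuming hdr is an already-populated *storage.Header backed by float64 data (its construction is not shown in this file):
//
//	var e E
//	sum, err := e.Reduce(Float64, hdr, 0.0,
//		func(x, y float64) float64 { return x + y })
//
// On success sum holds the float64 total boxed in an interface{}; a mismatch between t, the default value, and fn's signature is reported as an error rather than a panic.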
package execution import ( "reflect" "unsafe" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) ReduceFirst(t reflect.Type, data *storage.Header, retVal *storage.Header, split int, size int, fn interface{}) (err error) { switch t { case Bool: dt := data.Bools() rt := retVal.Bools() switch f := fn.(type) { case func([]bool, []bool): reduceFirstB(dt, rt, split, size, f) case func(bool, bool) bool: genericReduceFirstB(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int: dt := data.Ints() rt := retVal.Ints() switch f := fn.(type) { case func([]int, []int): reduceFirstI(dt, rt, split, size, f) case func(int, int) int: genericReduceFirstI(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int8: dt := data.Int8s() rt := retVal.Int8s() switch f := fn.(type) { case func([]int8, []int8): reduceFirstI8(dt, rt, split, size, f) case func(int8, int8) int8: genericReduceFirstI8(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int16: dt := data.Int16s() rt := retVal.Int16s() switch f := fn.(type) { case func([]int16, []int16): reduceFirstI16(dt, rt, split, size, f) case func(int16, int16) int16: genericReduceFirstI16(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int32: dt := data.Int32s() rt := retVal.Int32s() switch f := fn.(type) { case func([]int32, []int32): reduceFirstI32(dt, rt, split, size, f) case func(int32, int32) int32: genericReduceFirstI32(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int64: dt := data.Int64s() rt := retVal.Int64s() switch f := fn.(type) { case func([]int64, []int64): reduceFirstI64(dt, rt, split, size, f) case func(int64, int64) int64: genericReduceFirstI64(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint: dt := data.Uints() rt := retVal.Uints() switch f := fn.(type) { case func([]uint, []uint): reduceFirstU(dt, rt, split, size, f) case func(uint, uint) uint: genericReduceFirstU(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint8: dt := data.Uint8s() rt := retVal.Uint8s() switch f := fn.(type) { case func([]uint8, []uint8): reduceFirstU8(dt, rt, split, size, f) case func(uint8, uint8) uint8: genericReduceFirstU8(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint16: dt := data.Uint16s() rt := retVal.Uint16s() switch f := fn.(type) { case func([]uint16, []uint16): reduceFirstU16(dt, rt, split, size, f) case func(uint16, uint16) uint16: genericReduceFirstU16(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint32: dt := data.Uint32s() rt := retVal.Uint32s() switch f := fn.(type) { case func([]uint32, []uint32): reduceFirstU32(dt, rt, split, size, f) case func(uint32, uint32) uint32: genericReduceFirstU32(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint64: dt := data.Uint64s() rt := retVal.Uint64s() switch f := fn.(type) { case func([]uint64, []uint64): reduceFirstU64(dt, rt, split, size, f) case func(uint64, uint64) uint64: genericReduceFirstU64(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uintptr: dt := data.Uintptrs() rt := retVal.Uintptrs() switch f := fn.(type) { case func([]uintptr, []uintptr): reduceFirstUintptr(dt, rt, split, size, f) case 
func(uintptr, uintptr) uintptr: genericReduceFirstUintptr(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Float32: dt := data.Float32s() rt := retVal.Float32s() switch f := fn.(type) { case func([]float32, []float32): reduceFirstF32(dt, rt, split, size, f) case func(float32, float32) float32: genericReduceFirstF32(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Float64: dt := data.Float64s() rt := retVal.Float64s() switch f := fn.(type) { case func([]float64, []float64): reduceFirstF64(dt, rt, split, size, f) case func(float64, float64) float64: genericReduceFirstF64(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Complex64: dt := data.Complex64s() rt := retVal.Complex64s() switch f := fn.(type) { case func([]complex64, []complex64): reduceFirstC64(dt, rt, split, size, f) case func(complex64, complex64) complex64: genericReduceFirstC64(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Complex128: dt := data.Complex128s() rt := retVal.Complex128s() switch f := fn.(type) { case func([]complex128, []complex128): reduceFirstC128(dt, rt, split, size, f) case func(complex128, complex128) complex128: genericReduceFirstC128(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case String: dt := data.Strings() rt := retVal.Strings() switch f := fn.(type) { case func([]string, []string): reduceFirstStr(dt, rt, split, size, f) case func(string, string) string: genericReduceFirstStr(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case UnsafePointer: dt := data.UnsafePointers() rt := retVal.UnsafePointers() switch f := fn.(type) { case func([]unsafe.Pointer, []unsafe.Pointer): reduceFirstUnsafePointer(dt, rt, split, size, f) case func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer: genericReduceFirstUnsafePointer(dt, rt, split, size, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil default: return errors.Errorf("Unsupported type %v for ReduceFirst", t) } } func (e E) ReduceLast(t reflect.Type, data *storage.Header, retVal *storage.Header, dimSize int, defaultValue interface{}, fn interface{}) (err error) { var ok bool switch t { case Bool: var def bool if def, ok = defaultValue.(bool); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Bools() rt := retVal.Bools() switch f := fn.(type) { case func([]bool) bool: reduceLastB(dt, rt, dimSize, def, f) case func(bool, bool) bool: genericReduceLastB(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int: var def int if def, ok = defaultValue.(int); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Ints() rt := retVal.Ints() switch f := fn.(type) { case func([]int) int: reduceLastI(dt, rt, dimSize, def, f) case func(int, int) int: genericReduceLastI(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int8: var def int8 if def, ok = defaultValue.(int8); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Int8s() rt := retVal.Int8s() switch f := fn.(type) { case func([]int8) int8: reduceLastI8(dt, rt, dimSize, def, f) case func(int8, int8) int8: genericReduceLastI8(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int16: var def int16 if def, ok 
= defaultValue.(int16); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Int16s() rt := retVal.Int16s() switch f := fn.(type) { case func([]int16) int16: reduceLastI16(dt, rt, dimSize, def, f) case func(int16, int16) int16: genericReduceLastI16(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int32: var def int32 if def, ok = defaultValue.(int32); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Int32s() rt := retVal.Int32s() switch f := fn.(type) { case func([]int32) int32: reduceLastI32(dt, rt, dimSize, def, f) case func(int32, int32) int32: genericReduceLastI32(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Int64: var def int64 if def, ok = defaultValue.(int64); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Int64s() rt := retVal.Int64s() switch f := fn.(type) { case func([]int64) int64: reduceLastI64(dt, rt, dimSize, def, f) case func(int64, int64) int64: genericReduceLastI64(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint: var def uint if def, ok = defaultValue.(uint); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Uints() rt := retVal.Uints() switch f := fn.(type) { case func([]uint) uint: reduceLastU(dt, rt, dimSize, def, f) case func(uint, uint) uint: genericReduceLastU(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint8: var def uint8 if def, ok = defaultValue.(uint8); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Uint8s() rt := retVal.Uint8s() switch f := fn.(type) { case func([]uint8) uint8: reduceLastU8(dt, rt, dimSize, def, f) case func(uint8, uint8) uint8: genericReduceLastU8(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint16: var def uint16 if def, ok = defaultValue.(uint16); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Uint16s() rt := retVal.Uint16s() switch f := fn.(type) { case func([]uint16) uint16: reduceLastU16(dt, rt, dimSize, def, f) case func(uint16, uint16) uint16: genericReduceLastU16(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint32: var def uint32 if def, ok = defaultValue.(uint32); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Uint32s() rt := retVal.Uint32s() switch f := fn.(type) { case func([]uint32) uint32: reduceLastU32(dt, rt, dimSize, def, f) case func(uint32, uint32) uint32: genericReduceLastU32(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uint64: var def uint64 if def, ok = defaultValue.(uint64); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Uint64s() rt := retVal.Uint64s() switch f := fn.(type) { case func([]uint64) uint64: reduceLastU64(dt, rt, dimSize, def, f) case func(uint64, uint64) uint64: genericReduceLastU64(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Uintptr: var def uintptr if def, ok = defaultValue.(uintptr); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Uintptrs() rt := retVal.Uintptrs() switch f := fn.(type) { case func([]uintptr) uintptr: 
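// Slice-form callbacks like this one reduce a whole innermost run per call; the pairwise form in the next arm is presumably folded element by element by the generic kernel instead.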
reduceLastUintptr(dt, rt, dimSize, def, f) case func(uintptr, uintptr) uintptr: genericReduceLastUintptr(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Float32: var def float32 if def, ok = defaultValue.(float32); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Float32s() rt := retVal.Float32s() switch f := fn.(type) { case func([]float32) float32: reduceLastF32(dt, rt, dimSize, def, f) case func(float32, float32) float32: genericReduceLastF32(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Float64: var def float64 if def, ok = defaultValue.(float64); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Float64s() rt := retVal.Float64s() switch f := fn.(type) { case func([]float64) float64: reduceLastF64(dt, rt, dimSize, def, f) case func(float64, float64) float64: genericReduceLastF64(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Complex64: var def complex64 if def, ok = defaultValue.(complex64); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Complex64s() rt := retVal.Complex64s() switch f := fn.(type) { case func([]complex64) complex64: reduceLastC64(dt, rt, dimSize, def, f) case func(complex64, complex64) complex64: genericReduceLastC64(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case Complex128: var def complex128 if def, ok = defaultValue.(complex128); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Complex128s() rt := retVal.Complex128s() switch f := fn.(type) { case func([]complex128) complex128: reduceLastC128(dt, rt, dimSize, def, f) case func(complex128, complex128) complex128: genericReduceLastC128(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case String: var def string if def, ok = defaultValue.(string); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.Strings() rt := retVal.Strings() switch f := fn.(type) { case func([]string) string: reduceLastStr(dt, rt, dimSize, def, f) case func(string, string) string: genericReduceLastStr(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil case UnsafePointer: var def unsafe.Pointer if def, ok = defaultValue.(unsafe.Pointer); !ok { return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } dt := data.UnsafePointers() rt := retVal.UnsafePointers() switch f := fn.(type) { case func([]unsafe.Pointer) unsafe.Pointer: reduceLastUnsafePointer(dt, rt, dimSize, def, f) case func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer: genericReduceLastUnsafePointer(dt, rt, dimSize, def, f) default: return errors.Errorf(reductionErrMsg, fn) } return nil default: return errors.Errorf("Unsupported type %v for ReduceLast", t) } } func (e E) ReduceDefault(t reflect.Type, data *storage.Header, retVal *storage.Header, dim0 int, dimSize int, outerStride int, stride int, expected int, fn interface{}) (err error) { var ok bool switch t { case Bool: var f func(bool, bool) bool if f, ok = fn.(func(bool, bool) bool); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Bools() rt := retVal.Bools() reduceDefaultB(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Int: var f func(int, int) int if f, ok = fn.(func(int, int) int); !ok { return 
errors.Errorf(reductionErrMsg, fn) } dt := data.Ints() rt := retVal.Ints() reduceDefaultI(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Int8: var f func(int8, int8) int8 if f, ok = fn.(func(int8, int8) int8); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Int8s() rt := retVal.Int8s() reduceDefaultI8(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Int16: var f func(int16, int16) int16 if f, ok = fn.(func(int16, int16) int16); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Int16s() rt := retVal.Int16s() reduceDefaultI16(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Int32: var f func(int32, int32) int32 if f, ok = fn.(func(int32, int32) int32); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Int32s() rt := retVal.Int32s() reduceDefaultI32(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Int64: var f func(int64, int64) int64 if f, ok = fn.(func(int64, int64) int64); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Int64s() rt := retVal.Int64s() reduceDefaultI64(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Uint: var f func(uint, uint) uint if f, ok = fn.(func(uint, uint) uint); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Uints() rt := retVal.Uints() reduceDefaultU(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Uint8: var f func(uint8, uint8) uint8 if f, ok = fn.(func(uint8, uint8) uint8); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Uint8s() rt := retVal.Uint8s() reduceDefaultU8(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Uint16: var f func(uint16, uint16) uint16 if f, ok = fn.(func(uint16, uint16) uint16); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Uint16s() rt := retVal.Uint16s() reduceDefaultU16(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Uint32: var f func(uint32, uint32) uint32 if f, ok = fn.(func(uint32, uint32) uint32); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Uint32s() rt := retVal.Uint32s() reduceDefaultU32(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Uint64: var f func(uint64, uint64) uint64 if f, ok = fn.(func(uint64, uint64) uint64); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Uint64s() rt := retVal.Uint64s() reduceDefaultU64(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Uintptr: var f func(uintptr, uintptr) uintptr if f, ok = fn.(func(uintptr, uintptr) uintptr); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Uintptrs() rt := retVal.Uintptrs() reduceDefaultUintptr(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Float32: var f func(float32, float32) float32 if f, ok = fn.(func(float32, float32) float32); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Float32s() rt := retVal.Float32s() reduceDefaultF32(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Float64: var f func(float64, float64) float64 if f, ok = fn.(func(float64, float64) float64); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Float64s() rt := retVal.Float64s() reduceDefaultF64(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Complex64: var f func(complex64, complex64) complex64 if f, ok = fn.(func(complex64, complex64) complex64); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Complex64s() rt 
:= retVal.Complex64s() reduceDefaultC64(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case Complex128: var f func(complex128, complex128) complex128 if f, ok = fn.(func(complex128, complex128) complex128); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Complex128s() rt := retVal.Complex128s() reduceDefaultC128(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case String: var f func(string, string) string if f, ok = fn.(func(string, string) string); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.Strings() rt := retVal.Strings() reduceDefaultStr(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil case UnsafePointer: var f func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer if f, ok = fn.(func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer); !ok { return errors.Errorf(reductionErrMsg, fn) } dt := data.UnsafePointers() rt := retVal.UnsafePointers() reduceDefaultUnsafePointer(dt, rt, dim0, dimSize, outerStride, stride, expected, f) return nil default: return errors.Errorf("Unsupported type %v for ReduceDefault", t) } } func (e E) Reduce(t reflect.Type, a *storage.Header, defaultValue interface{}, fn interface{}) (retVal interface{}, err error) { var ok bool switch t { case Bool: var f func(bool, bool) bool var def bool if f, ok = fn.(func(bool, bool) bool); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(bool); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceB(f, def, a.Bools()...) return case Int: var f func(int, int) int var def int if f, ok = fn.(func(int, int) int); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(int); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceI(f, def, a.Ints()...) return case Int8: var f func(int8, int8) int8 var def int8 if f, ok = fn.(func(int8, int8) int8); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(int8); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceI8(f, def, a.Int8s()...) return case Int16: var f func(int16, int16) int16 var def int16 if f, ok = fn.(func(int16, int16) int16); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(int16); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceI16(f, def, a.Int16s()...) return case Int32: var f func(int32, int32) int32 var def int32 if f, ok = fn.(func(int32, int32) int32); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(int32); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceI32(f, def, a.Int32s()...) return case Int64: var f func(int64, int64) int64 var def int64 if f, ok = fn.(func(int64, int64) int64); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(int64); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceI64(f, def, a.Int64s()...) return case Uint: var f func(uint, uint) uint var def uint if f, ok = fn.(func(uint, uint) uint); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(uint); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceU(f, def, a.Uints()...) 
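// ReduceU folds f across the entire backing slice, seeded with def; the result comes back to the caller boxed in retVal as an interface{}.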
return case Uint8: var f func(uint8, uint8) uint8 var def uint8 if f, ok = fn.(func(uint8, uint8) uint8); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(uint8); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceU8(f, def, a.Uint8s()...) return case Uint16: var f func(uint16, uint16) uint16 var def uint16 if f, ok = fn.(func(uint16, uint16) uint16); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(uint16); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceU16(f, def, a.Uint16s()...) return case Uint32: var f func(uint32, uint32) uint32 var def uint32 if f, ok = fn.(func(uint32, uint32) uint32); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(uint32); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceU32(f, def, a.Uint32s()...) return case Uint64: var f func(uint64, uint64) uint64 var def uint64 if f, ok = fn.(func(uint64, uint64) uint64); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(uint64); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceU64(f, def, a.Uint64s()...) return case Uintptr: var f func(uintptr, uintptr) uintptr var def uintptr if f, ok = fn.(func(uintptr, uintptr) uintptr); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(uintptr); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceUintptr(f, def, a.Uintptrs()...) return case Float32: var f func(float32, float32) float32 var def float32 if f, ok = fn.(func(float32, float32) float32); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(float32); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceF32(f, def, a.Float32s()...) return case Float64: var f func(float64, float64) float64 var def float64 if f, ok = fn.(func(float64, float64) float64); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(float64); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceF64(f, def, a.Float64s()...) return case Complex64: var f func(complex64, complex64) complex64 var def complex64 if f, ok = fn.(func(complex64, complex64) complex64); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(complex64); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceC64(f, def, a.Complex64s()...) return case Complex128: var f func(complex128, complex128) complex128 var def complex128 if f, ok = fn.(func(complex128, complex128) complex128); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(complex128); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceC128(f, def, a.Complex128s()...) return case String: var f func(string, string) string var def string if f, ok = fn.(func(string, string) string); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(string); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceStr(f, def, a.Strings()...) 
return case UnsafePointer: var f func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer var def unsafe.Pointer if f, ok = fn.(func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer); !ok { return nil, errors.Errorf(reductionErrMsg, fn) } if def, ok = defaultValue.(unsafe.Pointer); !ok { return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue) } retVal = ReduceUnsafePointer(f, def, a.UnsafePointers()...) return default: return nil, errors.Errorf("Unsupported type %v for Reduce", t) } } tensor-0.9.24/internal/execution/eng_unary.go000066400000000000000000000544371426512615100212750ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func (e E) Neg(t reflect.Type, a *storage.Header) (err error) { switch t { case Int: NegI(a.Ints()) return nil case Int8: NegI8(a.Int8s()) return nil case Int16: NegI16(a.Int16s()) return nil case Int32: NegI32(a.Int32s()) return nil case Int64: NegI64(a.Int64s()) return nil case Uint: NegU(a.Uints()) return nil case Uint8: NegU8(a.Uint8s()) return nil case Uint16: NegU16(a.Uint16s()) return nil case Uint32: NegU32(a.Uint32s()) return nil case Uint64: NegU64(a.Uint64s()) return nil case Float32: NegF32(a.Float32s()) return nil case Float64: NegF64(a.Float64s()) return nil case Complex64: NegC64(a.Complex64s()) return nil case Complex128: NegC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Neg", t) } } func (e E) Inv(t reflect.Type, a *storage.Header) (err error) { switch t { case Int: InvI(a.Ints()) return nil case Int8: InvI8(a.Int8s()) return nil case Int16: InvI16(a.Int16s()) return nil case Int32: InvI32(a.Int32s()) return nil case Int64: InvI64(a.Int64s()) return nil case Uint: InvU(a.Uints()) return nil case Uint8: InvU8(a.Uint8s()) return nil case Uint16: InvU16(a.Uint16s()) return nil case Uint32: InvU32(a.Uint32s()) return nil case Uint64: InvU64(a.Uint64s()) return nil case Float32: InvF32(a.Float32s()) return nil case Float64: InvF64(a.Float64s()) return nil case Complex64: InvC64(a.Complex64s()) return nil case Complex128: InvC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Inv", t) } } func (e E) Square(t reflect.Type, a *storage.Header) (err error) { switch t { case Int: SquareI(a.Ints()) return nil case Int8: SquareI8(a.Int8s()) return nil case Int16: SquareI16(a.Int16s()) return nil case Int32: SquareI32(a.Int32s()) return nil case Int64: SquareI64(a.Int64s()) return nil case Uint: SquareU(a.Uints()) return nil case Uint8: SquareU8(a.Uint8s()) return nil case Uint16: SquareU16(a.Uint16s()) return nil case Uint32: SquareU32(a.Uint32s()) return nil case Uint64: SquareU64(a.Uint64s()) return nil case Float32: SquareF32(a.Float32s()) return nil case Float64: SquareF64(a.Float64s()) return nil case Complex64: SquareC64(a.Complex64s()) return nil case Complex128: SquareC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Square", t) } } func (e E) Cube(t reflect.Type, a *storage.Header) (err error) { switch t { case Int: CubeI(a.Ints()) return nil case Int8: CubeI8(a.Int8s()) return nil case Int16: CubeI16(a.Int16s()) return nil case Int32: CubeI32(a.Int32s()) return nil case Int64: CubeI64(a.Int64s()) return nil case Uint: CubeU(a.Uints()) return nil case Uint8: CubeU8(a.Uint8s()) return nil case Uint16: CubeU16(a.Uint16s()) return nil case Uint32: CubeU32(a.Uint32s()) return nil case Uint64: 
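// As with the other unary kernels in this file, CubeU64 mutates the header's backing slice in place; the method itself only reports an error (or nil).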
CubeU64(a.Uint64s()) return nil case Float32: CubeF32(a.Float32s()) return nil case Float64: CubeF64(a.Float64s()) return nil case Complex64: CubeC64(a.Complex64s()) return nil case Complex128: CubeC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Cube", t) } } func (e E) Exp(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: ExpF32(a.Float32s()) return nil case Float64: ExpF64(a.Float64s()) return nil case Complex64: ExpC64(a.Complex64s()) return nil case Complex128: ExpC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Exp", t) } } func (e E) Tanh(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: TanhF32(a.Float32s()) return nil case Float64: TanhF64(a.Float64s()) return nil case Complex64: TanhC64(a.Complex64s()) return nil case Complex128: TanhC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Tanh", t) } } func (e E) Log(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: LogF32(a.Float32s()) return nil case Float64: LogF64(a.Float64s()) return nil case Complex64: LogC64(a.Complex64s()) return nil case Complex128: LogC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Log", t) } } func (e E) Log2(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: Log2F32(a.Float32s()) return nil case Float64: Log2F64(a.Float64s()) return nil default: return errors.Errorf("Unsupported type %v for Log2", t) } } func (e E) Log10(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: Log10F32(a.Float32s()) return nil case Float64: Log10F64(a.Float64s()) return nil case Complex64: Log10C64(a.Complex64s()) return nil case Complex128: Log10C128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Log10", t) } } func (e E) Sqrt(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: SqrtF32(a.Float32s()) return nil case Float64: SqrtF64(a.Float64s()) return nil case Complex64: SqrtC64(a.Complex64s()) return nil case Complex128: SqrtC128(a.Complex128s()) return nil default: return errors.Errorf("Unsupported type %v for Sqrt", t) } } func (e E) Cbrt(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: CbrtF32(a.Float32s()) return nil case Float64: CbrtF64(a.Float64s()) return nil default: return errors.Errorf("Unsupported type %v for Cbrt", t) } } func (e E) InvSqrt(t reflect.Type, a *storage.Header) (err error) { switch t { case Float32: InvSqrtF32(a.Float32s()) return nil case Float64: InvSqrtF64(a.Float64s()) return nil default: return errors.Errorf("Unsupported type %v for InvSqrt", t) } } func (e E) NegIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Int: return NegIterI(a.Ints(), ait) case Int8: return NegIterI8(a.Int8s(), ait) case Int16: return NegIterI16(a.Int16s(), ait) case Int32: return NegIterI32(a.Int32s(), ait) case Int64: return NegIterI64(a.Int64s(), ait) case Uint: return NegIterU(a.Uints(), ait) case Uint8: return NegIterU8(a.Uint8s(), ait) case Uint16: return NegIterU16(a.Uint16s(), ait) case Uint32: return NegIterU32(a.Uint32s(), ait) case Uint64: return NegIterU64(a.Uint64s(), ait) case Float32: return NegIterF32(a.Float32s(), ait) case Float64: return NegIterF64(a.Float64s(), ait) case Complex64: return NegIterC64(a.Complex64s(), ait) case Complex128: return NegIterC128(a.Complex128s(), ait) default: return 
errors.Errorf("Unsupported type %v for NegIter", t) } } func (e E) InvIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Int: return InvIterI(a.Ints(), ait) case Int8: return InvIterI8(a.Int8s(), ait) case Int16: return InvIterI16(a.Int16s(), ait) case Int32: return InvIterI32(a.Int32s(), ait) case Int64: return InvIterI64(a.Int64s(), ait) case Uint: return InvIterU(a.Uints(), ait) case Uint8: return InvIterU8(a.Uint8s(), ait) case Uint16: return InvIterU16(a.Uint16s(), ait) case Uint32: return InvIterU32(a.Uint32s(), ait) case Uint64: return InvIterU64(a.Uint64s(), ait) case Float32: return InvIterF32(a.Float32s(), ait) case Float64: return InvIterF64(a.Float64s(), ait) case Complex64: return InvIterC64(a.Complex64s(), ait) case Complex128: return InvIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for InvIter", t) } } func (e E) SquareIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Int: return SquareIterI(a.Ints(), ait) case Int8: return SquareIterI8(a.Int8s(), ait) case Int16: return SquareIterI16(a.Int16s(), ait) case Int32: return SquareIterI32(a.Int32s(), ait) case Int64: return SquareIterI64(a.Int64s(), ait) case Uint: return SquareIterU(a.Uints(), ait) case Uint8: return SquareIterU8(a.Uint8s(), ait) case Uint16: return SquareIterU16(a.Uint16s(), ait) case Uint32: return SquareIterU32(a.Uint32s(), ait) case Uint64: return SquareIterU64(a.Uint64s(), ait) case Float32: return SquareIterF32(a.Float32s(), ait) case Float64: return SquareIterF64(a.Float64s(), ait) case Complex64: return SquareIterC64(a.Complex64s(), ait) case Complex128: return SquareIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for SquareIter", t) } } func (e E) CubeIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Int: return CubeIterI(a.Ints(), ait) case Int8: return CubeIterI8(a.Int8s(), ait) case Int16: return CubeIterI16(a.Int16s(), ait) case Int32: return CubeIterI32(a.Int32s(), ait) case Int64: return CubeIterI64(a.Int64s(), ait) case Uint: return CubeIterU(a.Uints(), ait) case Uint8: return CubeIterU8(a.Uint8s(), ait) case Uint16: return CubeIterU16(a.Uint16s(), ait) case Uint32: return CubeIterU32(a.Uint32s(), ait) case Uint64: return CubeIterU64(a.Uint64s(), ait) case Float32: return CubeIterF32(a.Float32s(), ait) case Float64: return CubeIterF64(a.Float64s(), ait) case Complex64: return CubeIterC64(a.Complex64s(), ait) case Complex128: return CubeIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for CubeIter", t) } } func (e E) ExpIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return ExpIterF32(a.Float32s(), ait) case Float64: return ExpIterF64(a.Float64s(), ait) case Complex64: return ExpIterC64(a.Complex64s(), ait) case Complex128: return ExpIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for ExpIter", t) } } func (e E) TanhIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return TanhIterF32(a.Float32s(), ait) case Float64: return TanhIterF64(a.Float64s(), ait) case Complex64: return TanhIterC64(a.Complex64s(), ait) case Complex128: return TanhIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for TanhIter", t) } } func (e E) LogIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return LogIterF32(a.Float32s(), 
ait) case Float64: return LogIterF64(a.Float64s(), ait) case Complex64: return LogIterC64(a.Complex64s(), ait) case Complex128: return LogIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for LogIter", t) } } func (e E) Log2Iter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return Log2IterF32(a.Float32s(), ait) case Float64: return Log2IterF64(a.Float64s(), ait) default: return errors.Errorf("Unsupported type %v for Log2Iter", t) } } func (e E) Log10Iter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return Log10IterF32(a.Float32s(), ait) case Float64: return Log10IterF64(a.Float64s(), ait) case Complex64: return Log10IterC64(a.Complex64s(), ait) case Complex128: return Log10IterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for Log10Iter", t) } } func (e E) SqrtIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return SqrtIterF32(a.Float32s(), ait) case Float64: return SqrtIterF64(a.Float64s(), ait) case Complex64: return SqrtIterC64(a.Complex64s(), ait) case Complex128: return SqrtIterC128(a.Complex128s(), ait) default: return errors.Errorf("Unsupported type %v for SqrtIter", t) } } func (e E) CbrtIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return CbrtIterF32(a.Float32s(), ait) case Float64: return CbrtIterF64(a.Float64s(), ait) default: return errors.Errorf("Unsupported type %v for CbrtIter", t) } } func (e E) InvSqrtIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Float32: return InvSqrtIterF32(a.Float32s(), ait) case Float64: return InvSqrtIterF64(a.Float64s(), ait) default: return errors.Errorf("Unsupported type %v for InvSqrtIter", t) } } func (e E) Abs(t reflect.Type, a *storage.Header) (err error) { switch t { case Int: AbsI(a.Ints()) return nil case Int8: AbsI8(a.Int8s()) return nil case Int16: AbsI16(a.Int16s()) return nil case Int32: AbsI32(a.Int32s()) return nil case Int64: AbsI64(a.Int64s()) return nil case Float32: AbsF32(a.Float32s()) return nil case Float64: AbsF64(a.Float64s()) return nil default: return errors.Errorf("Unsupported type %v for Abs", t) } } func (e E) Sign(t reflect.Type, a *storage.Header) (err error) { switch t { case Int: SignI(a.Ints()) return nil case Int8: SignI8(a.Int8s()) return nil case Int16: SignI16(a.Int16s()) return nil case Int32: SignI32(a.Int32s()) return nil case Int64: SignI64(a.Int64s()) return nil case Float32: SignF32(a.Float32s()) return nil case Float64: SignF64(a.Float64s()) return nil default: return errors.Errorf("Unsupported type %v for Sign", t) } } func (e E) AbsIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Int: return AbsIterI(a.Ints(), ait) case Int8: return AbsIterI8(a.Int8s(), ait) case Int16: return AbsIterI16(a.Int16s(), ait) case Int32: return AbsIterI32(a.Int32s(), ait) case Int64: return AbsIterI64(a.Int64s(), ait) case Float32: return AbsIterF32(a.Float32s(), ait) case Float64: return AbsIterF64(a.Float64s(), ait) default: return errors.Errorf("Unsupported type %v for AbsIter", t) } } func (e E) SignIter(t reflect.Type, a *storage.Header, ait Iterator) (err error) { switch t { case Int: return SignIterI(a.Ints(), ait) case Int8: return SignIterI8(a.Int8s(), ait) case Int16: return SignIterI16(a.Int16s(), ait) case Int32: return SignIterI32(a.Int32s(), ait) case Int64: return SignIterI64(a.Int64s(), ait) case 
Float32: return SignIterF32(a.Float32s(), ait) case Float64: return SignIterF64(a.Float64s(), ait) default: return errors.Errorf("Unsupported type %v for SignIter", t) } } func (e E) Clamp(t reflect.Type, a *storage.Header, minVal interface{}, maxVal interface{}) (err error) { var ok bool switch t { case Int: var min, max int if min, ok = minVal.(int); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampI(a.Ints(), min, max) return nil case Int8: var min, max int8 if min, ok = minVal.(int8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampI8(a.Int8s(), min, max) return nil case Int16: var min, max int16 if min, ok = minVal.(int16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampI16(a.Int16s(), min, max) return nil case Int32: var min, max int32 if min, ok = minVal.(int32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampI32(a.Int32s(), min, max) return nil case Int64: var min, max int64 if min, ok = minVal.(int64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampI64(a.Int64s(), min, max) return nil case Uint: var min, max uint if min, ok = minVal.(uint); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampU(a.Uints(), min, max) return nil case Uint8: var min, max uint8 if min, ok = minVal.(uint8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampU8(a.Uint8s(), min, max) return nil case Uint16: var min, max uint16 if min, ok = minVal.(uint16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampU16(a.Uint16s(), min, max) return nil case Uint32: var min, max uint32 if min, ok = minVal.(uint32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampU32(a.Uint32s(), min, max) return nil case Uint64: var min, max uint64 if min, ok = minVal.(uint64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampU64(a.Uint64s(), min, max) return nil case Float32: var min, max float32 if min, ok = minVal.(float32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(float32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } 
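// The bounds must be exactly the element type: a float64 bound passed for a float32 tensor fails the assertions above instead of being converted silently. A hedged usage sketch, again assuming an existing float32-backed hdr:
//
//	err := e.Clamp(Float32, hdr, float32(0), float32(1))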
ClampF32(a.Float32s(), min, max) return nil case Float64: var min, max float64 if min, ok = minVal.(float64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(float64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } ClampF64(a.Float64s(), min, max) return nil default: return errors.Errorf("Unsupported type %v for Clamp", t) } } func (e E) ClampIter(t reflect.Type, a *storage.Header, ait Iterator, minVal interface{}, maxVal interface{}) (err error) { var ok bool switch t { case Int: var min, max int if min, ok = minVal.(int); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterI(a.Ints(), ait, min, max) case Int8: var min, max int8 if min, ok = minVal.(int8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterI8(a.Int8s(), ait, min, max) case Int16: var min, max int16 if min, ok = minVal.(int16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterI16(a.Int16s(), ait, min, max) case Int32: var min, max int32 if min, ok = minVal.(int32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterI32(a.Int32s(), ait, min, max) case Int64: var min, max int64 if min, ok = minVal.(int64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(int64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterI64(a.Int64s(), ait, min, max) case Uint: var min, max uint if min, ok = minVal.(uint); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterU(a.Uints(), ait, min, max) case Uint8: var min, max uint8 if min, ok = minVal.(uint8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint8); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterU8(a.Uint8s(), ait, min, max) case Uint16: var min, max uint16 if min, ok = minVal.(uint16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint16); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterU16(a.Uint16s(), ait, min, max) case Uint32: var min, max uint32 if min, ok = minVal.(uint32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterU32(a.Uint32s(), ait, min, max) case Uint64: var min, max uint64 if min, ok = minVal.(uint64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(uint64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } 
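	// ClampIter and the other *Iter variants mirror the flat versions but
	// hand the backing slice to iterator-driven kernels, so they also work
	// on strided or transposed views where the valid indices are not simply
	// 0..len-1.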
return ClampIterU64(a.Uint64s(), ait, min, max) case Float32: var min, max float32 if min, ok = minVal.(float32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(float32); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterF32(a.Float32s(), ait, min, max) case Float64: var min, max float64 if min, ok = minVal.(float64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min") } if max, ok = maxVal.(float64); !ok { return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max") } return ClampIterF64(a.Float64s(), ait, min, max) default: return errors.Errorf("Unsupported type %v for Clamp", t) } } tensor-0.9.24/internal/execution/generic_argmethods.go000066400000000000000000000322631426512615100231300ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "math" "github.com/chewxy/math32" ) func ArgmaxI(a []int) int { var set bool var f int var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxI8(a []int8) int { var set bool var f int8 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxI16(a []int16) int { var set bool var f int16 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxI32(a []int32) int { var set bool var f int32 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxI64(a []int64) int { var set bool var f int64 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxU(a []uint) int { var set bool var f uint var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxU8(a []uint8) int { var set bool var f uint8 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxU16(a []uint16) int { var set bool var f uint16 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxU32(a []uint32) int { var set bool var f uint32 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxU64(a []uint64) int { var set bool var f uint64 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxF32(a []float32) int { var set bool var f float32 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if math32.IsNaN(v) || math32.IsInf(v, 1) { max = i return max } if v > f { max = i f = v } } return max } func ArgmaxF64(a []float64) int { var set bool var f float64 var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if math.IsNaN(v) || math.IsInf(v, 1) { max = i return max } if v > f { max = i f = v } } return max } func ArgmaxStr(a []string) int { var set bool var f string var max int for i := range a { v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedI(a 
[]int, mask []bool) int { var set bool var f int var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedI8(a []int8, mask []bool) int { var set bool var f int8 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedI16(a []int16, mask []bool) int { var set bool var f int16 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedI32(a []int32, mask []bool) int { var set bool var f int32 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedI64(a []int64, mask []bool) int { var set bool var f int64 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedU(a []uint, mask []bool) int { var set bool var f uint var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedU8(a []uint8, mask []bool) int { var set bool var f uint8 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedU16(a []uint16, mask []bool) int { var set bool var f uint16 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedU32(a []uint32, mask []bool) int { var set bool var f uint32 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedU64(a []uint64, mask []bool) int { var set bool var f uint64 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgmaxMaskedF32(a []float32, mask []bool) int { var set bool var f float32 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if math32.IsNaN(v) || math32.IsInf(v, 1) { max = i return max } if v > f { max = i f = v } } return max } func ArgmaxMaskedF64(a []float64, mask []bool) int { var set bool var f float64 var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if math.IsNaN(v) || math.IsInf(v, 1) { max = i return max } if v > f { max = i f = v } } return max } func ArgmaxMaskedStr(a []string, mask []bool) int { var set bool var f string var max int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v max = i set = true continue } if v > f { max = i f = v } } return max } func ArgminI(a []int) int { var set bool var f int var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminI8(a []int8) int { var set bool var f int8 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminI16(a []int16) int { var set bool var f int16 var min int for i := range a { v := a[i] if !set { f 
= v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminI32(a []int32) int { var set bool var f int32 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminI64(a []int64) int { var set bool var f int64 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminU(a []uint) int { var set bool var f uint var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminU8(a []uint8) int { var set bool var f uint8 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminU16(a []uint16) int { var set bool var f uint16 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminU32(a []uint32) int { var set bool var f uint32 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminU64(a []uint64) int { var set bool var f uint64 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminF32(a []float32) int { var set bool var f float32 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if math32.IsNaN(v) || math32.IsInf(v, -1) { min = i return min } if v < f { min = i f = v } } return min } func ArgminF64(a []float64) int { var set bool var f float64 var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if math.IsNaN(v) || math.IsInf(v, -1) { min = i return min } if v < f { min = i f = v } } return min } func ArgminStr(a []string) int { var set bool var f string var min int for i := range a { v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedI(a []int, mask []bool) int { var set bool var f int var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedI8(a []int8, mask []bool) int { var set bool var f int8 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedI16(a []int16, mask []bool) int { var set bool var f int16 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedI32(a []int32, mask []bool) int { var set bool var f int32 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedI64(a []int64, mask []bool) int { var set bool var f int64 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedU(a []uint, mask []bool) int { var set bool var f uint var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedU8(a []uint8, mask []bool) int { var set bool var f uint8 var min int for i := range a { if mask[i] { 
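			// A true mask entry excludes that element from the reduction
			// entirely: ArgminMaskedF64([]float64{3, 1, 2}, []bool{false,
			// true, false}) skips the 1 at index 1 and returns 2. The float
			// kernels also short-circuit, so the first NaN or +Inf wins
			// Argmax immediately (NaN or -Inf for Argmin).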
continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedU16(a []uint16, mask []bool) int { var set bool var f uint16 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedU32(a []uint32, mask []bool) int { var set bool var f uint32 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedU64(a []uint64, mask []bool) int { var set bool var f uint64 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } func ArgminMaskedF32(a []float32, mask []bool) int { var set bool var f float32 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if math32.IsNaN(v) || math32.IsInf(v, -1) { min = i return min } if v < f { min = i f = v } } return min } func ArgminMaskedF64(a []float64, mask []bool) int { var set bool var f float64 var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if math.IsNaN(v) || math.IsInf(v, -1) { min = i return min } if v < f { min = i f = v } } return min } func ArgminMaskedStr(a []string, mask []bool) int { var set bool var f string var min int for i := range a { if mask[i] { continue } v := a[i] if !set { f = v min = i set = true continue } if v < f { min = i f = v } } return min } tensor-0.9.24/internal/execution/generic_arith.go000066400000000000000000000122051426512615100220740ustar00rootroot00000000000000package execution import ( "math" "math/cmplx" "github.com/chewxy/math32" ) /* GENERATED FILE (ONCE). 
DO NOT EDIT */

func AddI(a int, b int) int { return a + b }
func AddI8(a int8, b int8) int8 { return a + b }
func AddI16(a int16, b int16) int16 { return a + b }
func AddI32(a int32, b int32) int32 { return a + b }
func AddI64(a int64, b int64) int64 { return a + b }
func AddU(a uint, b uint) uint { return a + b }
func AddU8(a uint8, b uint8) uint8 { return a + b }
func AddU16(a uint16, b uint16) uint16 { return a + b }
func AddU32(a uint32, b uint32) uint32 { return a + b }
func AddU64(a uint64, b uint64) uint64 { return a + b }
func AddF32(a float32, b float32) float32 { return a + b }
func AddF64(a float64, b float64) float64 { return a + b }
func AddC64(a complex64, b complex64) complex64 { return a + b }
func AddC128(a complex128, b complex128) complex128 { return a + b }
func AddStr(a string, b string) string { return a + b }
func SubI(a int, b int) int { return a - b }
func SubI8(a int8, b int8) int8 { return a - b }
func SubI16(a int16, b int16) int16 { return a - b }
func SubI32(a int32, b int32) int32 { return a - b }
func SubI64(a int64, b int64) int64 { return a - b }
func SubU(a uint, b uint) uint { return a - b }
func SubU8(a uint8, b uint8) uint8 { return a - b }
func SubU16(a uint16, b uint16) uint16 { return a - b }
func SubU32(a uint32, b uint32) uint32 { return a - b }
func SubU64(a uint64, b uint64) uint64 { return a - b }
func SubF32(a float32, b float32) float32 { return a - b }
func SubF64(a float64, b float64) float64 { return a - b }
func SubC64(a complex64, b complex64) complex64 { return a - b }
func SubC128(a complex128, b complex128) complex128 { return a - b }
func MulI(a int, b int) int { return a * b }
func MulI8(a int8, b int8) int8 { return a * b }
func MulI16(a int16, b int16) int16 { return a * b }
func MulI32(a int32, b int32) int32 { return a * b }
func MulI64(a int64, b int64) int64 { return a * b }
func MulU(a uint, b uint) uint { return a * b }
func MulU8(a uint8, b uint8) uint8 { return a * b }
func MulU16(a uint16, b uint16) uint16 { return a * b }
func MulU32(a uint32, b uint32) uint32 { return a * b }
func MulU64(a uint64, b uint64) uint64 { return a * b }
func MulF32(a float32, b float32) float32 { return a * b }
func MulF64(a float64, b float64) float64 { return a * b }
func MulC64(a complex64, b complex64) complex64 { return a * b }
func MulC128(a complex128, b complex128) complex128 { return a * b }
func DivI(a int, b int) int { return a / b }
func DivI8(a int8, b int8) int8 { return a / b }
func DivI16(a int16, b int16) int16 { return a / b }
func DivI32(a int32, b int32) int32 { return a / b }
func DivI64(a int64, b int64) int64 { return a / b }
func DivU(a uint, b uint) uint { return a / b }
func DivU8(a uint8, b uint8) uint8 { return a / b }
func DivU16(a uint16, b uint16) uint16 { return a / b }
func DivU32(a uint32, b uint32) uint32 { return a / b }
func DivU64(a uint64, b uint64) uint64 { return a / b }
func DivF32(a float32, b float32) float32 { return a / b }
func DivF64(a float64, b float64) float64 { return a / b }
func DivC64(a complex64, b complex64) complex64 { return a / b }
func DivC128(a complex128, b complex128) complex128 { return a / b }
func PowF32(a float32, b float32) float32 { return math32.Pow(a, b) }
func PowF64(a float64, b float64) float64 { return math.Pow(a, b) }
func PowC64(a complex64, b complex64) complex64 {
	return complex64(cmplx.Pow(complex128(a), complex128(b)))
}
func PowC128(a complex128, b complex128) complex128 { return cmplx.Pow(a, b) }
func ModI(a int, b int) int { return a % b }
func ModI8(a int8,
b int8) int8 { return a % b } func ModI16(a int16, b int16) int16 { return a % b } func ModI32(a int32, b int32) int32 { return a % b } func ModI64(a int64, b int64) int64 { return a % b } func ModU(a uint, b uint) uint { return a % b } func ModU8(a uint8, b uint8) uint8 { return a % b } func ModU16(a uint16, b uint16) uint16 { return a % b } func ModU32(a uint32, b uint32) uint32 { return a % b } func ModU64(a uint64, b uint64) uint64 { return a % b } func ModF32(a float32, b float32) float32 { return math32.Mod(a, b) } func ModF64(a float64, b float64) float64 { return math.Mod(a, b) } tensor-0.9.24/internal/execution/generic_arith_mixed.go000066400000000000000000004125531426512615100232740ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "math" "math/cmplx" "github.com/chewxy/math32" ) func AddSVI(a int, b []int) { for i := range b { b[i] = a + b[i] } } func AddSVI8(a int8, b []int8) { for i := range b { b[i] = a + b[i] } } func AddSVI16(a int16, b []int16) { for i := range b { b[i] = a + b[i] } } func AddSVI32(a int32, b []int32) { for i := range b { b[i] = a + b[i] } } func AddSVI64(a int64, b []int64) { for i := range b { b[i] = a + b[i] } } func AddSVU(a uint, b []uint) { for i := range b { b[i] = a + b[i] } } func AddSVU8(a uint8, b []uint8) { for i := range b { b[i] = a + b[i] } } func AddSVU16(a uint16, b []uint16) { for i := range b { b[i] = a + b[i] } } func AddSVU32(a uint32, b []uint32) { for i := range b { b[i] = a + b[i] } } func AddSVU64(a uint64, b []uint64) { for i := range b { b[i] = a + b[i] } } func AddSVF32(a float32, b []float32) { for i := range b { b[i] = a + b[i] } } func AddSVF64(a float64, b []float64) { for i := range b { b[i] = a + b[i] } } func AddSVC64(a complex64, b []complex64) { for i := range b { b[i] = a + b[i] } } func AddSVC128(a complex128, b []complex128) { for i := range b { b[i] = a + b[i] } } func AddSVStr(a string, b []string) { for i := range b { b[i] = a + b[i] } } func SubSVI(a int, b []int) { for i := range b { b[i] = a - b[i] } } func SubSVI8(a int8, b []int8) { for i := range b { b[i] = a - b[i] } } func SubSVI16(a int16, b []int16) { for i := range b { b[i] = a - b[i] } } func SubSVI32(a int32, b []int32) { for i := range b { b[i] = a - b[i] } } func SubSVI64(a int64, b []int64) { for i := range b { b[i] = a - b[i] } } func SubSVU(a uint, b []uint) { for i := range b { b[i] = a - b[i] } } func SubSVU8(a uint8, b []uint8) { for i := range b { b[i] = a - b[i] } } func SubSVU16(a uint16, b []uint16) { for i := range b { b[i] = a - b[i] } } func SubSVU32(a uint32, b []uint32) { for i := range b { b[i] = a - b[i] } } func SubSVU64(a uint64, b []uint64) { for i := range b { b[i] = a - b[i] } } func SubSVF32(a float32, b []float32) { for i := range b { b[i] = a - b[i] } } func SubSVF64(a float64, b []float64) { for i := range b { b[i] = a - b[i] } } func SubSVC64(a complex64, b []complex64) { for i := range b { b[i] = a - b[i] } } func SubSVC128(a complex128, b []complex128) { for i := range b { b[i] = a - b[i] } } func MulSVI(a int, b []int) { for i := range b { b[i] = a * b[i] } } func MulSVI8(a int8, b []int8) { for i := range b { b[i] = a * b[i] } } func MulSVI16(a int16, b []int16) { for i := range b { b[i] = a * b[i] } } func MulSVI32(a int32, b []int32) { for i := range b { b[i] = a * b[i] } } func MulSVI64(a int64, b []int64) { for i := range b { b[i] = a * b[i] } } func MulSVU(a uint, b []uint) { for i := range b { b[i] = a * b[i] } } func MulSVU8(a uint8, b []uint8) { for i := range b { 
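		// The SV kernels compute scalar-OP-vector with the scalar on the
		// left and overwrite b in place; the Incr and Iter forms below
		// follow the same naming scheme. For instance, MulSVF64(2, xs)
		// doubles every element of xs.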
b[i] = a * b[i] } } func MulSVU16(a uint16, b []uint16) { for i := range b { b[i] = a * b[i] } } func MulSVU32(a uint32, b []uint32) { for i := range b { b[i] = a * b[i] } } func MulSVU64(a uint64, b []uint64) { for i := range b { b[i] = a * b[i] } } func MulSVF32(a float32, b []float32) { for i := range b { b[i] = a * b[i] } } func MulSVF64(a float64, b []float64) { for i := range b { b[i] = a * b[i] } } func MulSVC64(a complex64, b []complex64) { for i := range b { b[i] = a * b[i] } } func MulSVC128(a complex128, b []complex128) { for i := range b { b[i] = a * b[i] } } func DivSVI(a int, b []int) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVI8(a int8, b []int8) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVI16(a int16, b []int16) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVI32(a int32, b []int32) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVI64(a int64, b []int64) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVU(a uint, b []uint) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVU8(a uint8, b []uint8) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVU16(a uint16, b []uint16) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVU32(a uint32, b []uint32) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVU64(a uint64, b []uint64) (err error) { var errs errorIndices for i := range b { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivSVF32(a float32, b []float32) { for i := range b { b[i] = a / b[i] } } func DivSVF64(a float64, b []float64) { for i := range b { b[i] = a / b[i] } } func DivSVC64(a complex64, b []complex64) { for i := range b { b[i] = a / b[i] } } func DivSVC128(a complex128, b []complex128) { for i := range b { b[i] = a / b[i] } } func PowSVF32(a float32, b []float32) { for i := range b { b[i] = math32.Pow(a, b[i]) } } func PowSVF64(a float64, b []float64) { for i := range b { b[i] = math.Pow(a, b[i]) } } func PowSVC64(a complex64, b []complex64) { for i := range b { b[i] = complex64(cmplx.Pow(complex128(a), 
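		// math/cmplx only operates on complex128, so the complex64 kernels
		// round-trip through complex128 and narrow the result back down.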
complex128(b[i]))) } } func PowSVC128(a complex128, b []complex128) { for i := range b { b[i] = cmplx.Pow(a, b[i]) } } func ModSVI(a int, b []int) { for i := range b { b[i] = a % b[i] } } func ModSVI8(a int8, b []int8) { for i := range b { b[i] = a % b[i] } } func ModSVI16(a int16, b []int16) { for i := range b { b[i] = a % b[i] } } func ModSVI32(a int32, b []int32) { for i := range b { b[i] = a % b[i] } } func ModSVI64(a int64, b []int64) { for i := range b { b[i] = a % b[i] } } func ModSVU(a uint, b []uint) { for i := range b { b[i] = a % b[i] } } func ModSVU8(a uint8, b []uint8) { for i := range b { b[i] = a % b[i] } } func ModSVU16(a uint16, b []uint16) { for i := range b { b[i] = a % b[i] } } func ModSVU32(a uint32, b []uint32) { for i := range b { b[i] = a % b[i] } } func ModSVU64(a uint64, b []uint64) { for i := range b { b[i] = a % b[i] } } func ModSVF32(a float32, b []float32) { for i := range b { b[i] = math32.Mod(a, b[i]) } } func ModSVF64(a float64, b []float64) { for i := range b { b[i] = math.Mod(a, b[i]) } } func AddIncrSVI(a int, b []int, incr []int) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVI8(a int8, b []int8, incr []int8) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVI16(a int16, b []int16, incr []int16) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVI32(a int32, b []int32, incr []int32) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVI64(a int64, b []int64, incr []int64) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVU(a uint, b []uint, incr []uint) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVU8(a uint8, b []uint8, incr []uint8) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVU16(a uint16, b []uint16, incr []uint16) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVU32(a uint32, b []uint32, incr []uint32) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVU64(a uint64, b []uint64, incr []uint64) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVF32(a float32, b []float32, incr []float32) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVF64(a float64, b []float64, incr []float64) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVC64(a complex64, b []complex64, incr []complex64) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVC128(a complex128, b []complex128, incr []complex128) { for i := range incr { incr[i] += a + b[i] } } func AddIncrSVStr(a string, b []string, incr []string) { for i := range incr { incr[i] += a + b[i] } } func SubIncrSVI(a int, b []int, incr []int) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVI8(a int8, b []int8, incr []int8) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVI16(a int16, b []int16, incr []int16) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVI32(a int32, b []int32, incr []int32) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVI64(a int64, b []int64, incr []int64) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVU(a uint, b []uint, incr []uint) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVU8(a uint8, b []uint8, incr []uint8) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVU16(a uint16, b []uint16, incr []uint16) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVU32(a uint32, b []uint32, incr []uint32) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVU64(a uint64, b []uint64, incr []uint64) { for i := range incr { incr[i] += a - 
b[i] } } func SubIncrSVF32(a float32, b []float32, incr []float32) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVF64(a float64, b []float64, incr []float64) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVC64(a complex64, b []complex64, incr []complex64) { for i := range incr { incr[i] += a - b[i] } } func SubIncrSVC128(a complex128, b []complex128, incr []complex128) { for i := range incr { incr[i] += a - b[i] } } func MulIncrSVI(a int, b []int, incr []int) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVI8(a int8, b []int8, incr []int8) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVI16(a int16, b []int16, incr []int16) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVI32(a int32, b []int32, incr []int32) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVI64(a int64, b []int64, incr []int64) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVU(a uint, b []uint, incr []uint) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVU8(a uint8, b []uint8, incr []uint8) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVU16(a uint16, b []uint16, incr []uint16) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVU32(a uint32, b []uint32, incr []uint32) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVU64(a uint64, b []uint64, incr []uint64) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVF32(a float32, b []float32, incr []float32) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVF64(a float64, b []float64, incr []float64) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVC64(a complex64, b []complex64, incr []complex64) { for i := range incr { incr[i] += a * b[i] } } func MulIncrSVC128(a complex128, b []complex128, incr []complex128) { for i := range incr { incr[i] += a * b[i] } } func DivIncrSVI(a int, b []int, incr []int) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVI8(a int8, b []int8, incr []int8) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVI16(a int16, b []int16, incr []int16) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVI32(a int32, b []int32, incr []int32) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVI64(a int64, b []int64, incr []int64) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVU(a uint, b []uint, incr []uint) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVU8(a uint8, b []uint8, incr []uint8) (err error) { var errs errorIndices for i := range incr 
{ if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVU16(a uint16, b []uint16, incr []uint16) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVU32(a uint32, b []uint32, incr []uint32) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVU64(a uint64, b []uint64, incr []uint64) (err error) { var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrSVF32(a float32, b []float32, incr []float32) { for i := range incr { incr[i] += a / b[i] } } func DivIncrSVF64(a float64, b []float64, incr []float64) { for i := range incr { incr[i] += a / b[i] } } func DivIncrSVC64(a complex64, b []complex64, incr []complex64) { for i := range incr { incr[i] += a / b[i] } } func DivIncrSVC128(a complex128, b []complex128, incr []complex128) { for i := range incr { incr[i] += a / b[i] } } func PowIncrSVF32(a float32, b []float32, incr []float32) { for i := range incr { incr[i] += math32.Pow(a, b[i]) } } func PowIncrSVF64(a float64, b []float64, incr []float64) { for i := range incr { incr[i] += math.Pow(a, b[i]) } } func PowIncrSVC64(a complex64, b []complex64, incr []complex64) { for i := range incr { incr[i] += complex64(cmplx.Pow(complex128(a), complex128(b[i]))) } } func PowIncrSVC128(a complex128, b []complex128, incr []complex128) { for i := range incr { incr[i] += cmplx.Pow(a, b[i]) } } func ModIncrSVI(a int, b []int, incr []int) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVI8(a int8, b []int8, incr []int8) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVI16(a int16, b []int16, incr []int16) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVI32(a int32, b []int32, incr []int32) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVI64(a int64, b []int64, incr []int64) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVU(a uint, b []uint, incr []uint) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVU8(a uint8, b []uint8, incr []uint8) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVU16(a uint16, b []uint16, incr []uint16) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVU32(a uint32, b []uint32, incr []uint32) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVU64(a uint64, b []uint64, incr []uint64) { for i := range incr { incr[i] += a % b[i] } } func ModIncrSVF32(a float32, b []float32, incr []float32) { for i := range incr { incr[i] += math32.Mod(a, b[i]) } } func ModIncrSVF64(a float64, b []float64, incr []float64) { for i := range incr { incr[i] += math.Mod(a, b[i]) } } func AddIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + 
b[i] } } return } func AddIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func AddIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a + b[i] } } return } func SubIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if 
validi { b[i] = a - b[i] } } return } func SubIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func SubIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a - b[i] } } return } func MulIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = 
handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func MulIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a * b[i] } } return } func DivIterSVI(a int, b []int, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVI8(a int8, b []int8, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVI16(a int16, b []int16, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVI32(a int32, b []int32, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func 
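// Integer division records every zero divisor in an errorIndices value,
// writes a 0 at the offending position, and keeps going: DivSVI(10,
// []int{2, 0, 5}) leaves b as [5 0 2] and returns an error naming index 1.
// The float and complex variants skip the check and divide through to
// Inf/NaN instead.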
DivIterSVI64(a int64, b []int64, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVU(a uint, b []uint, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b[i] == 0 { errs = append(errs, i) b[i] = 0 continue } b[i] = a / b[i] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a / b[i] } } return } func DivIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a / b[i] } } return } func DivIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a / b[i] } } return } func DivIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a / b[i] } } return } func PowIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = math32.Pow(a, b[i]) } } return } func PowIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = math.Pow(a, b[i]) 
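			// Every *Iter kernel shares this loop shape: NextValidity
			// yields the next physical index plus whether that element
			// should be touched, and iterator exhaustion surfaces as an
			// error that handleNoOp is assumed to downgrade to nil,
			// ending the loop cleanly.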
} } return } func PowIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = complex64(cmplx.Pow(complex128(a), complex128(b[i]))) } } return } func PowIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = cmplx.Pow(a, b[i]) } } return } func ModIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = a % b[i] } } return } func ModIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = math32.Mod(a, b[i]) } } return } func ModIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { b[i] = math.Mod(a, b[i]) } } return } func AddIterIncrSVI(a int, b []int, incr []int, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + 
b[i] } } return } func AddIterIncrSVI8(a int8, b []int8, incr []int8, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVI16(a int16, b []int16, incr []int16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVI32(a int32, b []int32, incr []int32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVI64(a int64, b []int64, incr []int64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVU(a uint, b []uint, incr []uint, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVU8(a uint8, b []uint8, incr []uint8, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVU16(a uint16, b []uint16, incr []uint16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVU32(a uint32, b []uint32, incr []uint32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVU64(a uint64, b []uint64, incr []uint64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVF32(a float32, b []float32, incr []float32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != 
nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVF64(a float64, b []float64, incr []float64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVC64(a complex64, b []complex64, incr []complex64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVC128(a complex128, b []complex128, incr []complex128, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func AddIterIncrSVStr(a string, b []string, incr []string, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a + b[i] } } return } func SubIterIncrSVI(a int, b []int, incr []int, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVI8(a int8, b []int8, incr []int8, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVI16(a int16, b []int16, incr []int16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVI32(a int32, b []int32, incr []int32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVI64(a int64, b []int64, incr []int64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVU(a uint, b []uint, incr []uint, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err 
!= nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVU8(a uint8, b []uint8, incr []uint8, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVU16(a uint16, b []uint16, incr []uint16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVU32(a uint32, b []uint32, incr []uint32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVU64(a uint64, b []uint64, incr []uint64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVF32(a float32, b []float32, incr []float32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVF64(a float64, b []float64, incr []float64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVC64(a complex64, b []complex64, incr []complex64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func SubIterIncrSVC128(a complex128, b []complex128, incr []complex128, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a - b[i] } } return } func MulIterIncrSVI(a int, b []int, incr []int, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVI8(a int8, b []int8, incr []int8, bit Iterator, iit Iterator) 
(err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVI16(a int16, b []int16, incr []int16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVI32(a int32, b []int32, incr []int32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVI64(a int64, b []int64, incr []int64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVU(a uint, b []uint, incr []uint, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVU8(a uint8, b []uint8, incr []uint8, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVU16(a uint16, b []uint16, incr []uint16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVU32(a uint32, b []uint32, incr []uint32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVU64(a uint64, b []uint64, incr []uint64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func MulIterIncrSVF32(a float32, b []float32, incr []float32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a * b[i] } } return } func 
MulIterIncrSVF64(a float64, b []float64, incr []float64, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a * b[i]
		}
	}
	return
}

func MulIterIncrSVC64(a complex64, b []complex64, incr []complex64, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a * b[i]
		}
	}
	return
}

func MulIterIncrSVC128(a complex128, b []complex128, incr []complex128, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a * b[i]
		}
	}
	return
}

func DivIterIncrSVI(a int, b []int, incr []int, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0 // zero the destination slot (indexed by k) and record the zero divisor's position
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVI8(a int8, b []int8, incr []int8, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVI16(a int16, b []int16, incr []int16, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVI32(a int32, b []int32, incr []int32, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVI64(a int64, b []int64, incr []int64, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVU(a uint, b []uint, incr []uint, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVU8(a uint8, b []uint8, incr []uint8, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVU16(a uint16, b []uint16, incr []uint16, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVU32(a uint32, b []uint32, incr []uint32, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVU64(a uint64, b []uint64, incr []uint64, bit Iterator, iit Iterator) (err error) {
	var errs errorIndices
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			if b[i] == 0 {
				errs = append(errs, i)
				incr[k] = 0
				continue
			}
			incr[k] += a / b[i]
		}
	}
	if err != nil {
		return
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}

func DivIterIncrSVF32(a float32, b []float32, incr []float32, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a / b[i]
		}
	}
	return
}

func DivIterIncrSVF64(a float64, b []float64, incr []float64, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a / b[i]
		}
	}
	return
}

func DivIterIncrSVC64(a complex64, b []complex64, incr []complex64, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a / b[i]
		}
	}
	return
}

func DivIterIncrSVC128(a complex128, b []complex128, incr []complex128, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a / b[i]
		}
	}
	return
}

func PowIterIncrSVF32(a float32, b []float32, incr []float32, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += math32.Pow(a, b[i])
		}
	}
	return
}

func PowIterIncrSVF64(a float64, b []float64, incr []float64, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += math.Pow(a, b[i])
		}
	}
	return
}

func PowIterIncrSVC64(a complex64, b []complex64, incr []complex64, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += complex64(cmplx.Pow(complex128(a), complex128(b[i])))
		}
	}
	return
}

func PowIterIncrSVC128(a complex128, b []complex128, incr []complex128, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += cmplx.Pow(a, b[i])
		}
	}
	return
}

func ModIterIncrSVI(a int, b []int, incr []int, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a % b[i]
		}
	}
	return
}

func ModIterIncrSVI8(a int8, b []int8, incr []int8, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a % b[i]
		}
	}
	return
}

func ModIterIncrSVI16(a int16, b []int16, incr []int16, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a % b[i]
		}
	}
	return
}

func ModIterIncrSVI32(a int32, b []int32, incr []int32, bit Iterator, iit Iterator) (err error) {
	var i, k int
	var validi, validk bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if k, validk, err = iit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validk {
			incr[k] += a % b[i]
		}
	}
	return
}
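// The kernels in this file follow a fixed naming scheme:
//
//	<Op><Iter?><Incr?><SV|VS><type suffix>
//
// SV kernels compute scalar-op-vector (a op b[i]); VS kernels compute
// vector-op-scalar (a[i] op b). Iter variants walk their operands through
// Iterators and so work on non-contiguous (sliced, transposed, or masked)
// data: NextValidity yields the next index, whether that index is valid,
// and an error once the iterator is exhausted, which handleNoOp downgrades
// to nil. Incr variants accumulate into incr instead of overwriting the
// destination. Integer Div kernels collect the positions of zero divisors
// in errorIndices and return them as the error; the float kernels instead
// let IEEE-754 division by zero produce ±Inf.
//
// As an illustrative sketch only (not part of the generated code), the
// smallest Iterator these kernels can drive is a cursor over a dense slice.
// noopError below stands in for whatever exhaustion sentinel handleNoOp
// recognises; it is an assumption, not necessarily this package's type:
//
//	type sliceIter struct{ i, n int }
//
//	func (it *sliceIter) NextValidity() (int, bool, error) {
//		if it.i >= it.n {
//			return -1, false, noopError{} // assumed no-op sentinel
//		}
//		i := it.i
//		it.i++
//		return i, true, nil
//	}
//
//	// Usage sketch: add 1.5 to every element of xs in place.
//	// xs := []float64{1, 2, 3}
//	// err := AddIterVSF64(xs, 1.5, &sliceIter{n: len(xs)})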
func ModIterIncrSVI64(a int64, b []int64, incr []int64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a % b[i] } } return } func ModIterIncrSVU(a uint, b []uint, incr []uint, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a % b[i] } } return } func ModIterIncrSVU8(a uint8, b []uint8, incr []uint8, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a % b[i] } } return } func ModIterIncrSVU16(a uint16, b []uint16, incr []uint16, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a % b[i] } } return } func ModIterIncrSVU32(a uint32, b []uint32, incr []uint32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a % b[i] } } return } func ModIterIncrSVU64(a uint64, b []uint64, incr []uint64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a % b[i] } } return } func ModIterIncrSVF32(a float32, b []float32, incr []float32, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += math32.Mod(a, b[i]) } } return } func ModIterIncrSVF64(a float64, b []float64, incr []float64, bit Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += math.Mod(a, b[i]) } } return } func AddVSI(a []int, b int) { for i := range a { a[i] = a[i] + b } } func AddVSI8(a []int8, b int8) { for i := range a { a[i] = a[i] + b } } func AddVSI16(a []int16, b int16) { for i := range a { a[i] = a[i] + b } } func AddVSI32(a []int32, b int32) { for i := range a { a[i] = a[i] + b } } func AddVSI64(a []int64, b int64) { for i := range a { a[i] = a[i] + b } } func AddVSU(a []uint, b uint) { for i := range a { a[i] = a[i] + b } } func AddVSU8(a []uint8, b uint8) { for i := range a { a[i] = a[i] + b } } func AddVSU16(a []uint16, b uint16) { for i := range a { a[i] = a[i] + b } } func AddVSU32(a []uint32, b uint32) 
{ for i := range a { a[i] = a[i] + b } } func AddVSU64(a []uint64, b uint64) { for i := range a { a[i] = a[i] + b } } func AddVSF32(a []float32, b float32) { for i := range a { a[i] = a[i] + b } } func AddVSF64(a []float64, b float64) { for i := range a { a[i] = a[i] + b } } func AddVSC64(a []complex64, b complex64) { for i := range a { a[i] = a[i] + b } } func AddVSC128(a []complex128, b complex128) { for i := range a { a[i] = a[i] + b } } func AddVSStr(a []string, b string) { for i := range a { a[i] = a[i] + b } } func SubVSI(a []int, b int) { for i := range a { a[i] = a[i] - b } } func SubVSI8(a []int8, b int8) { for i := range a { a[i] = a[i] - b } } func SubVSI16(a []int16, b int16) { for i := range a { a[i] = a[i] - b } } func SubVSI32(a []int32, b int32) { for i := range a { a[i] = a[i] - b } } func SubVSI64(a []int64, b int64) { for i := range a { a[i] = a[i] - b } } func SubVSU(a []uint, b uint) { for i := range a { a[i] = a[i] - b } } func SubVSU8(a []uint8, b uint8) { for i := range a { a[i] = a[i] - b } } func SubVSU16(a []uint16, b uint16) { for i := range a { a[i] = a[i] - b } } func SubVSU32(a []uint32, b uint32) { for i := range a { a[i] = a[i] - b } } func SubVSU64(a []uint64, b uint64) { for i := range a { a[i] = a[i] - b } } func SubVSF32(a []float32, b float32) { for i := range a { a[i] = a[i] - b } } func SubVSF64(a []float64, b float64) { for i := range a { a[i] = a[i] - b } } func SubVSC64(a []complex64, b complex64) { for i := range a { a[i] = a[i] - b } } func SubVSC128(a []complex128, b complex128) { for i := range a { a[i] = a[i] - b } } func MulVSI(a []int, b int) { for i := range a { a[i] = a[i] * b } } func MulVSI8(a []int8, b int8) { for i := range a { a[i] = a[i] * b } } func MulVSI16(a []int16, b int16) { for i := range a { a[i] = a[i] * b } } func MulVSI32(a []int32, b int32) { for i := range a { a[i] = a[i] * b } } func MulVSI64(a []int64, b int64) { for i := range a { a[i] = a[i] * b } } func MulVSU(a []uint, b uint) { for i := range a { a[i] = a[i] * b } } func MulVSU8(a []uint8, b uint8) { for i := range a { a[i] = a[i] * b } } func MulVSU16(a []uint16, b uint16) { for i := range a { a[i] = a[i] * b } } func MulVSU32(a []uint32, b uint32) { for i := range a { a[i] = a[i] * b } } func MulVSU64(a []uint64, b uint64) { for i := range a { a[i] = a[i] * b } } func MulVSF32(a []float32, b float32) { for i := range a { a[i] = a[i] * b } } func MulVSF64(a []float64, b float64) { for i := range a { a[i] = a[i] * b } } func MulVSC64(a []complex64, b complex64) { for i := range a { a[i] = a[i] * b } } func MulVSC128(a []complex128, b complex128) { for i := range a { a[i] = a[i] * b } } func DivVSI(a []int, b int) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSI8(a []int8, b int8) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSI16(a []int16, b int16) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSI32(a []int32, b int32) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 
{ return errs } return nil } func DivVSI64(a []int64, b int64) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSU(a []uint, b uint) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSU8(a []uint8, b uint8) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSU16(a []uint16, b uint16) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSU32(a []uint32, b uint32) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSU64(a []uint64, b uint64) (err error) { var errs errorIndices for i := range a { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivVSF32(a []float32, b float32) { for i := range a { a[i] = a[i] / b } } func DivVSF64(a []float64, b float64) { for i := range a { a[i] = a[i] / b } } func DivVSC64(a []complex64, b complex64) { for i := range a { a[i] = a[i] / b } } func DivVSC128(a []complex128, b complex128) { for i := range a { a[i] = a[i] / b } } func PowVSF32(a []float32, b float32) { for i := range a { a[i] = math32.Pow(a[i], b) } } func PowVSF64(a []float64, b float64) { for i := range a { a[i] = math.Pow(a[i], b) } } func PowVSC64(a []complex64, b complex64) { for i := range a { a[i] = complex64(cmplx.Pow(complex128(a[i]), complex128(b))) } } func PowVSC128(a []complex128, b complex128) { for i := range a { a[i] = cmplx.Pow(a[i], b) } } func ModVSI(a []int, b int) { for i := range a { a[i] = a[i] % b } } func ModVSI8(a []int8, b int8) { for i := range a { a[i] = a[i] % b } } func ModVSI16(a []int16, b int16) { for i := range a { a[i] = a[i] % b } } func ModVSI32(a []int32, b int32) { for i := range a { a[i] = a[i] % b } } func ModVSI64(a []int64, b int64) { for i := range a { a[i] = a[i] % b } } func ModVSU(a []uint, b uint) { for i := range a { a[i] = a[i] % b } } func ModVSU8(a []uint8, b uint8) { for i := range a { a[i] = a[i] % b } } func ModVSU16(a []uint16, b uint16) { for i := range a { a[i] = a[i] % b } } func ModVSU32(a []uint32, b uint32) { for i := range a { a[i] = a[i] % b } } func ModVSU64(a []uint64, b uint64) { for i := range a { a[i] = a[i] % b } } func ModVSF32(a []float32, b float32) { for i := range a { a[i] = math32.Mod(a[i], b) } } func ModVSF64(a []float64, b float64) { for i := range a { a[i] = math.Mod(a[i], b) } } func AddIncrVSI(a []int, b int, incr []int) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSI8(a []int8, b int8, incr []int8) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSI16(a []int16, b int16, incr []int16) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSI32(a []int32, b int32, incr []int32) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSI64(a []int64, b int64, incr []int64) { for i := range incr { incr[i] += 
a[i] + b } } func AddIncrVSU(a []uint, b uint, incr []uint) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSU8(a []uint8, b uint8, incr []uint8) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSU16(a []uint16, b uint16, incr []uint16) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSU32(a []uint32, b uint32, incr []uint32) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSU64(a []uint64, b uint64, incr []uint64) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSF32(a []float32, b float32, incr []float32) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSF64(a []float64, b float64, incr []float64) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSC64(a []complex64, b complex64, incr []complex64) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSC128(a []complex128, b complex128, incr []complex128) { for i := range incr { incr[i] += a[i] + b } } func AddIncrVSStr(a []string, b string, incr []string) { for i := range incr { incr[i] += a[i] + b } } func SubIncrVSI(a []int, b int, incr []int) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSI8(a []int8, b int8, incr []int8) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSI16(a []int16, b int16, incr []int16) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSI32(a []int32, b int32, incr []int32) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSI64(a []int64, b int64, incr []int64) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSU(a []uint, b uint, incr []uint) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSU8(a []uint8, b uint8, incr []uint8) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSU16(a []uint16, b uint16, incr []uint16) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSU32(a []uint32, b uint32, incr []uint32) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSU64(a []uint64, b uint64, incr []uint64) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSF32(a []float32, b float32, incr []float32) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSF64(a []float64, b float64, incr []float64) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSC64(a []complex64, b complex64, incr []complex64) { for i := range incr { incr[i] += a[i] - b } } func SubIncrVSC128(a []complex128, b complex128, incr []complex128) { for i := range incr { incr[i] += a[i] - b } } func MulIncrVSI(a []int, b int, incr []int) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSI8(a []int8, b int8, incr []int8) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSI16(a []int16, b int16, incr []int16) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSI32(a []int32, b int32, incr []int32) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSI64(a []int64, b int64, incr []int64) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSU(a []uint, b uint, incr []uint) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSU8(a []uint8, b uint8, incr []uint8) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSU16(a []uint16, b uint16, incr []uint16) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSU32(a []uint32, b uint32, incr []uint32) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSU64(a []uint64, b uint64, incr []uint64) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSF32(a []float32, b float32, incr []float32) { for i := 
range incr { incr[i] += a[i] * b } } func MulIncrVSF64(a []float64, b float64, incr []float64) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSC64(a []complex64, b complex64, incr []complex64) { for i := range incr { incr[i] += a[i] * b } } func MulIncrVSC128(a []complex128, b complex128, incr []complex128) { for i := range incr { incr[i] += a[i] * b } } func DivIncrVSI(a []int, b int, incr []int) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSI8(a []int8, b int8, incr []int8) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSI16(a []int16, b int16, incr []int16) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSI32(a []int32, b int32, incr []int32) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSI64(a []int64, b int64, incr []int64) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSU(a []uint, b uint, incr []uint) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSU8(a []uint8, b uint8, incr []uint8) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSU16(a []uint16, b uint16, incr []uint16) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSU32(a []uint32, b uint32, incr []uint32) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSU64(a []uint64, b uint64, incr []uint64) (err error) { var errs errorIndices for i := range incr { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrVSF32(a []float32, b float32, incr []float32) { for i := range incr { incr[i] += a[i] / b } } func DivIncrVSF64(a []float64, b float64, incr []float64) { for i := range incr { incr[i] += a[i] / b } } func DivIncrVSC64(a []complex64, b complex64, incr []complex64) { for i := range incr { incr[i] += a[i] / b } } func DivIncrVSC128(a []complex128, b complex128, incr []complex128) { for i := range incr { incr[i] += a[i] / b } } func PowIncrVSF32(a []float32, b float32, incr []float32) { for i := range incr { incr[i] += math32.Pow(a[i], b) } } func PowIncrVSF64(a 
[]float64, b float64, incr []float64) { for i := range incr { incr[i] += math.Pow(a[i], b) } } func PowIncrVSC64(a []complex64, b complex64, incr []complex64) { for i := range incr { incr[i] += complex64(cmplx.Pow(complex128(a[i]), complex128(b))) } } func PowIncrVSC128(a []complex128, b complex128, incr []complex128) { for i := range incr { incr[i] += cmplx.Pow(a[i], b) } } func ModIncrVSI(a []int, b int, incr []int) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSI8(a []int8, b int8, incr []int8) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSI16(a []int16, b int16, incr []int16) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSI32(a []int32, b int32, incr []int32) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSI64(a []int64, b int64, incr []int64) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSU(a []uint, b uint, incr []uint) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSU8(a []uint8, b uint8, incr []uint8) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSU16(a []uint16, b uint16, incr []uint16) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSU32(a []uint32, b uint32, incr []uint32) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSU64(a []uint64, b uint64, incr []uint64) { for i := range incr { incr[i] += a[i] % b } } func ModIncrVSF32(a []float32, b float32, incr []float32) { for i := range incr { incr[i] += math32.Mod(a[i], b) } } func ModIncrVSF64(a []float64, b float64, incr []float64) { for i := range incr { incr[i] += math.Mod(a[i], b) } } func AddIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if 
validi { a[i] = a[i] + b } } return } func AddIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func AddIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] + b } } return } func SubIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = 
handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func SubIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] - b } } return } func MulIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = 
ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func MulIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * b } } return } func DivIterVSI(a []int, b int, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSI8(a []int8, b int8, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSI16(a []int16, b int16, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSI32(a []int32, b int32, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSI64(a []int64, b int64, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSU(a []uint, b uint, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var errs errorIndices var i int var validi 
bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var errs errorIndices var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] / b } } return } func DivIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] / b } } return } func DivIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] / b } } return } func DivIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] / b } } return } func PowIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Pow(a[i], b) } } return } func PowIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Pow(a[i], b) } } return } func PowIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = complex64(cmplx.Pow(complex128(a[i]), complex128(b))) } } return } func PowIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = cmplx.Pow(a[i], b) } } return } func ModIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err 
= handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] % b } } return } func ModIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Mod(a[i], b) } } return } func ModIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Mod(a[i], b) } } return } func AddIterIncrVSI(a []int, b int, incr []int, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSI8(a []int8, b int8, incr []int8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSI16(a []int16, b int16, incr []int16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSI32(a []int32, b int32, incr []int32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSI64(a []int64, b int64, incr []int64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSU(a []uint, b uint, incr []uint, ait Iterator, iit Iterator) (err error) { var i, k int 
var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSU8(a []uint8, b uint8, incr []uint8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSU16(a []uint16, b uint16, incr []uint16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSU32(a []uint32, b uint32, incr []uint32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSU64(a []uint64, b uint64, incr []uint64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSF32(a []float32, b float32, incr []float32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSF64(a []float64, b float64, incr []float64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSC64(a []complex64, b complex64, incr []complex64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSC128(a []complex128, b complex128, incr []complex128, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } return } func AddIterIncrVSStr(a []string, b string, incr []string, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] + b } } 
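// err has already been filtered through handleNoOp: plain end-of-iteration
// is reported as nil, while a genuine iterator failure propagates to the
// caller. For this string variant, incr[k] += a[i] + b appends the
// concatenation of a[i] and b to the accumulator.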
return } func SubIterIncrVSI(a []int, b int, incr []int, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSI8(a []int8, b int8, incr []int8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSI16(a []int16, b int16, incr []int16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSI32(a []int32, b int32, incr []int32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSI64(a []int64, b int64, incr []int64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSU(a []uint, b uint, incr []uint, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSU8(a []uint8, b uint8, incr []uint8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSU16(a []uint16, b uint16, incr []uint16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSU32(a []uint32, b uint32, incr []uint32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSU64(a []uint64, b uint64, incr []uint64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = 
handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSF32(a []float32, b float32, incr []float32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSF64(a []float64, b float64, incr []float64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSC64(a []complex64, b complex64, incr []complex64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func SubIterIncrVSC128(a []complex128, b complex128, incr []complex128, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] - b } } return } func MulIterIncrVSI(a []int, b int, incr []int, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSI8(a []int8, b int8, incr []int8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSI16(a []int16, b int16, incr []int16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSI32(a []int32, b int32, incr []int32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSI64(a []int64, b int64, incr []int64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSU(a []uint, b uint, incr []uint, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { 
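// an error from NextValidity either marks the natural end of iteration
// (a no-op error, which handleNoOp converts to nil) or a real failure,
// which is returned to the caller unchanged.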
err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSU8(a []uint8, b uint8, incr []uint8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSU16(a []uint16, b uint16, incr []uint16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSU32(a []uint32, b uint32, incr []uint32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSU64(a []uint64, b uint64, incr []uint64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSF32(a []float32, b float32, incr []float32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSF64(a []float64, b float64, incr []float64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSC64(a []complex64, b complex64, incr []complex64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func MulIterIncrVSC128(a []complex128, b complex128, incr []complex128, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] * b } } return } func DivIterIncrVSI(a []int, b int, incr []int, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { 
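// a genuine iterator error takes precedence over any collected
// divide-by-zero indices.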
return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSI8(a []int8, b int8, incr []int8, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSI16(a []int16, b int16, incr []int16, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSI32(a []int32, b int32, incr []int32, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSI64(a []int64, b int64, incr []int64, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSU(a []uint, b uint, incr []uint, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSU8(a []uint8, b uint8, incr []uint8, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSU16(a []uint16, b uint16, incr []uint16, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { 
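// the scalar divisor was zero, so every valid position was recorded in
// errs; errorIndices doubles as the returned error value.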
return errs } return nil } func DivIterIncrVSU32(a []uint32, b uint32, incr []uint32, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSU64(a []uint64, b uint64, incr []uint64, ait Iterator, iit Iterator) (err error) { var errs errorIndices var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { if b == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrVSF32(a []float32, b float32, incr []float32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] / b } } return } func DivIterIncrVSF64(a []float64, b float64, incr []float64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] / b } } return } func DivIterIncrVSC64(a []complex64, b complex64, incr []complex64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] / b } } return } func DivIterIncrVSC128(a []complex128, b complex128, incr []complex128, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] / b } } return } func PowIterIncrVSF32(a []float32, b float32, incr []float32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += math32.Pow(a[i], b) } } return } func PowIterIncrVSF64(a []float64, b float64, incr []float64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += math.Pow(a[i], b) } } return } func PowIterIncrVSC64(a []complex64, b complex64, incr []complex64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = 
iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += complex64(cmplx.Pow(complex128(a[i]), complex128(b))) } } return } func PowIterIncrVSC128(a []complex128, b complex128, incr []complex128, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += cmplx.Pow(a[i], b) } } return } func ModIterIncrVSI(a []int, b int, incr []int, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSI8(a []int8, b int8, incr []int8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSI16(a []int16, b int16, incr []int16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSI32(a []int32, b int32, incr []int32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSI64(a []int64, b int64, incr []int64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSU(a []uint, b uint, incr []uint, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSU8(a []uint8, b uint8, incr []uint8, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSU16(a []uint16, b uint16, incr []uint16, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSU32(a []uint32, b uint32, incr []uint32, ait Iterator, iit Iterator) (err error) { var i, k int var 
validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSU64(a []uint64, b uint64, incr []uint64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += a[i] % b } } return } func ModIterIncrVSF32(a []float32, b float32, incr []float32, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += math32.Mod(a[i], b) } } return } func ModIterIncrVSF64(a []float64, b float64, incr []float64, ait Iterator, iit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { incr[k] += math.Mod(a[i], b) } } return } tensor-0.9.24/internal/execution/generic_arith_vv.go000066400000000000000000003144531426512615100226210ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "math" "math/cmplx" "github.com/chewxy/math32" "gorgonia.org/vecf32" "gorgonia.org/vecf64" ) func VecAddI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddF32(a []float32, b []float32) { vecf32.Add(a, b) } func VecAddF64(a []float64, b []float64) { vecf64.Add(a, b) } func VecAddC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddC128(a []complex128, b []complex128) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecAddStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] + b[i] } } func VecSubI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { 
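// the reslicing above (b = b[:len(a)]) is the usual bounds-check
// elimination hint: inside this loop, i is provably in range for both
// slices, so the compiler can drop the bounds check on b[i].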
a[i] = a[i] - b[i] } } func VecSubI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubF32(a []float32, b []float32) { vecf32.Sub(a, b) } func VecSubF64(a []float64, b []float64) { vecf64.Sub(a, b) } func VecSubC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecSubC128(a []complex128, b []complex128) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] - b[i] } } func VecMulI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulF32(a []float32, b []float32) { vecf32.Mul(a, b) } func VecMulF64(a []float64, b []float64) { vecf64.Mul(a, b) } func VecMulC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecMulC128(a []complex128, b []complex128) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] * b[i] } } func VecDivI(a []int, b []int) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivI8(a []int8, b []int8) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivI16(a []int16, b []int16) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivI32(a []int32, b []int32) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivI64(a []int64, b []int64) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices 
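// errs records the flat indices at which b[i] == 0; those elements of a
// are zeroed instead of panicking, and the indices come back as a single
// error after the loop. A sketch of how code inside this package might
// recover them (the recovery policy shown here is the caller's choice,
// not something this function prescribes):
//
//	if err := VecDivI64(a, b); err != nil {
//		if ei, ok := err.(errorIndices); ok {
//			_ = ei // positions in a that were set to 0
//		}
//	}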
for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivU(a []uint, b []uint) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivU8(a []uint8, b []uint8) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivU16(a []uint16, b []uint16) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivU32(a []uint32, b []uint32) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivU64(a []uint64, b []uint64) (err error) { a = a[:] b = b[:len(a)] var errs errorIndices for i := range a { if b[i] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func VecDivF32(a []float32, b []float32) { vecf32.Div(a, b) } func VecDivF64(a []float64, b []float64) { vecf64.Div(a, b) } func VecDivC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] / b[i] } } func VecDivC128(a []complex128, b []complex128) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] / b[i] } } func VecPowF32(a []float32, b []float32) { vecf32.Pow(a, b) } func VecPowF64(a []float64, b []float64) { vecf64.Pow(a, b) } func VecPowC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = complex64(cmplx.Pow(complex128(a[i]), complex128(b[i]))) } } func VecPowC128(a []complex128, b []complex128) { a = a[:] b = b[:len(a)] for i := range a { a[i] = cmplx.Pow(a[i], b[i]) } } func VecModI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { a[i] = a[i] % b[i] } } func VecModF32(a []float32, b []float32) { vecf32.Mod(a, b) } func VecModF64(a []float64, b []float64) { vecf64.Mod(a, b) } func AddIncrI(a []int, b []int, incr []int) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += 
a[i] + b[i] } } func AddIncrI8(a []int8, b []int8, incr []int8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrI16(a []int16, b []int16, incr []int16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrI32(a []int32, b []int32, incr []int32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrI64(a []int64, b []int64, incr []int64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrU(a []uint, b []uint, incr []uint) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrU8(a []uint8, b []uint8, incr []uint8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrU16(a []uint16, b []uint16, incr []uint16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrU32(a []uint32, b []uint32, incr []uint32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrU64(a []uint64, b []uint64, incr []uint64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrF32(a []float32, b []float32, incr []float32) { vecf32.IncrAdd(a, b, incr) } func AddIncrF64(a []float64, b []float64, incr []float64) { vecf64.IncrAdd(a, b, incr) } func AddIncrC64(a []complex64, b []complex64, incr []complex64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrC128(a []complex128, b []complex128, incr []complex128) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func AddIncrStr(a []string, b []string, incr []string) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] + b[i] } } func SubIncrI(a []int, b []int, incr []int) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrI8(a []int8, b []int8, incr []int8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrI16(a []int16, b []int16, incr []int16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrI32(a []int32, b []int32, incr []int32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrI64(a []int64, b []int64, incr []int64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrU(a []uint, b []uint, incr []uint) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrU8(a []uint8, b []uint8, incr []uint8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrU16(a []uint16, b []uint16, incr []uint16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrU32(a []uint32, b []uint32, incr []uint32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrU64(a []uint64, b []uint64, incr []uint64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrF32(a []float32, b []float32, incr []float32) { vecf32.IncrSub(a, b, 
incr) } func SubIncrF64(a []float64, b []float64, incr []float64) { vecf64.IncrSub(a, b, incr) } func SubIncrC64(a []complex64, b []complex64, incr []complex64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func SubIncrC128(a []complex128, b []complex128, incr []complex128) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] - b[i] } } func MulIncrI(a []int, b []int, incr []int) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrI8(a []int8, b []int8, incr []int8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrI16(a []int16, b []int16, incr []int16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrI32(a []int32, b []int32, incr []int32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrI64(a []int64, b []int64, incr []int64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrU(a []uint, b []uint, incr []uint) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrU8(a []uint8, b []uint8, incr []uint8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrU16(a []uint16, b []uint16, incr []uint16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrU32(a []uint32, b []uint32, incr []uint32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrU64(a []uint64, b []uint64, incr []uint64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrF32(a []float32, b []float32, incr []float32) { vecf32.IncrMul(a, b, incr) } func MulIncrF64(a []float64, b []float64, incr []float64) { vecf64.IncrMul(a, b, incr) } func MulIncrC64(a []complex64, b []complex64, incr []complex64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func MulIncrC128(a []complex128, b []complex128, incr []complex128) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] * b[i] } } func DivIncrI(a []int, b []int, incr []int) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrI8(a []int8, b []int8, incr []int8) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrI16(a []int16, b []int16, incr []int16) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrI32(a []int32, b []int32, incr []int32) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / 
b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrI64(a []int64, b []int64, incr []int64) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrU(a []uint, b []uint, incr []uint) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrU8(a []uint8, b []uint8, incr []uint8) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrU16(a []uint16, b []uint16, incr []uint16) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrU32(a []uint32, b []uint32, incr []uint32) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrU64(a []uint64, b []uint64, incr []uint64) (err error) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] var errs errorIndices for i := range incr { if b[i] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[i] += a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIncrF32(a []float32, b []float32, incr []float32) { vecf32.IncrDiv(a, b, incr) } func DivIncrF64(a []float64, b []float64, incr []float64) { vecf64.IncrDiv(a, b, incr) } func DivIncrC64(a []complex64, b []complex64, incr []complex64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] / b[i] } } func DivIncrC128(a []complex128, b []complex128, incr []complex128) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] / b[i] } } func PowIncrF32(a []float32, b []float32, incr []float32) { vecf32.IncrPow(a, b, incr) } func PowIncrF64(a []float64, b []float64, incr []float64) { vecf64.IncrPow(a, b, incr) } func PowIncrC64(a []complex64, b []complex64, incr []complex64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += complex64(cmplx.Pow(complex128(a[i]), complex128(b[i]))) } } func PowIncrC128(a []complex128, b []complex128, incr []complex128) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += cmplx.Pow(a[i], b[i]) } } func ModIncrI(a []int, b []int, incr []int) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrI8(a []int8, b []int8, incr []int8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrI16(a []int16, b []int16, incr []int16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrI32(a []int32, b []int32, incr []int32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := 
range incr { incr[i] += a[i] % b[i] } } func ModIncrI64(a []int64, b []int64, incr []int64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrU(a []uint, b []uint, incr []uint) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrU8(a []uint8, b []uint8, incr []uint8) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrU16(a []uint16, b []uint16, incr []uint16) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrU32(a []uint32, b []uint32, incr []uint32) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrU64(a []uint64, b []uint64, incr []uint64) { a = a[:] b = b[:len(a)] incr = incr[:len(a)] for i := range incr { incr[i] += a[i] % b[i] } } func ModIncrF32(a []float32, b []float32, incr []float32) { vecf32.IncrMod(a, b, incr) } func ModIncrF64(a []float64, b []float64, incr []float64) { vecf64.IncrMod(a, b, incr) } func AddIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterU16(a []uint16, b []uint16, ait Iterator, bit 
Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterC64(a []complex64, b []complex64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func AddIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] + b[j] } } return } func SubIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = 
ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterC64(a []complex64, b []complex64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err 
!= nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func SubIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] - b[j] } } return } func MulIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterU64(a []uint64, b 
[]uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterC64(a []complex64, b []complex64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func MulIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] * b[j] } } return } func DivIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } 
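// The pattern used by every kernel in this file: NextValidity advances an
// Iterator, returning the next index and whether it points at valid data;
// handleNoOp turns the no-op error an exhausted iterator returns into nil,
// so falling off the end terminates the loop without reporting a failure.
// The integer Div kernels additionally trap division by zero: the offending
// index is recorded in errs (an errorIndices), the destination slot is
// zeroed, and iteration continues, so all bad indices surface in one error.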
a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var errs errorIndices var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] == 0 { errs = append(errs, i) a[i] = 0 continue } a[i] = a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] / b[j] } } return } func DivIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) 
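// Unlike the integer kernels above, the float and complex Div variants need
// no zero guard: IEEE-754 division by zero yields ±Inf or NaN instead of a
// runtime panic, so there is no errorIndices bookkeeping here.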
break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] / b[j] } } return } func DivIterC64(a []complex64, b []complex64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] / b[j] } } return } func DivIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] / b[j] } } return } func PowIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = math32.Pow(a[i], b[j]) } } return } func PowIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = math.Pow(a[i], b[j]) } } return } func PowIterC64(a []complex64, b []complex64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = complex64(cmplx.Pow(complex128(a[i]), complex128(b[j]))) } } return } func PowIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = cmplx.Pow(a[i], b[j]) } } return } func ModIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if 
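// Pow is defined only for float and complex element types, delegating to
// math32.Pow, math.Pow and cmplx.Pow (complex64 values are routed through
// complex128). Mod uses the native % operator for integers and
// math32.Mod/math.Mod for floats; no complex Mod kernels are generated.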
j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = a[i] % b[j] } } return } func ModIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = math32.Mod(a[i], b[j]) } } return } func ModIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { a[i] = math.Mod(a[i], b[j]) } } return } func AddIterIncrI(a []int, b []int, incr []int, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrI8(a []int8, b []int8, incr []int8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, 
validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrI16(a []int16, b []int16, incr []int16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrI32(a []int32, b []int32, incr []int32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrI64(a []int64, b []int64, incr []int64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrU(a []uint, b []uint, incr []uint, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrU8(a []uint8, b []uint8, incr []uint8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrU16(a []uint16, b []uint16, incr []uint16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrU32(a []uint32, b []uint32, incr []uint32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, 
err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrU64(a []uint64, b []uint64, incr []uint64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrF32(a []float32, b []float32, incr []float32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrF64(a []float64, b []float64, incr []float64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrC64(a []complex64, b []complex64, incr []complex64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrC128(a []complex128, b []complex128, incr []complex128, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func AddIterIncrStr(a []string, b []string, incr []string, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] + b[j] } } return } func SubIterIncrI(a []int, b []int, incr []int, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return 
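// The *IterIncr kernels walk three iterators in lockstep and accumulate
// a[i] op b[j] into incr[k] with +=, instead of overwriting a in place as
// the plain *Iter kernels do. This backs the accumulate-into-destination
// ("incr") execution mode of the tensor package.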
} func SubIterIncrI8(a []int8, b []int8, incr []int8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrI16(a []int16, b []int16, incr []int16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrI32(a []int32, b []int32, incr []int32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrI64(a []int64, b []int64, incr []int64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrU(a []uint, b []uint, incr []uint, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrU8(a []uint8, b []uint8, incr []uint8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrU16(a []uint16, b []uint16, incr []uint16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrU32(a []uint32, b []uint32, incr []uint32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, 
validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrU64(a []uint64, b []uint64, incr []uint64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrF32(a []float32, b []float32, incr []float32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrF64(a []float64, b []float64, incr []float64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrC64(a []complex64, b []complex64, incr []complex64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func SubIterIncrC128(a []complex128, b []complex128, incr []complex128, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] - b[j] } } return } func MulIterIncrI(a []int, b []int, incr []int, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrI8(a []int8, b []int8, incr []int8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = 
handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrI16(a []int16, b []int16, incr []int16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrI32(a []int32, b []int32, incr []int32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrI64(a []int64, b []int64, incr []int64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrU(a []uint, b []uint, incr []uint, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrU8(a []uint8, b []uint8, incr []uint8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrU16(a []uint16, b []uint16, incr []uint16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrU32(a []uint32, b []uint32, incr []uint32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return 
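// A minimal sketch of how these kernels are driven (hypothetical driver
// code, not part of this file): anything whose NextValidity() (int, bool,
// error) method yields successive indices satisfies Iterator. Three
// iterators walking dense slices of equal length visit i == j == k on each
// step, so
//
//	MulIterIncrF64(a, b, incr, ait, bit, iit)
//
// then behaves like incr[i] += a[i] * b[i] for every i.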
} func MulIterIncrU64(a []uint64, b []uint64, incr []uint64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrF32(a []float32, b []float32, incr []float32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrF64(a []float64, b []float64, incr []float64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrC64(a []complex64, b []complex64, incr []complex64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func MulIterIncrC128(a []complex128, b []complex128, incr []complex128, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] * b[j] } } return } func DivIterIncrI(a []int, b []int, incr []int, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrI8(a []int8, b []int8, incr []int8, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] 
== 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrI16(a []int16, b []int16, incr []int16, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrI32(a []int32, b []int32, incr []int32, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrI64(a []int64, b []int64, incr []int64, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrU(a []uint, b []uint, incr []uint, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrU8(a []uint8, b []uint8, incr []uint8, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrU16(a []uint16, b []uint16, incr []uint16, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) 
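// As in DivIter, the integer DivIterIncr kernels trap division by zero:
// the offending index is appended to errs, incr[i] is zeroed, and the
// accumulation for that step is skipped, so one errorIndices error reports
// every bad index after the loop completes.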
break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrU32(a []uint32, b []uint32, incr []uint32, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrU64(a []uint64, b []uint64, incr []uint64, ait Iterator, bit Iterator, iit Iterator) (err error) { var errs errorIndices var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { if b[j] == 0 { errs = append(errs, i) incr[i] = 0 continue } incr[k] += a[i] / b[j] } } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivIterIncrF32(a []float32, b []float32, incr []float32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] / b[j] } } return } func DivIterIncrF64(a []float64, b []float64, incr []float64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] / b[j] } } return } func DivIterIncrC64(a []complex64, b []complex64, incr []complex64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] / b[j] } } return } func DivIterIncrC128(a []complex128, b []complex128, incr []complex128, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && 
validk { incr[k] += a[i] / b[j] } } return } func PowIterIncrF32(a []float32, b []float32, incr []float32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += math32.Pow(a[i], b[j]) } } return } func PowIterIncrF64(a []float64, b []float64, incr []float64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += math.Pow(a[i], b[j]) } } return } func PowIterIncrC64(a []complex64, b []complex64, incr []complex64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += complex64(cmplx.Pow(complex128(a[i]), complex128(b[j]))) } } return } func PowIterIncrC128(a []complex128, b []complex128, incr []complex128, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += cmplx.Pow(a[i], b[j]) } } return } func ModIterIncrI(a []int, b []int, incr []int, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrI8(a []int8, b []int8, incr []int8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrI16(a []int16, b []int16, incr []int16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrI32(a 
[]int32, b []int32, incr []int32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrI64(a []int64, b []int64, incr []int64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrU(a []uint, b []uint, incr []uint, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrU8(a []uint8, b []uint8, incr []uint8, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrU16(a []uint16, b []uint16, incr []uint16, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrU32(a []uint32, b []uint32, incr []uint32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrU64(a []uint64, b []uint64, incr []uint64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += a[i] % b[j] } } return } func ModIterIncrF32(a []float32, b []float32, incr []float32, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = 
ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += math32.Mod(a[i], b[j]) } } return } func ModIterIncrF64(a []float64, b []float64, incr []float64, ait Iterator, bit Iterator, iit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = iit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { incr[k] += math.Mod(a[i], b[j]) } } return } func AddRecvI(a []int, b []int, recv []int) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvI8(a []int8, b []int8, recv []int8) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvI16(a []int16, b []int16, recv []int16) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvI32(a []int32, b []int32, recv []int32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvI64(a []int64, b []int64, recv []int64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvU(a []uint, b []uint, recv []uint) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvU8(a []uint8, b []uint8, recv []uint8) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvU16(a []uint16, b []uint16, recv []uint16) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvU32(a []uint32, b []uint32, recv []uint32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvU64(a []uint64, b []uint64, recv []uint64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvF32(a []float32, b []float32, recv []float32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvF64(a []float64, b []float64, recv []float64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvC64(a []complex64, b []complex64, recv []complex64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvC128(a []complex128, b []complex128, recv []complex128) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func AddRecvStr(a []string, b []string, recv []string) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] + b[i] } } func SubRecvI(a []int, b []int, recv []int) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvI8(a []int8, b []int8, recv []int8) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvI16(a []int16, b []int16, recv []int16) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvI32(a []int32, b []int32, recv []int32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvI64(a []int64, b []int64, recv []int64) { a = a[:len(recv)] b = b[:len(recv)] for i := range 
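// The *Recv kernels write a[i] op b[i] into a separate receiver slice.
// Re-slicing a and b to len(recv) before the loop is the standard Go
// bounds-check-elimination hint: it lets the compiler prove a[i] and b[i]
// are in range, keeping these hot loops free of per-iteration bounds
// checks.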
recv { recv[i] = a[i] - b[i] } } func SubRecvU(a []uint, b []uint, recv []uint) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvU8(a []uint8, b []uint8, recv []uint8) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvU16(a []uint16, b []uint16, recv []uint16) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvU32(a []uint32, b []uint32, recv []uint32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvU64(a []uint64, b []uint64, recv []uint64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvF32(a []float32, b []float32, recv []float32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvF64(a []float64, b []float64, recv []float64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvC64(a []complex64, b []complex64, recv []complex64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func SubRecvC128(a []complex128, b []complex128, recv []complex128) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] - b[i] } } func MulRecvI(a []int, b []int, recv []int) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvI8(a []int8, b []int8, recv []int8) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvI16(a []int16, b []int16, recv []int16) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvI32(a []int32, b []int32, recv []int32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvI64(a []int64, b []int64, recv []int64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvU(a []uint, b []uint, recv []uint) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvU8(a []uint8, b []uint8, recv []uint8) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvU16(a []uint16, b []uint16, recv []uint16) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvU32(a []uint32, b []uint32, recv []uint32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvU64(a []uint64, b []uint64, recv []uint64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvF32(a []float32, b []float32, recv []float32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvF64(a []float64, b []float64, recv []float64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvC64(a []complex64, b []complex64, recv []complex64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func MulRecvC128(a []complex128, b []complex128, recv []complex128) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] * b[i] } } func DivRecvI(a []int, b []int, recv []int) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return 
errs } return nil } func DivRecvI8(a []int8, b []int8, recv []int8) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvI16(a []int16, b []int16, recv []int16) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvI32(a []int32, b []int32, recv []int32) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvI64(a []int64, b []int64, recv []int64) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvU(a []uint, b []uint, recv []uint) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvU8(a []uint8, b []uint8, recv []uint8) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvU16(a []uint16, b []uint16, recv []uint16) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvU32(a []uint32, b []uint32, recv []uint32) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvU64(a []uint64, b []uint64, recv []uint64) (err error) { a = a[:len(recv)] b = b[:len(recv)] var errs errorIndices for i := range recv { if b[i] == 0 { errs = append(errs, i) recv[i] = 0 continue } recv[i] = a[i] / b[i] } if err != nil { return } if len(errs) > 0 { return errs } return nil } func DivRecvF32(a []float32, b []float32, recv []float32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] / b[i] } } func DivRecvF64(a []float64, b []float64, recv []float64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] / b[i] } } func DivRecvC64(a []complex64, b []complex64, recv []complex64) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] / b[i] } } func DivRecvC128(a []complex128, b []complex128, recv []complex128) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = a[i] / b[i] } } func PowRecvF32(a []float32, b []float32, recv []float32) { a = a[:len(recv)] b = b[:len(recv)] for i := range recv { recv[i] = math32.Pow(a[i], b[i]) } } func PowRecvF64(a []float64, b []float64, recv []float64) { a = 
a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = math.Pow(a[i], b[i])
	}
}

func PowRecvC64(a []complex64, b []complex64, recv []complex64) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = complex64(cmplx.Pow(complex128(a[i]), complex128(b[i])))
	}
}

func PowRecvC128(a []complex128, b []complex128, recv []complex128) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = cmplx.Pow(a[i], b[i])
	}
}

// ModRecv* write the element-wise remainder of a and b into recv. The
// integer variants use the native % operator; the floating-point variants
// defer to math32.Mod/math.Mod. The leading re-slices to len(recv) let the
// compiler elide bounds checks inside the loops.
func ModRecvI(a []int, b []int, recv []int) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvI8(a []int8, b []int8, recv []int8) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvI16(a []int16, b []int16, recv []int16) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvI32(a []int32, b []int32, recv []int32) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvI64(a []int64, b []int64, recv []int64) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvU(a []uint, b []uint, recv []uint) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvU8(a []uint8, b []uint8, recv []uint8) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvU16(a []uint16, b []uint16, recv []uint16) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvU32(a []uint32, b []uint32, recv []uint32) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvU64(a []uint64, b []uint64, recv []uint64) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = a[i] % b[i]
	}
}

func ModRecvF32(a []float32, b []float32, recv []float32) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = math32.Mod(a[i], b[i])
	}
}

func ModRecvF64(a []float64, b []float64, recv []float64) {
	a = a[:len(recv)]
	b = b[:len(recv)]
	for i := range recv {
		recv[i] = math.Mod(a[i], b[i])
	}
}

tensor-0.9.24/internal/execution/generic_cmp_mixed.go
// Code generated by genlib2. DO NOT EDIT.
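
// This file holds the mixed scalar-vector ("SV") comparison kernels, in
// three flavours: the plain SV functions write the result of comparing the
// scalar a against each element of b into a caller-allocated retVal slice;
// the SameSV functions overwrite b in place, encoding the result in b's own
// type (1/0 for numeric types, "true"/"false" for strings); and the IterSV
// functions step b and retVal through Iterators, so data that is not laid
// out contiguously (e.g. strided or masked tensors) can be compared as well.
// A minimal usage sketch of the plain flavour — the values below are purely
// illustrative and not taken from this package; the caller is assumed to
// have allocated retVal with the same length as b:
//
//	b := []int{1, 5, 3}
//	retVal := make([]bool, len(b))
//	GtSVI(4, b, retVal) // retVal is now [true false true]
//
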
package execution

import "unsafe"

func GtSVI(a int, b []int, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVI8(a int8, b []int8, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVI16(a int16, b []int16, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVI32(a int32, b []int32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVI64(a int64, b []int64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVU(a uint, b []uint, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVU8(a uint8, b []uint8, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVU16(a uint16, b []uint16, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVU32(a uint32, b []uint32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVU64(a uint64, b []uint64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVF32(a float32, b []float32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVF64(a float64, b []float64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GtSVStr(a string, b []string, retVal []bool) {
	for i := range retVal {
		retVal[i] = a > b[i]
	}
}

func GteSVI(a int, b []int, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVI8(a int8, b []int8, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVI16(a int16, b []int16, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVI32(a int32, b []int32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVI64(a int64, b []int64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVU(a uint, b []uint, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVU8(a uint8, b []uint8, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVU16(a uint16, b []uint16, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVU32(a uint32, b []uint32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVU64(a uint64, b []uint64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVF32(a float32, b []float32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVF64(a float64, b []float64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func GteSVStr(a string, b []string, retVal []bool) {
	for i := range retVal {
		retVal[i] = a >= b[i]
	}
}

func LtSVI(a int, b []int, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVI8(a int8, b []int8, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVI16(a int16, b []int16, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVI32(a int32, b []int32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVI64(a int64, b []int64, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVU(a uint, b []uint, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVU8(a uint8, b []uint8, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVU16(a uint16, b []uint16, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVU32(a uint32, b []uint32, retVal []bool) {
	for i := range retVal {
		retVal[i] = a < b[i]
	}
}

func LtSVU64(a uint64, b []uint64,
retVal []bool) { for i := range retVal { retVal[i] = a < b[i] } } func LtSVF32(a float32, b []float32, retVal []bool) { for i := range retVal { retVal[i] = a < b[i] } } func LtSVF64(a float64, b []float64, retVal []bool) { for i := range retVal { retVal[i] = a < b[i] } } func LtSVStr(a string, b []string, retVal []bool) { for i := range retVal { retVal[i] = a < b[i] } } func LteSVI(a int, b []int, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVI8(a int8, b []int8, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVI16(a int16, b []int16, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVI32(a int32, b []int32, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVI64(a int64, b []int64, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVU(a uint, b []uint, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVU8(a uint8, b []uint8, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVU16(a uint16, b []uint16, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVU32(a uint32, b []uint32, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVU64(a uint64, b []uint64, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVF32(a float32, b []float32, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVF64(a float64, b []float64, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func LteSVStr(a string, b []string, retVal []bool) { for i := range retVal { retVal[i] = a <= b[i] } } func EqSVB(a bool, b []bool, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVI(a int, b []int, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVI8(a int8, b []int8, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVI16(a int16, b []int16, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVI32(a int32, b []int32, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVI64(a int64, b []int64, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVU(a uint, b []uint, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVU8(a uint8, b []uint8, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVU16(a uint16, b []uint16, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVU32(a uint32, b []uint32, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVU64(a uint64, b []uint64, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVUintptr(a uintptr, b []uintptr, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVF32(a float32, b []float32, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVF64(a float64, b []float64, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVC64(a complex64, b []complex64, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVC128(a complex128, b []complex128, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVStr(a string, b []string, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func EqSVUnsafePointer(a unsafe.Pointer, b []unsafe.Pointer, retVal []bool) { for i := range retVal { retVal[i] = a == b[i] } } func NeSVB(a 
bool, b []bool, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVI(a int, b []int, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVI8(a int8, b []int8, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVI16(a int16, b []int16, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVI32(a int32, b []int32, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVI64(a int64, b []int64, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVU(a uint, b []uint, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVU8(a uint8, b []uint8, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVU16(a uint16, b []uint16, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVU32(a uint32, b []uint32, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVU64(a uint64, b []uint64, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVUintptr(a uintptr, b []uintptr, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVF32(a float32, b []float32, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVF64(a float64, b []float64, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVC64(a complex64, b []complex64, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVC128(a complex128, b []complex128, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVStr(a string, b []string, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func NeSVUnsafePointer(a unsafe.Pointer, b []unsafe.Pointer, retVal []bool) { for i := range retVal { retVal[i] = a != b[i] } } func GtSameSVI(a int, b []int) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVI8(a int8, b []int8) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVI16(a int16, b []int16) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVI32(a int32, b []int32) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVI64(a int64, b []int64) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVU(a uint, b []uint) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVU8(a uint8, b []uint8) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVU16(a uint16, b []uint16) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVU32(a uint32, b []uint32) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVU64(a uint64, b []uint64) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVF32(a float32, b []float32) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVF64(a float64, b []float64) { for i := range b { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } func GtSameSVStr(a string, b []string) { for i := range b { if a > b[i] { b[i] = "true" } else { b[i] = "false" } } } func GteSameSVI(a int, b []int) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVI8(a int8, b []int8) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVI16(a int16, b []int16) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVI32(a 
int32, b []int32) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVI64(a int64, b []int64) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVU(a uint, b []uint) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVU8(a uint8, b []uint8) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVU16(a uint16, b []uint16) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVU32(a uint32, b []uint32) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVU64(a uint64, b []uint64) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVF32(a float32, b []float32) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVF64(a float64, b []float64) { for i := range b { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } func GteSameSVStr(a string, b []string) { for i := range b { if a >= b[i] { b[i] = "true" } else { b[i] = "false" } } } func LtSameSVI(a int, b []int) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVI8(a int8, b []int8) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVI16(a int16, b []int16) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVI32(a int32, b []int32) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVI64(a int64, b []int64) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVU(a uint, b []uint) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVU8(a uint8, b []uint8) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVU16(a uint16, b []uint16) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVU32(a uint32, b []uint32) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVU64(a uint64, b []uint64) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVF32(a float32, b []float32) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVF64(a float64, b []float64) { for i := range b { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } func LtSameSVStr(a string, b []string) { for i := range b { if a < b[i] { b[i] = "true" } else { b[i] = "false" } } } func LteSameSVI(a int, b []int) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVI8(a int8, b []int8) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVI16(a int16, b []int16) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVI32(a int32, b []int32) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVI64(a int64, b []int64) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVU(a uint, b []uint) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVU8(a uint8, b []uint8) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVU16(a uint16, b []uint16) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVU32(a uint32, b []uint32) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVU64(a uint64, b []uint64) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVF32(a float32, b []float32) 
{ for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVF64(a float64, b []float64) { for i := range b { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } func LteSameSVStr(a string, b []string) { for i := range b { if a <= b[i] { b[i] = "true" } else { b[i] = "false" } } } func EqSameSVB(a bool, b []bool) { for i := range b { if a == b[i] { b[i] = true } else { b[i] = false } } } func EqSameSVI(a int, b []int) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVI8(a int8, b []int8) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVI16(a int16, b []int16) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVI32(a int32, b []int32) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVI64(a int64, b []int64) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVU(a uint, b []uint) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVU8(a uint8, b []uint8) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVU16(a uint16, b []uint16) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVU32(a uint32, b []uint32) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVU64(a uint64, b []uint64) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVUintptr(a uintptr, b []uintptr) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVF32(a float32, b []float32) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVF64(a float64, b []float64) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVC64(a complex64, b []complex64) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVC128(a complex128, b []complex128) { for i := range b { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } func EqSameSVStr(a string, b []string) { for i := range b { if a == b[i] { b[i] = "true" } else { b[i] = "false" } } } func NeSameSVB(a bool, b []bool) { for i := range b { if a != b[i] { b[i] = true } else { b[i] = false } } } func NeSameSVI(a int, b []int) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVI8(a int8, b []int8) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVI16(a int16, b []int16) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVI32(a int32, b []int32) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVI64(a int64, b []int64) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVU(a uint, b []uint) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVU8(a uint8, b []uint8) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVU16(a uint16, b []uint16) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVU32(a uint32, b []uint32) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVU64(a uint64, b []uint64) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVUintptr(a uintptr, b []uintptr) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVF32(a float32, b []float32) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVF64(a 
float64, b []float64) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVC64(a complex64, b []complex64) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVC128(a complex128, b []complex128) { for i := range b { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } func NeSameSVStr(a string, b []string) { for i := range b { if a != b[i] { b[i] = "true" } else { b[i] = "false" } } } func GtIterSVI(a int, b []int, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVI8(a int8, b []int8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVI16(a int16, b []int16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVI32(a int32, b []int32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVI64(a int64, b []int64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVU(a uint, b []uint, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVU8(a uint8, b []uint8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVU16(a uint16, b []uint16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVU32(a uint32, b []uint32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = 
rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVU64(a uint64, b []uint64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVF32(a float32, b []float32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVF64(a float64, b []float64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GtIterSVStr(a string, b []string, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a > b[i] } } return } func GteIterSVI(a int, b []int, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVI8(a int8, b []int8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVI16(a int16, b []int16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVI32(a int32, b []int32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVI64(a int64, b []int64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVU(a uint, b []uint, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err 
= handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVU8(a uint8, b []uint8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVU16(a uint16, b []uint16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVU32(a uint32, b []uint32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVU64(a uint64, b []uint64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVF32(a float32, b []float32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVF64(a float64, b []float64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func GteIterSVStr(a string, b []string, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a >= b[i] } } return } func LtIterSVI(a int, b []int, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVI8(a int8, b []int8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVI16(a int16, b []int16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { 
if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVI32(a int32, b []int32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVI64(a int64, b []int64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVU(a uint, b []uint, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVU8(a uint8, b []uint8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVU16(a uint16, b []uint16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVU32(a uint32, b []uint32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVU64(a uint64, b []uint64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVF32(a float32, b []float32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVF64(a float64, b []float64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LtIterSVStr(a string, b []string, retVal []bool, bit Iterator, rit Iterator) (err 
error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a < b[i] } } return } func LteIterSVI(a int, b []int, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVI8(a int8, b []int8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVI16(a int16, b []int16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVI32(a int32, b []int32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVI64(a int64, b []int64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVU(a uint, b []uint, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVU8(a uint8, b []uint8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVU16(a uint16, b []uint16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVU32(a uint32, b []uint32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVU64(a uint64, b []uint64, 
retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVF32(a float32, b []float32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVF64(a float64, b []float64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func LteIterSVStr(a string, b []string, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a <= b[i] } } return } func EqIterSVB(a bool, b []bool, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVI(a int, b []int, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVI8(a int8, b []int8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVI16(a int16, b []int16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVI32(a int32, b []int32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVI64(a int64, b []int64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } 
return } func EqIterSVU(a uint, b []uint, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVU8(a uint8, b []uint8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVU16(a uint16, b []uint16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVU32(a uint32, b []uint32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVU64(a uint64, b []uint64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVUintptr(a uintptr, b []uintptr, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVF32(a float32, b []float32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVF64(a float64, b []float64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVC64(a complex64, b []complex64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVC128(a complex128, b []complex128, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != 
nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVStr(a string, b []string, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func EqIterSVUnsafePointer(a unsafe.Pointer, b []unsafe.Pointer, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a == b[i] } } return } func NeIterSVB(a bool, b []bool, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVI(a int, b []int, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVI8(a int8, b []int8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVI16(a int16, b []int16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVI32(a int32, b []int32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVI64(a int64, b []int64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVU(a uint, b []uint, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVU8(a uint8, b []uint8, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = 
handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVU16(a uint16, b []uint16, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVU32(a uint32, b []uint32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVU64(a uint64, b []uint64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVUintptr(a uintptr, b []uintptr, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVF32(a float32, b []float32, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVF64(a float64, b []float64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVC64(a complex64, b []complex64, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVC128(a complex128, b []complex128, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVStr(a string, b []string, retVal []bool, bit Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func NeIterSVUnsafePointer(a unsafe.Pointer, b []unsafe.Pointer, retVal []bool, bit Iterator, rit Iterator) 
(err error) { var i, k int var validi, validk bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a != b[i] } } return } func GtSameIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GtSameIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = "true" } else { b[i] = "false" 
} } } return } func GteSameIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func GteSameIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a >= b[i] { b[i] = "true" } else { b[i] = "false" } } } return } func LtSameIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { 
b[i] = 0 } } } return } func LtSameIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LtSameIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = "true" } else { b[i] = "false" } } } return } func LteSameIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } 
} return } func LteSameIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func LteSameIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a <= b[i] { b[i] = "true" } else { b[i] = "false" } } } return } func EqSameIterSVB(a bool, b []bool, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = true } else { b[i] = false } } } return } func EqSameIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else 
{ b[i] = 0 } } } return } func EqSameIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVUintptr(a uintptr, b []uintptr, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a == b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func EqSameIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a 
== b[i] { b[i] = "true" } else { b[i] = "false" } } } return } func NeSameIterSVB(a bool, b []bool, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = true } else { b[i] = false } } } return } func NeSameIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVU32(a uint32, b []uint32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVU64(a uint64, b []uint64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVUintptr(a uintptr, b []uintptr, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVF32(a float32, b []float32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVF64(a float64, b []float64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if 
validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVC64(a complex64, b []complex64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVC128(a complex128, b []complex128, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = 1 } else { b[i] = 0 } } } return } func NeSameIterSVStr(a string, b []string, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a != b[i] { b[i] = "true" } else { b[i] = "false" } } } return } func GtVSI(a []int, b int, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSI8(a []int8, b int8, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSI16(a []int16, b int16, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSI32(a []int32, b int32, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSI64(a []int64, b int64, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSU(a []uint, b uint, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSU8(a []uint8, b uint8, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSU16(a []uint16, b uint16, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSU32(a []uint32, b uint32, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSU64(a []uint64, b uint64, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSF32(a []float32, b float32, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSF64(a []float64, b float64, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GtVSStr(a []string, b string, retVal []bool) { for i := range retVal { retVal[i] = a[i] > b } } func GteVSI(a []int, b int, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSI8(a []int8, b int8, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSI16(a []int16, b int16, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSI32(a []int32, b int32, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSI64(a []int64, b int64, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSU(a []uint, b uint, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSU8(a []uint8, b uint8, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSU16(a []uint16, b uint16, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSU32(a []uint32, b uint32, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSU64(a []uint64, b uint64, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSF32(a []float32, b float32, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSF64(a []float64, b float64, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func GteVSStr(a []string, b string, retVal []bool) { for i := range retVal { retVal[i] = a[i] >= b } } func LtVSI(a []int, b int, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } 
func LtVSI8(a []int8, b int8, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSI16(a []int16, b int16, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSI32(a []int32, b int32, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSI64(a []int64, b int64, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSU(a []uint, b uint, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSU8(a []uint8, b uint8, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSU16(a []uint16, b uint16, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSU32(a []uint32, b uint32, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSU64(a []uint64, b uint64, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSF32(a []float32, b float32, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSF64(a []float64, b float64, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LtVSStr(a []string, b string, retVal []bool) { for i := range retVal { retVal[i] = a[i] < b } } func LteVSI(a []int, b int, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSI8(a []int8, b int8, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSI16(a []int16, b int16, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSI32(a []int32, b int32, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSI64(a []int64, b int64, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSU(a []uint, b uint, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSU8(a []uint8, b uint8, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSU16(a []uint16, b uint16, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSU32(a []uint32, b uint32, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSU64(a []uint64, b uint64, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSF32(a []float32, b float32, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSF64(a []float64, b float64, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func LteVSStr(a []string, b string, retVal []bool) { for i := range retVal { retVal[i] = a[i] <= b } } func EqVSB(a []bool, b bool, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSI(a []int, b int, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSI8(a []int8, b int8, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSI16(a []int16, b int16, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSI32(a []int32, b int32, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSI64(a []int64, b int64, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSU(a []uint, b uint, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSU8(a []uint8, b uint8, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSU16(a []uint16, b uint16, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSU32(a []uint32, b uint32, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSU64(a []uint64, b uint64, retVal []bool) { for i 
:= range retVal { retVal[i] = a[i] == b } } func EqVSUintptr(a []uintptr, b uintptr, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSF32(a []float32, b float32, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSF64(a []float64, b float64, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSC64(a []complex64, b complex64, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSC128(a []complex128, b complex128, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSStr(a []string, b string, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func EqVSUnsafePointer(a []unsafe.Pointer, b unsafe.Pointer, retVal []bool) { for i := range retVal { retVal[i] = a[i] == b } } func NeVSB(a []bool, b bool, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSI(a []int, b int, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSI8(a []int8, b int8, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSI16(a []int16, b int16, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSI32(a []int32, b int32, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSI64(a []int64, b int64, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSU(a []uint, b uint, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSU8(a []uint8, b uint8, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSU16(a []uint16, b uint16, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSU32(a []uint32, b uint32, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSU64(a []uint64, b uint64, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSUintptr(a []uintptr, b uintptr, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSF32(a []float32, b float32, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSF64(a []float64, b float64, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSC64(a []complex64, b complex64, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSC128(a []complex128, b complex128, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSStr(a []string, b string, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func NeVSUnsafePointer(a []unsafe.Pointer, b unsafe.Pointer, retVal []bool) { for i := range retVal { retVal[i] = a[i] != b } } func GtSameVSI(a []int, b int) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSI8(a []int8, b int8) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSI16(a []int16, b int16) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSI32(a []int32, b int32) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSI64(a []int64, b int64) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSU(a []uint, b uint) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSU8(a []uint8, b uint8) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSU16(a []uint16, b uint16) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSU32(a []uint32, b uint32) { for i := range a { if 
a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSU64(a []uint64, b uint64) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSF32(a []float32, b float32) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSF64(a []float64, b float64) { for i := range a { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } func GtSameVSStr(a []string, b string) { for i := range a { if a[i] > b { a[i] = "true" } else { a[i] = "false" } } } func GteSameVSI(a []int, b int) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSI8(a []int8, b int8) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSI16(a []int16, b int16) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSI32(a []int32, b int32) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSI64(a []int64, b int64) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSU(a []uint, b uint) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSU8(a []uint8, b uint8) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSU16(a []uint16, b uint16) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSU32(a []uint32, b uint32) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSU64(a []uint64, b uint64) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSF32(a []float32, b float32) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSF64(a []float64, b float64) { for i := range a { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } func GteSameVSStr(a []string, b string) { for i := range a { if a[i] >= b { a[i] = "true" } else { a[i] = "false" } } } func LtSameVSI(a []int, b int) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSI8(a []int8, b int8) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSI16(a []int16, b int16) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSI32(a []int32, b int32) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSI64(a []int64, b int64) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSU(a []uint, b uint) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSU8(a []uint8, b uint8) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSU16(a []uint16, b uint16) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSU32(a []uint32, b uint32) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSU64(a []uint64, b uint64) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSF32(a []float32, b float32) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSF64(a []float64, b float64) { for i := range a { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } func LtSameVSStr(a []string, b string) { for i := range a { if a[i] < b { a[i] = "true" } else { a[i] = "false" } } } func LteSameVSI(a []int, b int) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSI8(a []int8, b int8) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSI16(a []int16, b int16) { for i := range a { if a[i] <= b { a[i] = 
1 } else { a[i] = 0 } } } func LteSameVSI32(a []int32, b int32) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSI64(a []int64, b int64) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSU(a []uint, b uint) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSU8(a []uint8, b uint8) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSU16(a []uint16, b uint16) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSU32(a []uint32, b uint32) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSU64(a []uint64, b uint64) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSF32(a []float32, b float32) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSF64(a []float64, b float64) { for i := range a { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } func LteSameVSStr(a []string, b string) { for i := range a { if a[i] <= b { a[i] = "true" } else { a[i] = "false" } } } func EqSameVSB(a []bool, b bool) { for i := range a { if a[i] == b { a[i] = true } else { a[i] = false } } } func EqSameVSI(a []int, b int) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSI8(a []int8, b int8) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSI16(a []int16, b int16) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSI32(a []int32, b int32) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSI64(a []int64, b int64) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSU(a []uint, b uint) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSU8(a []uint8, b uint8) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSU16(a []uint16, b uint16) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSU32(a []uint32, b uint32) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSU64(a []uint64, b uint64) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSUintptr(a []uintptr, b uintptr) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSF32(a []float32, b float32) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSF64(a []float64, b float64) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSC64(a []complex64, b complex64) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSC128(a []complex128, b complex128) { for i := range a { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } func EqSameVSStr(a []string, b string) { for i := range a { if a[i] == b { a[i] = "true" } else { a[i] = "false" } } } func NeSameVSB(a []bool, b bool) { for i := range a { if a[i] != b { a[i] = true } else { a[i] = false } } } func NeSameVSI(a []int, b int) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSI8(a []int8, b int8) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSI16(a []int16, b int16) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSI32(a []int32, b int32) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSI64(a []int64, b int64) { for i := range a { if a[i] 
!= b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSU(a []uint, b uint) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSU8(a []uint8, b uint8) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSU16(a []uint16, b uint16) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSU32(a []uint32, b uint32) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSU64(a []uint64, b uint64) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSUintptr(a []uintptr, b uintptr) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSF32(a []float32, b float32) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSF64(a []float64, b float64) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSC64(a []complex64, b complex64) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSC128(a []complex128, b complex128) { for i := range a { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } func NeSameVSStr(a []string, b string) { for i := range a { if a[i] != b { a[i] = "true" } else { a[i] = "false" } } } func GtIterVSI(a []int, b int, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSI8(a []int8, b int8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSI16(a []int16, b int16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSI32(a []int32, b int32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSI64(a []int64, b int64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSU(a []uint, b uint, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSU8(a []uint8, b uint8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = 
ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSU16(a []uint16, b uint16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSU32(a []uint32, b uint32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSU64(a []uint64, b uint64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSF32(a []float32, b float32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSF64(a []float64, b float64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GtIterVSStr(a []string, b string, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] > b } } return } func GteIterVSI(a []int, b int, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSI8(a []int8, b int8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSI16(a []int16, b int16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSI32(a []int32, b int32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int 
var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSI64(a []int64, b int64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSU(a []uint, b uint, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSU8(a []uint8, b uint8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSU16(a []uint16, b uint16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSU32(a []uint32, b uint32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSU64(a []uint64, b uint64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSF32(a []float32, b float32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSF64(a []float64, b float64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func GteIterVSStr(a []string, b string, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] >= b } } return } func LtIterVSI(a []int, b int, retVal 
[]bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSI8(a []int8, b int8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSI16(a []int16, b int16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSI32(a []int32, b int32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSI64(a []int64, b int64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSU(a []uint, b uint, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSU8(a []uint8, b uint8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSU16(a []uint16, b uint16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSU32(a []uint32, b uint32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSU64(a []uint64, b uint64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func 
LtIterVSF32(a []float32, b float32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSF64(a []float64, b float64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LtIterVSStr(a []string, b string, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] < b } } return } func LteIterVSI(a []int, b int, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSI8(a []int8, b int8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSI16(a []int16, b int16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSI32(a []int32, b int32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSI64(a []int64, b int64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSU(a []uint, b uint, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSU8(a []uint8, b uint8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && 
validk { retVal[k] = a[i] <= b } } return } func LteIterVSU16(a []uint16, b uint16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSU32(a []uint32, b uint32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSU64(a []uint64, b uint64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSF32(a []float32, b float32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSF64(a []float64, b float64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func LteIterVSStr(a []string, b string, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] <= b } } return } func EqIterVSB(a []bool, b bool, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSI(a []int, b int, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSI8(a []int8, b int8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSI16(a []int16, b int16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); 
err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSI32(a []int32, b int32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSI64(a []int64, b int64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSU(a []uint, b uint, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSU8(a []uint8, b uint8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSU16(a []uint16, b uint16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSU32(a []uint32, b uint32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSU64(a []uint64, b uint64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSUintptr(a []uintptr, b uintptr, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSF32(a []float32, b float32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSF64(a []float64, b float64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = 
handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSC64(a []complex64, b complex64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSC128(a []complex128, b complex128, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSStr(a []string, b string, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func EqIterVSUnsafePointer(a []unsafe.Pointer, b unsafe.Pointer, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] == b } } return } func NeIterVSB(a []bool, b bool, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSI(a []int, b int, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSI8(a []int8, b int8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSI16(a []int16, b int16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSI32(a []int32, b int32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSI64(a []int64, b int64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var 
validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSU(a []uint, b uint, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSU8(a []uint8, b uint8, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSU16(a []uint16, b uint16, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSU32(a []uint32, b uint32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSU64(a []uint64, b uint64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSUintptr(a []uintptr, b uintptr, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSF32(a []float32, b float32, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSF64(a []float64, b float64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSC64(a []complex64, b complex64, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSC128(a []complex128, b 
complex128, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSStr(a []string, b string, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func NeIterVSUnsafePointer(a []unsafe.Pointer, b unsafe.Pointer, retVal []bool, ait Iterator, rit Iterator) (err error) { var i, k int var validi, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validk { retVal[k] = a[i] != b } } return } func GtSameIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { 
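// Editor's note: the *IterVS kernels above all share one shape. Two
// iterators are advanced in lockstep, ait over the input slice and rit over
// the result, and an element is written only when both positions are valid.
// The iterator contract inferred from usage here (a sketch only; the full
// Iterator interface is defined elsewhere in this package) is:
//
//	type Iterator interface {
//		// NextValidity returns the next flat index, whether that index
//		// is valid (e.g. not masked out), and an error that doubles as
//		// the end-of-iteration signal.
//		NextValidity() (int, bool, error)
//	}
//
// handleNoOp presumably maps the package's end-of-iteration sentinel error
// to nil, which is why each loop breaks with err = handleNoOp(err) rather
// than returning the raw error.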
a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] > b { a[i] = "true" } else { a[i] = "false" } } } return } func GteSameIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if 
a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] >= b { a[i] = "true" } else { a[i] = "false" } } } return } func LtSameIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] 
< b { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < b { a[i] = "true" } else { a[i] = "false" } } } return } func LteSameIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] <= b { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if 
validi { if a[i] <= b { a[i] = "true" } else { a[i] = "false" } } } return } func EqSameIterVSB(a []bool, b bool, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = true } else { a[i] = false } } } return } func EqSameIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSUintptr(a []uintptr, b uintptr, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) 
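// Editor's note: in the *SameIterVS kernels, "Same" means the comparison is
// materialised in place. The input slice itself is overwritten: numeric
// elements become 1 or 0, bools become true or false, and strings, having no
// in-band numeric encoding, become the literals "true" and "false". A
// hypothetical before/after, assuming a plain full-range iterator it:
//
//	a := []int{3, 1, 4}
//	err := GtSameIterVSI(a, 2, it)
//	// on success, a == []int{1, 0, 1}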
break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] == b { a[i] = "true" } else { a[i] = "false" } } } return } func NeSameIterVSB(a []bool, b bool, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = true } else { a[i] = false } } } return } func NeSameIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSU32(a []uint32, b uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSU64(a []uint64, b uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = 
handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSUintptr(a []uintptr, b uintptr, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSF32(a []float32, b float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSF64(a []float64, b float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSC64(a []complex64, b complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSC128(a []complex128, b complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterVSStr(a []string, b string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] != b { a[i] = "true" } else { a[i] = "false" } } } return } tensor-0.9.24/internal/execution/generic_cmp_vv.go000066400000000000000000003264711426512615100222740ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
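//
// Editor's note: this file holds the vector-vector (VV) counterparts of the
// vector-scalar kernels in the preceding file; both are emitted by genlib2,
// the package's code generator. Each kernel reslices its arguments up front
// (b = b[:len(a)], retVal = retVal[:len(a)]), a common Go idiom intended to
// let the compiler prove the slices share a length and drop per-element
// bounds checks inside the loop. A minimal, hypothetical usage sketch:
//
//	a := []float64{1, 2, 3}
//	b := []float64{3, 2, 1}
//	out := make([]bool, len(a))
//	GtF64(a, b, out)
//	// out == []bool{false, false, true}
//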
package execution

import "unsafe"

func GtI(a []int, b []int, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtI8(a []int8, b []int8, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtI16(a []int16, b []int16, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtI32(a []int32, b []int32, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtI64(a []int64, b []int64, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtU(a []uint, b []uint, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtU8(a []uint8, b []uint8, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtU16(a []uint16, b []uint16, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtU32(a []uint32, b []uint32, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtU64(a []uint64, b []uint64, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtF32(a []float32, b []float32, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtF64(a []float64, b []float64, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GtStr(a []string, b []string, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] > b[i]
	}
}

func GteI(a []int, b []int, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteI8(a []int8, b []int8, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteI16(a []int16, b []int16, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteI32(a []int32, b []int32, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteI64(a []int64, b []int64, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteU(a []uint, b []uint, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteU8(a []uint8, b []uint8, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteU16(a []uint16, b []uint16, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteU32(a []uint32, b []uint32, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteU64(a []uint64, b []uint64, retVal []bool) {
	a = a[:]
	b = b[:len(a)]
	retVal = retVal[:len(a)]
	for i := range retVal {
		retVal[i] = a[i] >= b[i]
	}
}

func GteF32(a []float32, b
[]float32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] >= b[i] } } func GteF64(a []float64, b []float64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] >= b[i] } } func GteStr(a []string, b []string, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] >= b[i] } } func LtI(a []int, b []int, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtI8(a []int8, b []int8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtI16(a []int16, b []int16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtI32(a []int32, b []int32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtI64(a []int64, b []int64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtU(a []uint, b []uint, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtU8(a []uint8, b []uint8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtU16(a []uint16, b []uint16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtU32(a []uint32, b []uint32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtU64(a []uint64, b []uint64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtF32(a []float32, b []float32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtF64(a []float64, b []float64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LtStr(a []string, b []string, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] < b[i] } } func LteI(a []int, b []int, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteI8(a []int8, b []int8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteI16(a []int16, b []int16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteI32(a []int32, b []int32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteI64(a []int64, b []int64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteU(a []uint, b []uint, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteU8(a []uint8, b []uint8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteU16(a []uint16, b []uint16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = 
retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteU32(a []uint32, b []uint32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteU64(a []uint64, b []uint64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteF32(a []float32, b []float32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteF64(a []float64, b []float64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func LteStr(a []string, b []string, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] <= b[i] } } func EqB(a []bool, b []bool, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqI(a []int, b []int, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqI8(a []int8, b []int8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqI16(a []int16, b []int16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqI32(a []int32, b []int32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqI64(a []int64, b []int64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqU(a []uint, b []uint, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqU8(a []uint8, b []uint8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqU16(a []uint16, b []uint16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqU32(a []uint32, b []uint32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqU64(a []uint64, b []uint64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqUintptr(a []uintptr, b []uintptr, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqF32(a []float32, b []float32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqF64(a []float64, b []float64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqC64(a []complex64, b []complex64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqC128(a []complex128, b []complex128, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqStr(a []string, b []string, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func EqUnsafePointer(a []unsafe.Pointer, b []unsafe.Pointer, retVal []bool) { a = a[:] b = 
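// Editor's note: the Eq and Ne kernels cover more element types than the
// ordered comparisons. bool, uintptr, complex64, complex128 and
// unsafe.Pointer support == and != in Go but have no <, <=, > or >=, so
// they appear only in the equality and inequality families.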
b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] == b[i] } } func NeB(a []bool, b []bool, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeI(a []int, b []int, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeI8(a []int8, b []int8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeI16(a []int16, b []int16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeI32(a []int32, b []int32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeI64(a []int64, b []int64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeU(a []uint, b []uint, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeU8(a []uint8, b []uint8, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeU16(a []uint16, b []uint16, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeU32(a []uint32, b []uint32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeU64(a []uint64, b []uint64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeUintptr(a []uintptr, b []uintptr, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeF32(a []float32, b []float32, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeF64(a []float64, b []float64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeC64(a []complex64, b []complex64, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeC128(a []complex128, b []complex128, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeStr(a []string, b []string, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func NeUnsafePointer(a []unsafe.Pointer, b []unsafe.Pointer, retVal []bool) { a = a[:] b = b[:len(a)] retVal = retVal[:len(a)] for i := range retVal { retVal[i] = a[i] != b[i] } } func GtSameI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameU(a []uint, b 
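// Editor's note: the *Same VV kernels (GtSame*, LtSame*, EqSame*, ...) mirror
// the *SameIterVS family but walk contiguous data directly with a range loop
// instead of an iterator, overwriting a in place: 1 or 0 for numeric types
// (in the complex kernels the untyped constant 1 converts to complex(1, 0)),
// and "true" or "false" for strings. For example (hypothetical values):
//
//	a := []float64{1, 5}
//	b := []float64{2, 2}
//	GtSameF64(a, b)
//	// a == []float64{0, 1}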
[]uint) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameF32(a []float32, b []float32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameF64(a []float64, b []float64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = 1 } else { a[i] = 0 } } } func GtSameStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { if a[i] > b[i] { a[i] = "true" } else { a[i] = "false" } } } func GteSameI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameF32(a []float32, b []float32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameF64(a []float64, b []float64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = 1 } else { a[i] = 0 } } } func GteSameStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { if a[i] >= b[i] { a[i] = "true" } else { a[i] = "false" } } } func LtSameI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { 
a[i] = 1 } else { a[i] = 0 } } } func LtSameU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameF32(a []float32, b []float32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameF64(a []float64, b []float64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = 1 } else { a[i] = 0 } } } func LtSameStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { if a[i] < b[i] { a[i] = "true" } else { a[i] = "false" } } } func LteSameI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameF32(a []float32, b []float32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameF64(a []float64, b []float64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = 1 } else { a[i] = 0 } } } func LteSameStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { if a[i] <= b[i] { a[i] = "true" } else { a[i] = "false" } } } func EqSameB(a []bool, b []bool) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = true } else { a[i] = false } } } func EqSameI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameU(a []uint, b 
[]uint) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameUintptr(a []uintptr, b []uintptr) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameF32(a []float32, b []float32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameF64(a []float64, b []float64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameC128(a []complex128, b []complex128) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = 1 } else { a[i] = 0 } } } func EqSameStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { if a[i] == b[i] { a[i] = "true" } else { a[i] = "false" } } } func NeSameB(a []bool, b []bool) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = true } else { a[i] = false } } } func NeSameI(a []int, b []int) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameI8(a []int8, b []int8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameI16(a []int16, b []int16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameI32(a []int32, b []int32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameI64(a []int64, b []int64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameU(a []uint, b []uint) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameU8(a []uint8, b []uint8) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameU16(a []uint16, b []uint16) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameU32(a []uint32, b []uint32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameU64(a []uint64, b []uint64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameUintptr(a []uintptr, b []uintptr) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameF32(a []float32, b []float32) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameF64(a []float64, b []float64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameC64(a []complex64, b []complex64) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameC128(a []complex128, b []complex128) { a = 
a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = 1 } else { a[i] = 0 } } } func NeSameStr(a []string, b []string) { a = a[:] b = b[:len(a)] for i := range a { if a[i] != b[i] { a[i] = "true" } else { a[i] = "false" } } } func GtIterI(a []int, b []int, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterI8(a []int8, b []int8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterI16(a []int16, b []int16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterI32(a []int32, b []int32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterI64(a []int64, b []int64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterU(a []uint, b []uint, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterU8(a []uint8, b []uint8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] 
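// Editor's note: the *Iter VV kernels drive three iterators in lockstep,
// ait over a, bit over b and rit over retVal, and assign only when all
// three positions are valid. A slot skipped because any iterator reports it
// invalid keeps whatever value retVal already held, so callers presumably
// start from a zeroed (or otherwise initialised) result slice.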
= a[i] > b[j] } } return } func GtIterU16(a []uint16, b []uint16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterU32(a []uint32, b []uint32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterU64(a []uint64, b []uint64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterF32(a []float32, b []float32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterF64(a []float64, b []float64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GtIterStr(a []string, b []string, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] > b[j] } } return } func GteIterI(a []int, b []int, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterI8(a []int8, b []int8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool 
for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterI16(a []int16, b []int16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterI32(a []int32, b []int32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterI64(a []int64, b []int64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterU(a []uint, b []uint, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterU8(a []uint8, b []uint8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterU16(a []uint16, b []uint16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterU32(a []uint32, b []uint32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, 
validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterU64(a []uint64, b []uint64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterF32(a []float32, b []float32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterF64(a []float64, b []float64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func GteIterStr(a []string, b []string, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] >= b[j] } } return } func LtIterI(a []int, b []int, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterI8(a []int8, b []int8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterI16(a []int16, b []int16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterI32(a []int32, b []int32, 
retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterI64(a []int64, b []int64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterU(a []uint, b []uint, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterU8(a []uint8, b []uint8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterU16(a []uint16, b []uint16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterU32(a []uint32, b []uint32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterU64(a []uint64, b []uint64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterF32(a []float32, b []float32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = 
handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterF64(a []float64, b []float64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LtIterStr(a []string, b []string, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] < b[j] } } return } func LteIterI(a []int, b []int, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterI8(a []int8, b []int8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterI16(a []int16, b []int16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterI32(a []int32, b []int32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterI64(a []int64, b []int64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break 
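// None of these loops tests a termination condition itself; each runs until
// an iterator returns an error, and handleNoOp then decides whether that
// error is real. Plain exhaustion is signalled as a "no op" error, which
// handleNoOp collapses to nil so normal termination is not reported as a
// failure, while genuine errors pass through. A sketch of that idiom,
// assuming a NoOpError marker interface like the one used elsewhere in the
// tensor package (shown for orientation only, not the exact implementation):
//
//	func handleNoOp(err error) error {
//		if err == nil {
//			return nil
//		}
//		if _, ok := err.(NoOpError); ok {
//			return nil // the iterator simply ran out
//		}
//		return err
//	}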
} if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterU(a []uint, b []uint, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterU8(a []uint8, b []uint8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterU16(a []uint16, b []uint16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterU32(a []uint32, b []uint32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterU64(a []uint64, b []uint64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterF32(a []float32, b []float32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterF64(a []float64, b []float64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func LteIterStr(a []string, b []string, retVal []bool, ait Iterator, bit Iterator, rit Iterator) 
(err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] <= b[j] } } return } func EqIterB(a []bool, b []bool, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterI(a []int, b []int, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterI8(a []int8, b []int8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterI16(a []int16, b []int16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterI32(a []int32, b []int32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterI64(a []int64, b []int64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterU(a []uint, b []uint, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { 
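// Unlike the ordered families above (Gt/Gte/Lt/Lte), the Eq and Ne families
// are additionally generated for bool, uintptr, complex64, complex128 and
// unsafe.Pointer: Go defines == and != for every comparable type, but <, <=,
// > and >= only for ordered types. string is the one non-numeric ordered
// type, compared lexically by byte value. For instance:
//
//	var x, y complex64
//	_ = x == y // fine: complex types are comparable
//	_ = x < y  // compile error: complex types are not ordered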
err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterU8(a []uint8, b []uint8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterU16(a []uint16, b []uint16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterU32(a []uint32, b []uint32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterU64(a []uint64, b []uint64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterUintptr(a []uintptr, b []uintptr, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterF32(a []float32, b []float32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterF64(a []float64, b []float64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == 
b[j] } } return } func EqIterC64(a []complex64, b []complex64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterC128(a []complex128, b []complex128, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterStr(a []string, b []string, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func EqIterUnsafePointer(a []unsafe.Pointer, b []unsafe.Pointer, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] == b[j] } } return } func NeIterB(a []bool, b []bool, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterI(a []int, b []int, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterI8(a []int8, b []int8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterI16(a []int16, b []int16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var 
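// Nothing in these kernels assumes a particular memory layout: any value
// satisfying the Iterator side used here, essentially
// NextValidity() (int, bool, error), can drive them. A deliberately tiny
// stub that satisfies that contract (a sketch for orientation only, not the
// package's real iterator type):
//
//	type noopError struct{}
//
//	func (noopError) Error() string { return "no op" }
//	func (noopError) NoOp() bool    { return true }
//
//	type sliceIter struct{ i, n int }
//
//	func (it *sliceIter) NextValidity() (int, bool, error) {
//		if it.i >= it.n {
//			return 0, false, noopError{} // exhausted; handleNoOp maps this to nil
//		}
//		i := it.i
//		it.i++
//		return i, true, nil
//	}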
validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterI32(a []int32, b []int32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterI64(a []int64, b []int64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterU(a []uint, b []uint, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterU8(a []uint8, b []uint8, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterU16(a []uint16, b []uint16, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterU32(a []uint32, b []uint32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterU64(a []uint64, b []uint64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = 
handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterUintptr(a []uintptr, b []uintptr, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterF32(a []float32, b []float32, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterF64(a []float64, b []float64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterC64(a []complex64, b []complex64, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterC128(a []complex128, b []complex128, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterStr(a []string, b []string, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj && validk { retVal[k] = a[i] != b[j] } } return } func NeIterUnsafePointer(a []unsafe.Pointer, b []unsafe.Pointer, retVal []bool, ait Iterator, bit Iterator, rit Iterator) (err error) { var i, j, k int var validi, validj, validk bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if k, validk, err = rit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && 
validj && validk { retVal[k] = a[i] != b[j] } } return } func GtSameIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = 
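// The *Same* variants that begin here differ from the kernels above in two
// ways: there is no separate []bool result, the outcome being written back
// into a in place, and since a keeps its element type, truth is encoded in
// that type: 1/0 for the numeric kernels and "true"/"false" for the string
// kernels (the bool Eq/Ne kernels further below can store true/false
// directly). In-place semantics, using the hypothetical newIter helper
// sketched in the earlier note:
//
//	a := []int{3, 1, 4}
//	b := []int{2, 2, 2}
//	if err := GtSameIterI(a, b, newIter(3), newIter(3)); err != nil {
//		panic(err)
//	}
//	// a is now []int{1, 0, 1}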
ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GtSameIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] > b[j] { a[i] = "true" } else { a[i] = "false" } } } return } func GteSameIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= 
b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func GteSameIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] >= b[j] { a[i] = "true" } else { a[i] = "false" } } } return } func LtSameIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj 
bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi 
&& validj { if a[i] < b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LtSameIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] < b[j] { a[i] = "true" } else { a[i] = "false" } } } return } func LteSameIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var 
validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func LteSameIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] <= b[j] { a[i] = "true" } else { a[i] = "false" } } } return } func EqSameIterB(a []bool, b []bool, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = true } else { a[i] = false } } } return } func EqSameIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = 
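// EqSameIterB above and NeSameIterB below are the only Same kernels whose
// element type can hold the result directly, so they assign true/false
// rather than 1/0. For the bool case the generated branch could be collapsed
// to a[i] = a[i] == b[j], but the template keeps a single if/else shape
// across all element types, which is why the branchy form appears here too.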
handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterUintptr(a []uintptr, b []uintptr, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterC64(a []complex64, b []complex64, ait Iterator, 
bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func EqSameIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] == b[j] { a[i] = "true" } else { a[i] = "false" } } } return } func NeSameIterB(a []bool, b []bool, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = true } else { a[i] = false } } } return } func NeSameIterI(a []int, b []int, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterI8(a []int8, b []int8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterI16(a []int16, b []int16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterI32(a []int32, b []int32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterI64(a []int64, b []int64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterU(a []uint, b []uint, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = 
bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterU8(a []uint8, b []uint8, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterU16(a []uint16, b []uint16, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterU32(a []uint32, b []uint32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterU64(a []uint64, b []uint64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterUintptr(a []uintptr, b []uintptr, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterF32(a []float32, b []float32, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterF64(a []float64, b []float64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterC64(a []complex64, b []complex64, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } func NeSameIterC128(a []complex128, b []complex128, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = 1 } else { a[i] = 0 } } } return } 
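// Illustrative sketch (not part of the generated code): the *SameIter
// kernels compare two slices element by element, with the element order
// driven by the two Iterators, and write the result back into a in a's own
// dtype -- 1/0 for numeric types, true/false for bools, "true"/"false" for
// strings. Assuming ait and bit are Iterators that walk the full flat
// range of a and b, a caller might look like:
//
//	a := []float64{1, 2, 3}
//	b := []float64{3, 2, 1}
//	if err := EqSameIterF64(a, b, ait, bit); err != nil {
//		return err
//	}
//	// a is now []float64{0, 1, 0}: only index 1 compared equal.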
func NeSameIterStr(a []string, b []string, ait Iterator, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if a[i] != b[j] { a[i] = "true" } else { a[i] = "false" } } } return }
tensor-0.9.24/internal/execution/generic_map.go
// Code generated by genlib2. DO NOT EDIT.
package execution
import "unsafe"
func MapB(fn func(bool) bool, a []bool) { for i := range a { a[i] = fn(a[i]) } return }
func MapI(fn func(int) int, a []int) { for i := range a { a[i] = fn(a[i]) } return }
func MapI8(fn func(int8) int8, a []int8) { for i := range a { a[i] = fn(a[i]) } return }
func MapI16(fn func(int16) int16, a []int16) { for i := range a { a[i] = fn(a[i]) } return }
func MapI32(fn func(int32) int32, a []int32) { for i := range a { a[i] = fn(a[i]) } return }
func MapI64(fn func(int64) int64, a []int64) { for i := range a { a[i] = fn(a[i]) } return }
func MapU(fn func(uint) uint, a []uint) { for i := range a { a[i] = fn(a[i]) } return }
func MapU8(fn func(uint8) uint8, a []uint8) { for i := range a { a[i] = fn(a[i]) } return }
func MapU16(fn func(uint16) uint16, a []uint16) { for i := range a { a[i] = fn(a[i]) } return }
func MapU32(fn func(uint32) uint32, a []uint32) { for i := range a { a[i] = fn(a[i]) } return }
func MapU64(fn func(uint64) uint64, a []uint64) { for i := range a { a[i] = fn(a[i]) } return }
func MapUintptr(fn func(uintptr) uintptr, a []uintptr) { for i := range a { a[i] = fn(a[i]) } return }
func MapF32(fn func(float32) float32, a []float32) { for i := range a { a[i] = fn(a[i]) } return }
func MapF64(fn func(float64) float64, a []float64) { for i := range a { a[i] = fn(a[i]) } return }
func MapC64(fn func(complex64) complex64, a []complex64) { for i := range a { a[i] = fn(a[i]) } return }
func MapC128(fn func(complex128) complex128, a []complex128) { for i := range a { a[i] = fn(a[i]) } return }
func MapStr(fn func(string) string, a []string) { for i := range a { a[i] = fn(a[i]) } return }
func MapUnsafePointer(fn func(unsafe.Pointer) unsafe.Pointer, a []unsafe.Pointer) { for i := range a { a[i] = fn(a[i]) } return }
func MapErrB(fn func(bool) (bool, error), a []bool) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrI(fn func(int) (int, error), a []int) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrI8(fn func(int8) (int8, error), a []int8) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrI16(fn func(int16) (int16, error), a []int16) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrI32(fn func(int32) (int32, error), a []int32) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrI64(fn func(int64) (int64, error), a []int64) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrU(fn func(uint) (uint, error), a []uint) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return }
func MapErrU8(fn func(uint8) (uint8, error), a []uint8) (err error) { for i := range a { if a[i], err = fn(a[i]);
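// handleNoOp filters out a NoOpError, so an fn that signals a no-op does
// not abort the map; any other error returns immediately.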
handleNoOp(err) != nil { return } } return } func MapErrU16(fn func(uint16) (uint16, error), a []uint16) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrU32(fn func(uint32) (uint32, error), a []uint32) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrU64(fn func(uint64) (uint64, error), a []uint64) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrUintptr(fn func(uintptr) (uintptr, error), a []uintptr) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrF32(fn func(float32) (float32, error), a []float32) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrF64(fn func(float64) (float64, error), a []float64) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrC64(fn func(complex64) (complex64, error), a []complex64) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrC128(fn func(complex128) (complex128, error), a []complex128) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrStr(fn func(string) (string, error), a []string) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapErrUnsafePointer(fn func(unsafe.Pointer) (unsafe.Pointer, error), a []unsafe.Pointer) (err error) { for i := range a { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } return } func MapIterB(fn func(bool) bool, a []bool, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterI(fn func(int) int, a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterI8(fn func(int8) int8, a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterI16(fn func(int16) int16, a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterI32(fn func(int32) int32, a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterI64(fn func(int64) int64, a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterU(fn func(uint) uint, a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterU8(fn func(uint8) uint8, a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi 
{ a[i] = fn(a[i]) } } return } func MapIterU16(fn func(uint16) uint16, a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterU32(fn func(uint32) uint32, a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterU64(fn func(uint64) uint64, a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterUintptr(fn func(uintptr) uintptr, a []uintptr, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterF32(fn func(float32) float32, a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterF64(fn func(float64) float64, a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterC64(fn func(complex64) complex64, a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterC128(fn func(complex128) complex128, a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterStr(fn func(string) string, a []string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterUnsafePointer(fn func(unsafe.Pointer) unsafe.Pointer, a []unsafe.Pointer, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = fn(a[i]) } } return } func MapIterErrB(fn func(bool) (bool, error), a []bool, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrI(fn func(int) (int, error), a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrI8(fn func(int8) (int8, error), a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrI16(fn func(int16) (int16, error), a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); 
handleNoOp(err) != nil { return } } } return } func MapIterErrI32(fn func(int32) (int32, error), a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrI64(fn func(int64) (int64, error), a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrU(fn func(uint) (uint, error), a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrU8(fn func(uint8) (uint8, error), a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrU16(fn func(uint16) (uint16, error), a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrU32(fn func(uint32) (uint32, error), a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrU64(fn func(uint64) (uint64, error), a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrUintptr(fn func(uintptr) (uintptr, error), a []uintptr, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrF32(fn func(float32) (float32, error), a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrF64(fn func(float64) (float64, error), a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrC64(fn func(complex64) (complex64, error), a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrC128(fn func(complex128) (complex128, error), a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return 
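// the loop above exits via break once the iterator is exhausted; handleNoOp
// converts that end-of-iteration NoOpError to nil, so exhaustion alone is
// not reported as an error.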
} func MapIterErrStr(fn func(string) (string, error), a []string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIterErrUnsafePointer(fn func(unsafe.Pointer) (unsafe.Pointer, error), a []unsafe.Pointer, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i], err = fn(a[i]); handleNoOp(err) != nil { return } } } return } func MapIncrI(fn func(int) int, a []int) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrI8(fn func(int8) int8, a []int8) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrI16(fn func(int16) int16, a []int16) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrI32(fn func(int32) int32, a []int32) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrI64(fn func(int64) int64, a []int64) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrU(fn func(uint) uint, a []uint) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrU8(fn func(uint8) uint8, a []uint8) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrU16(fn func(uint16) uint16, a []uint16) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrU32(fn func(uint32) uint32, a []uint32) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrU64(fn func(uint64) uint64, a []uint64) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrF32(fn func(float32) float32, a []float32) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrF64(fn func(float64) float64, a []float64) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrC64(fn func(complex64) complex64, a []complex64) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrC128(fn func(complex128) complex128, a []complex128) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrStr(fn func(string) string, a []string) { for i := range a { a[i] += fn(a[i]) } return } func MapIncrErrI(fn func(int) (int, error), a []int) (err error) { for i := range a { var x int if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrI8(fn func(int8) (int8, error), a []int8) (err error) { for i := range a { var x int8 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrI16(fn func(int16) (int16, error), a []int16) (err error) { for i := range a { var x int16 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrI32(fn func(int32) (int32, error), a []int32) (err error) { for i := range a { var x int32 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrI64(fn func(int64) (int64, error), a []int64) (err error) { for i := range a { var x int64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrU(fn func(uint) (uint, error), a []uint) (err error) { for i := range a { var x uint if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrU8(fn func(uint8) (uint8, error), a []uint8) (err error) { for i := range a { var x uint8 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } 
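// a NoOpError from fn has been reset to nil at this point; the x it
// returned is still written back below.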
} a[i] = x } return } func MapIncrErrU16(fn func(uint16) (uint16, error), a []uint16) (err error) { for i := range a { var x uint16 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrU32(fn func(uint32) (uint32, error), a []uint32) (err error) { for i := range a { var x uint32 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrU64(fn func(uint64) (uint64, error), a []uint64) (err error) { for i := range a { var x uint64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrF32(fn func(float32) (float32, error), a []float32) (err error) { for i := range a { var x float32 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrF64(fn func(float64) (float64, error), a []float64) (err error) { for i := range a { var x float64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrC64(fn func(complex64) (complex64, error), a []complex64) (err error) { for i := range a { var x complex64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrC128(fn func(complex128) (complex128, error), a []complex128) (err error) { for i := range a { var x complex128 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIncrErrStr(fn func(string) (string, error), a []string) (err error) { for i := range a { var x string if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } return } func MapIterIncrI(fn func(int) int, a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrI8(fn func(int8) int8, a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrI16(fn func(int16) int16, a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrI32(fn func(int32) int32, a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrI64(fn func(int64) int64, a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrU(fn func(uint) uint, a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrU8(fn func(uint8) uint8, a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrU16(fn func(uint16) uint16, a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, 
validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrU32(fn func(uint32) uint32, a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrU64(fn func(uint64) uint64, a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrF32(fn func(float32) float32, a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrF64(fn func(float64) float64, a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrC64(fn func(complex64) complex64, a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrC128(fn func(complex128) complex128, a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrStr(fn func(string) string, a []string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] += fn(a[i]) } } return } func MapIterIncrErrI(fn func(int) (int, error), a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x int if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return } func MapIterIncrErrI8(fn func(int8) (int8, error), a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x int8 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return } func MapIterIncrErrI16(fn func(int16) (int16, error), a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x int16 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return } func MapIterIncrErrI32(fn func(int32) (int32, error), a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x int32 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return } func MapIterIncrErrI64(fn func(int64) (int64, error), a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x int64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return } func MapIterIncrErrU(fn func(uint) 
(uint, error), a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x uint if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrU8(fn func(uint8) (uint8, error), a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x uint8 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrU16(fn func(uint16) (uint16, error), a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x uint16 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrU32(fn func(uint32) (uint32, error), a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x uint32 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrU64(fn func(uint64) (uint64, error), a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x uint64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrF32(fn func(float32) (float32, error), a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x float32 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrF64(fn func(float64) (float64, error), a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x float64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrC64(fn func(complex64) (complex64, error), a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x complex64 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrC128(fn func(complex128) (complex128, error), a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x complex128 if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
func MapIterIncrErrStr(fn func(string) (string, error), a []string, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { var x string if x, err = fn(a[i]); err != nil { if err = handleNoOp(err); err != nil { return } } a[i] = x } } return }
tensor-0.9.24/internal/execution/generic_minmax.go
// Code generated by genlib2. DO NOT EDIT.
package execution
func VecMinI(a, b []int) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVI(a int, b []int) { for i := range b { if a < b[i] { b[i] = a } } }
func MinVSI(a []int, b int) { for i := range a { if b < a[i] { a[i] = b } } }
func VecMaxI(a, b []int) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } }
func MaxSVI(a int, b []int) { for i := range b { if a > b[i] { b[i] = a } } }
func MaxVSI(a []int, b int) { for i := range a { if b > a[i] { a[i] = b } } }
func VecMinI8(a, b []int8) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVI8(a int8, b []int8) { for i := range b { if a < b[i] { b[i] = a } } }
func MinVSI8(a []int8, b int8) { for i := range a { if b < a[i] { a[i] = b } } }
func VecMaxI8(a, b []int8) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } }
func MaxSVI8(a int8, b []int8) { for i := range b { if a > b[i] { b[i] = a } } }
func MaxVSI8(a []int8, b int8) { for i := range a { if b > a[i] { a[i] = b } } }
func VecMinI16(a, b []int16) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVI16(a int16, b []int16) { for i := range b { if a < b[i] { b[i] = a } } }
func MinVSI16(a []int16, b int16) { for i := range a { if b < a[i] { a[i] = b } } }
func VecMaxI16(a, b []int16) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } }
func MaxSVI16(a int16, b []int16) { for i := range b { if a > b[i] { b[i] = a } } }
func MaxVSI16(a []int16, b int16) { for i := range a { if b > a[i] { a[i] = b } } }
func VecMinI32(a, b []int32) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVI32(a int32, b []int32) { for i := range b { if a < b[i] { b[i] = a } } }
func MinVSI32(a []int32, b int32) { for i := range a { if b < a[i] { a[i] = b } } }
func VecMaxI32(a, b []int32) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } }
func MaxSVI32(a int32, b []int32) { for i := range b { if a > b[i] { b[i] = a } } }
func MaxVSI32(a []int32, b int32) { for i := range a { if b > a[i] { a[i] = b } } }
func VecMinI64(a, b []int64) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVI64(a int64, b []int64) { for i := range b { if a < b[i] { b[i] = a } } }
func MinVSI64(a []int64, b int64) { for i := range a { if b < a[i] { a[i] = b } } }
func VecMaxI64(a, b []int64) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } }
func MaxSVI64(a int64, b []int64) { for i := range b { if a > b[i] { b[i] = a } } }
func MaxVSI64(a []int64, b int64) { for i := range a { if b > a[i] { a[i] = b } } }
func VecMinU(a, b []uint) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVU(a uint, b []uint) { for i := range b { if a < b[i] { b[i] = a } } }
func MinVSU(a []uint, b uint) { for i := range a { if b < a[i] { a[i] = b } } }
func VecMaxU(a, b []uint) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } }
func MaxSVU(a uint, b []uint) { for i := range b { if a > b[i] { b[i] = a } } }
func MaxVSU(a []uint, b uint) { for i := range a { if b > a[i] { a[i] = b } } }
func VecMinU8(a, b []uint8) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } }
func MinSVU8(a uint8, b []uint8) { for i :=
range b { if a < b[i] { b[i] = a } } } func MinVSU8(a []uint8, b uint8) { for i := range a { if b < a[i] { a[i] = b } } } func VecMaxU8(a, b []uint8) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVU8(a uint8, b []uint8) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSU8(a []uint8, b uint8) { for i := range a { if b > a[i] { a[i] = b } } } func VecMinU16(a, b []uint16) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSVU16(a uint16, b []uint16) { for i := range b { if a < b[i] { b[i] = a } } } func MinVSU16(a []uint16, b uint16) { for i := range a { if b < a[i] { a[i] = b } } } func VecMaxU16(a, b []uint16) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVU16(a uint16, b []uint16) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSU16(a []uint16, b uint16) { for i := range a { if b > a[i] { a[i] = b } } } func VecMinU32(a, b []uint32) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSVU32(a uint32, b []uint32) { for i := range b { if a < b[i] { b[i] = a } } } func MinVSU32(a []uint32, b uint32) { for i := range a { if b < a[i] { a[i] = b } } } func VecMaxU32(a, b []uint32) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVU32(a uint32, b []uint32) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSU32(a []uint32, b uint32) { for i := range a { if b > a[i] { a[i] = b } } } func VecMinU64(a, b []uint64) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSVU64(a uint64, b []uint64) { for i := range b { if a < b[i] { b[i] = a } } } func MinVSU64(a []uint64, b uint64) { for i := range a { if b < a[i] { a[i] = b } } } func VecMaxU64(a, b []uint64) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVU64(a uint64, b []uint64) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSU64(a []uint64, b uint64) { for i := range a { if b > a[i] { a[i] = b } } } func VecMinF32(a, b []float32) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSVF32(a float32, b []float32) { for i := range b { if a < b[i] { b[i] = a } } } func MinVSF32(a []float32, b float32) { for i := range a { if b < a[i] { a[i] = b } } } func VecMaxF32(a, b []float32) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVF32(a float32, b []float32) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSF32(a []float32, b float32) { for i := range a { if b > a[i] { a[i] = b } } } func VecMinF64(a, b []float64) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSVF64(a float64, b []float64) { for i := range b { if a < b[i] { b[i] = a } } } func MinVSF64(a []float64, b float64) { for i := range a { if b < a[i] { a[i] = b } } } func VecMaxF64(a, b []float64) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVF64(a float64, b []float64) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSF64(a []float64, b float64) { for i := range a { if b > a[i] { a[i] = b } } } func VecMinStr(a, b []string) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv < v { a[i] = bv } } } func MinSVStr(a string, b []string) { for i := range b { if a < b[i] { b[i] = a } } } func MinVSStr(a []string, b string) { 
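// In-place clamp: every element of a that is greater than the scalar b
// (by Go's lexicographic string ordering) is replaced by b.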
for i := range a { if b < a[i] { a[i] = b } } } func VecMaxStr(a, b []string) { a = a[:] b = b[:len(a)] for i, v := range a { bv := b[i] if bv > v { a[i] = bv } } } func MaxSVStr(a string, b []string) { for i := range b { if a > b[i] { b[i] = a } } } func MaxVSStr(a []string, b string) { for i := range a { if b > a[i] { a[i] = b } } } func MinI(a, b int) (c int) { if a < b { return a } return b } func MaxI(a, b int) (c int) { if a > b { return a } return b } func MinI8(a, b int8) (c int8) { if a < b { return a } return b } func MaxI8(a, b int8) (c int8) { if a > b { return a } return b } func MinI16(a, b int16) (c int16) { if a < b { return a } return b } func MaxI16(a, b int16) (c int16) { if a > b { return a } return b } func MinI32(a, b int32) (c int32) { if a < b { return a } return b } func MaxI32(a, b int32) (c int32) { if a > b { return a } return b } func MinI64(a, b int64) (c int64) { if a < b { return a } return b } func MaxI64(a, b int64) (c int64) { if a > b { return a } return b } func MinU(a, b uint) (c uint) { if a < b { return a } return b } func MaxU(a, b uint) (c uint) { if a > b { return a } return b } func MinU8(a, b uint8) (c uint8) { if a < b { return a } return b } func MaxU8(a, b uint8) (c uint8) { if a > b { return a } return b } func MinU16(a, b uint16) (c uint16) { if a < b { return a } return b } func MaxU16(a, b uint16) (c uint16) { if a > b { return a } return b } func MinU32(a, b uint32) (c uint32) { if a < b { return a } return b } func MaxU32(a, b uint32) (c uint32) { if a > b { return a } return b } func MinU64(a, b uint64) (c uint64) { if a < b { return a } return b } func MaxU64(a, b uint64) (c uint64) { if a > b { return a } return b } func MinF32(a, b float32) (c float32) { if a < b { return a } return b } func MaxF32(a, b float32) (c float32) { if a > b { return a } return b } func MinF64(a, b float64) (c float64) { if a < b { return a } return b } func MaxF64(a, b float64) (c float64) { if a > b { return a } return b } func MinStr(a, b string) (c string) { if a < b { return a } return b } func MaxStr(a, b string) (c string) { if a > b { return a } return b } func MinIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterI(a, b []int, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVI(a int, b []int, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSI(a []int, b int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterI(a, b []int, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) 
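// iterator exhaustion surfaces as a NoOpError; handleNoOp turns it into
// a nil err before the loop exits via break.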
break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterI8(a, b []int8, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVI8(a int8, b []int8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSI8(a []int8, b int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterI8(a, b []int8, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterI16(a, b []int16, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVI16(a int16, b []int16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSI16(a []int16, b int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterI16(a, b []int16, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < 
b[i] { b[i] = a } } } return } func MinIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterI32(a, b []int32, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVI32(a int32, b []int32, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSI32(a []int32, b int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterI32(a, b []int32, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterI64(a, b []int64, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVI64(a int64, b []int64, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSI64(a []int64, b int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterI64(a, b []int64, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterU(a, b []uint, ait, bit Iterator) (err error) { var i, j int var validi, 
validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVU(a uint, b []uint, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSU(a []uint, b uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterU(a, b []uint, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterU8(a, b []uint8, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVU8(a uint8, b []uint8, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a > b[i] { b[i] = a } } } return } func MaxIterVSU8(a []uint8, b uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b > a[i] { a[i] = b } } } return } func VecMaxIterU8(a, b []uint8, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] > a[i] { a[i] = b[j] } } } return } func MinIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if i, validi, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a < b[i] { b[i] = a } } } return } func MinIterVSU16(a []uint16, b uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if b < a[i] { a[i] = b } } } return } func VecMinIterU16(a, b []uint16, ait, bit Iterator) (err error) { var i, j int var validi, validj bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if j, validj, err = bit.NextValidity(); err != nil { err = handleNoOp(err) break } if validi && validj { if b[j] < a[i] { a[i] = b[j] } } } return } func MaxIterSVU16(a uint16, b []uint16, bit Iterator) (err error) { var i int var validi bool for { if 
i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a > b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MaxIterVSU16(a []uint16, b uint16, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b > a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMaxIterU16(a, b []uint16, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] > a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MinIterSVU32(a uint32, b []uint32, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a < b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MinIterVSU32(a []uint32, b uint32, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b < a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMinIterU32(a, b []uint32, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] < a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MaxIterSVU32(a uint32, b []uint32, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a > b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MaxIterVSU32(a []uint32, b uint32, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b > a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMaxIterU32(a, b []uint32, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] > a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MinIterSVU64(a uint64, b []uint64, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a < b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MinIterVSU64(a []uint64, b uint64, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b < a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMinIterU64(a, b []uint64, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] < a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MaxIterSVU64(a uint64, b []uint64, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a > b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MaxIterVSU64(a []uint64, b uint64, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b > a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMaxIterU64(a, b []uint64, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] > a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MinIterSVF32(a float32, b []float32, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a < b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MinIterVSF32(a []float32, b float32, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b < a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMinIterF32(a, b []float32, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] < a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MaxIterSVF32(a float32, b []float32, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a > b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MaxIterVSF32(a []float32, b float32, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b > a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMaxIterF32(a, b []float32, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] > a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MinIterSVF64(a float64, b []float64, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a < b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MinIterVSF64(a []float64, b float64, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b < a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMinIterF64(a, b []float64, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] < a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MaxIterSVF64(a float64, b []float64, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a > b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MaxIterVSF64(a []float64, b float64, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b > a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMaxIterF64(a, b []float64, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] > a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}
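// The kernels above follow a fixed naming scheme: SV variants apply a scalar
// against a vector (writing into the vector), VS variants apply a vector
// against a scalar, and Vec variants walk two vectors in lockstep. The
// Iterator arguments drive traversal over possibly non-contiguous or masked
// data: NextValidity yields the next flat index plus whether that position is
// valid, and handleNoOp swallows the sentinel error that signals exhaustion.
//
// A minimal sketch of driving one of these kernels, assuming ait and bit are
// Iterators over two equal-shaped float64 buffers (Iterator construction
// lives elsewhere in the tensor package):
//
//	// a[i] = max(a[i], b[j]) wherever both positions are valid
//	if err := VecMaxIterF64(a, b, ait, bit); err != nil {
//		return err
//	}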
func MinIterSVStr(a string, b []string, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a < b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MinIterVSStr(a []string, b string, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b < a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMinIterStr(a, b []string, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] < a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

func MaxIterSVStr(a string, b []string, bit Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if a > b[i] {
				b[i] = a
			}
		}
	}
	return
}

func MaxIterVSStr(a []string, b string, ait Iterator) (err error) {
	var i int
	var validi bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi {
			if b > a[i] {
				a[i] = b
			}
		}
	}
	return
}

func VecMaxIterStr(a, b []string, ait, bit Iterator) (err error) {
	var i, j int
	var validi, validj bool
	for {
		if i, validi, err = ait.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if j, validj, err = bit.NextValidity(); err != nil {
			err = handleNoOp(err)
			break
		}
		if validi && validj {
			if b[j] > a[i] {
				a[i] = b[j]
			}
		}
	}
	return
}

tensor-0.9.24/internal/execution/generic_reduce.go
// Code generated by genlib2. DO NOT EDIT.
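// generic_reduce.go provides monomorphized reductions for every supported
// element type: Reduce* folds a binary function over a slice with an explicit
// starting value; Sum* and Prod* are the common special cases; Slice{Min,Max}*
// reduce a whole slice to a scalar; and the reduceFirst*/reduceLast*/
// reduceDefault* families further below reduce a tensor along its first,
// last, or an arbitrary axis respectively.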
package execution

import "unsafe"

func ReduceB(f func(a, b bool) bool, def bool, l ...bool) (retVal bool) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceI(f func(a, b int) int, def int, l ...int) (retVal int) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceI8(f func(a, b int8) int8, def int8, l ...int8) (retVal int8) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceI16(f func(a, b int16) int16, def int16, l ...int16) (retVal int16) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceI32(f func(a, b int32) int32, def int32, l ...int32) (retVal int32) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceI64(f func(a, b int64) int64, def int64, l ...int64) (retVal int64) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceU(f func(a, b uint) uint, def uint, l ...uint) (retVal uint) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceU8(f func(a, b uint8) uint8, def uint8, l ...uint8) (retVal uint8) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceU16(f func(a, b uint16) uint16, def uint16, l ...uint16) (retVal uint16) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceU32(f func(a, b uint32) uint32, def uint32, l ...uint32) (retVal uint32) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceU64(f func(a, b uint64) uint64, def uint64, l ...uint64) (retVal uint64) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceUintptr(f func(a, b uintptr) uintptr, def uintptr, l ...uintptr) (retVal uintptr) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceF32(f func(a, b float32) float32, def float32, l ...float32) (retVal float32) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceF64(f func(a, b float64) float64, def float64, l ...float64) (retVal float64) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceC64(f func(a, b complex64) complex64, def complex64, l ...complex64) (retVal complex64) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceC128(f func(a, b complex128) complex128, def complex128, l ...complex128) (retVal complex128) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceStr(f func(a, b string) string, def string, l ...string) (retVal string) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}

func ReduceUnsafePointer(f func(a, b unsafe.Pointer) unsafe.Pointer, def unsafe.Pointer, l ...unsafe.Pointer) (retVal unsafe.Pointer) {
	retVal = def
	if len(l) == 0 {
		return
	}
	for _, v := range l {
		retVal = f(retVal, v)
	}
	return
}
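// A minimal sketch of a custom fold built on these primitives (the closure
// and values here are illustrative, not part of the generated API):
//
//	sumsq := ReduceF64(func(acc, v float64) float64 { return acc + v*v }, 0, 3, 4) // 25
//
// The Sum*, Prod* and Slice{Min,Max}* helpers that follow are just such folds
// with the loop specialized or the seed fixed.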
func SumI(a []int) int {
	var retVal int
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumI8(a []int8) int8 {
	var retVal int8
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumI16(a []int16) int16 {
	var retVal int16
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumI32(a []int32) int32 {
	var retVal int32
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumI64(a []int64) int64 {
	var retVal int64
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumU(a []uint) uint {
	var retVal uint
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumU8(a []uint8) uint8 {
	var retVal uint8
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumU16(a []uint16) uint16 {
	var retVal uint16
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumU32(a []uint32) uint32 {
	var retVal uint32
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumU64(a []uint64) uint64 {
	var retVal uint64
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumF32(a []float32) float32 {
	var retVal float32
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumF64(a []float64) float64 {
	var retVal float64
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumC64(a []complex64) complex64 {
	var retVal complex64
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func SumC128(a []complex128) complex128 {
	var retVal complex128
	a = a[:]
	for _, v := range a {
		retVal += v
	}
	return retVal
}

func ProdI(a []int) int {
	if len(a) == 0 {
		return 0
	}
	var retVal int = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdI8(a []int8) int8 {
	if len(a) == 0 {
		return 0
	}
	var retVal int8 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdI16(a []int16) int16 {
	if len(a) == 0 {
		return 0
	}
	var retVal int16 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdI32(a []int32) int32 {
	if len(a) == 0 {
		return 0
	}
	var retVal int32 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdI64(a []int64) int64 {
	if len(a) == 0 {
		return 0
	}
	var retVal int64 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdU(a []uint) uint {
	if len(a) == 0 {
		return 0
	}
	var retVal uint = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdU8(a []uint8) uint8 {
	if len(a) == 0 {
		return 0
	}
	var retVal uint8 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdU16(a []uint16) uint16 {
	if len(a) == 0 {
		return 0
	}
	var retVal uint16 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdU32(a []uint32) uint32 {
	if len(a) == 0 {
		return 0
	}
	var retVal uint32 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdU64(a []uint64) uint64 {
	if len(a) == 0 {
		return 0
	}
	var retVal uint64 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdF32(a []float32) float32 {
	if len(a) == 0 {
		return 0
	}
	var retVal float32 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdF64(a []float64) float64 {
	if len(a) == 0 {
		return 0
	}
	var retVal float64 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdC64(a []complex64) complex64 {
	if len(a) == 0 {
		return 0
	}
	var retVal complex64 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}

func ProdC128(a []complex128) complex128 {
	if len(a) == 0 {
		return 0
	}
	var retVal complex128 = 1
	a = a[:]
	for _, v := range a {
		retVal *= v
	}
	return retVal
}
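// Note the edge cases: Prod* returns 0 for an empty slice (not the
// mathematical empty product, 1), and the Slice{Min,Max}* helpers below panic
// on empty input, since a minimum or maximum of nothing is undefined. Callers
// reducing possibly-empty data should check the length first, e.g.:
//
//	if len(xs) > 0 {
//		m := SliceMaxF64(xs)
//		_ = m
//	}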
func SliceMinI(a []int) int {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceI(MinI, a[0], a[1:]...)
}

func SliceMaxI(a []int) int {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceI(MaxI, a[0], a[1:]...)
}

func SliceMinI8(a []int8) int8 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceI8(MinI8, a[0], a[1:]...)
}

func SliceMaxI8(a []int8) int8 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceI8(MaxI8, a[0], a[1:]...)
}

func SliceMinI16(a []int16) int16 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceI16(MinI16, a[0], a[1:]...)
}

func SliceMaxI16(a []int16) int16 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceI16(MaxI16, a[0], a[1:]...)
}

func SliceMinI32(a []int32) int32 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceI32(MinI32, a[0], a[1:]...)
}

func SliceMaxI32(a []int32) int32 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceI32(MaxI32, a[0], a[1:]...)
}

func SliceMinI64(a []int64) int64 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceI64(MinI64, a[0], a[1:]...)
}

func SliceMaxI64(a []int64) int64 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceI64(MaxI64, a[0], a[1:]...)
}

func SliceMinU(a []uint) uint {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceU(MinU, a[0], a[1:]...)
}

func SliceMaxU(a []uint) uint {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceU(MaxU, a[0], a[1:]...)
}

func SliceMinU8(a []uint8) uint8 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceU8(MinU8, a[0], a[1:]...)
}

func SliceMaxU8(a []uint8) uint8 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceU8(MaxU8, a[0], a[1:]...)
}

func SliceMinU16(a []uint16) uint16 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceU16(MinU16, a[0], a[1:]...)
}

func SliceMaxU16(a []uint16) uint16 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceU16(MaxU16, a[0], a[1:]...)
}

func SliceMinU32(a []uint32) uint32 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceU32(MinU32, a[0], a[1:]...)
}

func SliceMaxU32(a []uint32) uint32 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceU32(MaxU32, a[0], a[1:]...)
}

func SliceMinU64(a []uint64) uint64 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceU64(MinU64, a[0], a[1:]...)
}

func SliceMaxU64(a []uint64) uint64 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceU64(MaxU64, a[0], a[1:]...)
}

func SliceMinF32(a []float32) float32 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceF32(MinF32, a[0], a[1:]...)
}

func SliceMaxF32(a []float32) float32 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceF32(MaxF32, a[0], a[1:]...)
}

func SliceMinF64(a []float64) float64 {
	if len(a) < 1 {
		panic("Min of empty slice is meaningless")
	}
	return ReduceF64(MinF64, a[0], a[1:]...)
}

func SliceMaxF64(a []float64) float64 {
	if len(a) < 1 {
		panic("Max of empty slice is meaningless")
	}
	return ReduceF64(MaxF64, a[0], a[1:]...)
} func reduceFirstB(data, retVal []bool, split, size int, fn func(a, b []bool)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstB(data, retVal []bool, split, size int, fn func(a, b bool) bool) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstI(data, retVal []int, split, size int, fn func(a, b []int)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstI(data, retVal []int, split, size int, fn func(a, b int) int) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstI8(data, retVal []int8, split, size int, fn func(a, b []int8)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstI8(data, retVal []int8, split, size int, fn func(a, b int8) int8) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstI16(data, retVal []int16, split, size int, fn func(a, b []int16)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstI16(data, retVal []int16, split, size int, fn func(a, b int16) int16) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstI32(data, retVal []int32, split, size int, fn func(a, b []int32)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstI32(data, retVal []int32, split, size int, fn func(a, b int32) int32) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstI64(data, retVal []int64, split, size int, fn func(a, b []int64)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstI64(data, retVal []int64, split, size int, fn func(a, b int64) int64) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstU(data, retVal []uint, split, size int, fn func(a, b []uint)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstU(data, retVal []uint, split, size int, fn func(a, b uint) uint) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstU8(data, retVal []uint8, split, size int, fn func(a, b []uint8)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { 
fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstU8(data, retVal []uint8, split, size int, fn func(a, b uint8) uint8) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstU16(data, retVal []uint16, split, size int, fn func(a, b []uint16)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstU16(data, retVal []uint16, split, size int, fn func(a, b uint16) uint16) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstU32(data, retVal []uint32, split, size int, fn func(a, b []uint32)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstU32(data, retVal []uint32, split, size int, fn func(a, b uint32) uint32) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstU64(data, retVal []uint64, split, size int, fn func(a, b []uint64)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstU64(data, retVal []uint64, split, size int, fn func(a, b uint64) uint64) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstUintptr(data, retVal []uintptr, split, size int, fn func(a, b []uintptr)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstUintptr(data, retVal []uintptr, split, size int, fn func(a, b uintptr) uintptr) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstF32(data, retVal []float32, split, size int, fn func(a, b []float32)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstF32(data, retVal []float32, split, size int, fn func(a, b float32) float32) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstF64(data, retVal []float64, split, size int, fn func(a, b []float64)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstF64(data, retVal []float64, split, size int, fn func(a, b float64) float64) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstC64(data, retVal []complex64, split, size int, fn func(a, b []complex64)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func 
genericReduceFirstC64(data, retVal []complex64, split, size int, fn func(a, b complex64) complex64) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstC128(data, retVal []complex128, split, size int, fn func(a, b []complex128)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstC128(data, retVal []complex128, split, size int, fn func(a, b complex128) complex128) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstStr(data, retVal []string, split, size int, fn func(a, b []string)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstStr(data, retVal []string, split, size int, fn func(a, b string) string) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceFirstUnsafePointer(data, retVal []unsafe.Pointer, split, size int, fn func(a, b []unsafe.Pointer)) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { fn(retVal, data[start:start+split]) start += split } } func genericReduceFirstUnsafePointer(data, retVal []unsafe.Pointer, split, size int, fn func(a, b unsafe.Pointer) unsafe.Pointer) { start := split copy(retVal[0:split], data[0:split]) for i := 0; i < size-1; i++ { for j := 0; j < split; j++ { retVal[j] = fn(retVal[j], data[j+start]) } start += split } } func reduceLastB(a, retVal []bool, dimSize int, defaultValue bool, fn func(a []bool) bool) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastB(a, retVal []bool, dimSize int, defaultValue bool, fn func(bool, bool) bool) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceB(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastI(a, retVal []int, dimSize int, defaultValue int, fn func(a []int) int) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastI(a, retVal []int, dimSize int, defaultValue int, fn func(int, int) int) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceI(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastI8(a, retVal []int8, dimSize int, defaultValue int8, fn func(a []int8) int8) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastI8(a, retVal []int8, dimSize int, defaultValue int8, fn func(int8, int8) int8) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceI8(fn, defaultValue, a[start:start+dimSize]...) 
retVal[at] = r at++ } } func reduceLastI16(a, retVal []int16, dimSize int, defaultValue int16, fn func(a []int16) int16) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastI16(a, retVal []int16, dimSize int, defaultValue int16, fn func(int16, int16) int16) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceI16(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastI32(a, retVal []int32, dimSize int, defaultValue int32, fn func(a []int32) int32) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastI32(a, retVal []int32, dimSize int, defaultValue int32, fn func(int32, int32) int32) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceI32(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastI64(a, retVal []int64, dimSize int, defaultValue int64, fn func(a []int64) int64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastI64(a, retVal []int64, dimSize int, defaultValue int64, fn func(int64, int64) int64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceI64(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastU(a, retVal []uint, dimSize int, defaultValue uint, fn func(a []uint) uint) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastU(a, retVal []uint, dimSize int, defaultValue uint, fn func(uint, uint) uint) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceU(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastU8(a, retVal []uint8, dimSize int, defaultValue uint8, fn func(a []uint8) uint8) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastU8(a, retVal []uint8, dimSize int, defaultValue uint8, fn func(uint8, uint8) uint8) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceU8(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastU16(a, retVal []uint16, dimSize int, defaultValue uint16, fn func(a []uint16) uint16) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastU16(a, retVal []uint16, dimSize int, defaultValue uint16, fn func(uint16, uint16) uint16) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceU16(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastU32(a, retVal []uint32, dimSize int, defaultValue uint32, fn func(a []uint32) uint32) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastU32(a, retVal []uint32, dimSize int, defaultValue uint32, fn func(uint32, uint32) uint32) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceU32(fn, defaultValue, a[start:start+dimSize]...) 
retVal[at] = r at++ } } func reduceLastU64(a, retVal []uint64, dimSize int, defaultValue uint64, fn func(a []uint64) uint64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastU64(a, retVal []uint64, dimSize int, defaultValue uint64, fn func(uint64, uint64) uint64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceU64(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastUintptr(a, retVal []uintptr, dimSize int, defaultValue uintptr, fn func(a []uintptr) uintptr) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastUintptr(a, retVal []uintptr, dimSize int, defaultValue uintptr, fn func(uintptr, uintptr) uintptr) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceUintptr(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastF32(a, retVal []float32, dimSize int, defaultValue float32, fn func(a []float32) float32) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastF32(a, retVal []float32, dimSize int, defaultValue float32, fn func(float32, float32) float32) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceF32(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastF64(a, retVal []float64, dimSize int, defaultValue float64, fn func(a []float64) float64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastF64(a, retVal []float64, dimSize int, defaultValue float64, fn func(float64, float64) float64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceF64(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastC64(a, retVal []complex64, dimSize int, defaultValue complex64, fn func(a []complex64) complex64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastC64(a, retVal []complex64, dimSize int, defaultValue complex64, fn func(complex64, complex64) complex64) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceC64(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastC128(a, retVal []complex128, dimSize int, defaultValue complex128, fn func(a []complex128) complex128) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastC128(a, retVal []complex128, dimSize int, defaultValue complex128, fn func(complex128, complex128) complex128) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceC128(fn, defaultValue, a[start:start+dimSize]...) 
retVal[at] = r at++ } } func reduceLastStr(a, retVal []string, dimSize int, defaultValue string, fn func(a []string) string) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastStr(a, retVal []string, dimSize int, defaultValue string, fn func(string, string) string) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceStr(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceLastUnsafePointer(a, retVal []unsafe.Pointer, dimSize int, defaultValue unsafe.Pointer, fn func(a []unsafe.Pointer) unsafe.Pointer) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := fn(a[start : start+dimSize]) retVal[at] = r at++ } } func genericReduceLastUnsafePointer(a, retVal []unsafe.Pointer, dimSize int, defaultValue unsafe.Pointer, fn func(unsafe.Pointer, unsafe.Pointer) unsafe.Pointer) { var at int for start := 0; start <= len(a)-dimSize; start += dimSize { r := ReduceUnsafePointer(fn, defaultValue, a[start:start+dimSize]...) retVal[at] = r at++ } } func reduceDefaultB(data, retVal []bool, dim0, dimSize, outerStride, stride, expected int, fn func(a, b bool) bool) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultI(data, retVal []int, dim0, dimSize, outerStride, stride, expected int, fn func(a, b int) int) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultI8(data, retVal []int8, dim0, dimSize, outerStride, stride, expected int, fn func(a, b int8) int8) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultI16(data, retVal []int16, dim0, dimSize, outerStride, stride, expected int, fn func(a, b int16) int16) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultI32(data, retVal []int32, dim0, dimSize, outerStride, stride, expected int, fn func(a, b int32) int32) { for i := 0; i < dim0; i++ { start := i * outerStride 
sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultI64(data, retVal []int64, dim0, dimSize, outerStride, stride, expected int, fn func(a, b int64) int64) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultU(data, retVal []uint, dim0, dimSize, outerStride, stride, expected int, fn func(a, b uint) uint) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultU8(data, retVal []uint8, dim0, dimSize, outerStride, stride, expected int, fn func(a, b uint8) uint8) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultU16(data, retVal []uint16, dim0, dimSize, outerStride, stride, expected int, fn func(a, b uint16) uint16) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultU32(data, retVal []uint32, dim0, dimSize, outerStride, stride, expected int, fn func(a, b uint32) uint32) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultU64(data, retVal []uint64, dim0, dimSize, outerStride, stride, expected int, fn func(a, b uint64) uint64) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = 
sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultUintptr(data, retVal []uintptr, dim0, dimSize, outerStride, stride, expected int, fn func(a, b uintptr) uintptr) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultF32(data, retVal []float32, dim0, dimSize, outerStride, stride, expected int, fn func(a, b float32) float32) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultF64(data, retVal []float64, dim0, dimSize, outerStride, stride, expected int, fn func(a, b float64) float64) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultC64(data, retVal []complex64, dim0, dimSize, outerStride, stride, expected int, fn func(a, b complex64) complex64) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultC128(data, retVal []complex128, dim0, dimSize, outerStride, stride, expected int, fn func(a, b complex128) complex128) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom]) } strideTrack++ if strideTrack >= stride { strideTrack = 0 innerStart += stride } innerStart++ } } } func reduceDefaultStr(data, retVal []string, dim0, dimSize, outerStride, stride, expected int, fn func(a, b string) string) { for i := 0; i < dim0; i++ { start := i * outerStride sliced := data[start : start+outerStride] var innerStart, strideTrack int for j := 0; j < expected; j++ { writeTo := i*expected + j retVal[writeTo] = sliced[innerStart] for k := 1; k < dimSize; k++ { readFrom := innerStart + k*stride retVal[writeTo] 
= fn(retVal[writeTo], sliced[readFrom])
			}
			strideTrack++
			if strideTrack >= stride {
				strideTrack = 0
				innerStart += stride
			}
			innerStart++
		}
	}
}

func reduceDefaultUnsafePointer(data, retVal []unsafe.Pointer, dim0, dimSize, outerStride, stride, expected int, fn func(a, b unsafe.Pointer) unsafe.Pointer) {
	for i := 0; i < dim0; i++ {
		start := i * outerStride
		sliced := data[start : start+outerStride]
		var innerStart, strideTrack int
		for j := 0; j < expected; j++ {
			writeTo := i*expected + j
			retVal[writeTo] = sliced[innerStart]
			for k := 1; k < dimSize; k++ {
				readFrom := innerStart + k*stride
				retVal[writeTo] = fn(retVal[writeTo], sliced[readFrom])
			}
			strideTrack++
			if strideTrack >= stride {
				strideTrack = 0
				innerStart += stride
			}
			innerStart++
		}
	}
}

tensor-0.9.24/internal/execution/generic_unary.go
// Code generated by genlib2. DO NOT EDIT.

package execution

import (
	"math"
	"math/cmplx"

	"github.com/chewxy/math32"
)

func NegI(a []int) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegI8(a []int8) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegI16(a []int16) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegI32(a []int32) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegI64(a []int64) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegU(a []uint) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegU8(a []uint8) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegU16(a []uint16) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegU32(a []uint32) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegU64(a []uint64) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegF32(a []float32) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegF64(a []float64) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegC64(a []complex64) {
	for i := range a {
		a[i] = -a[i]
	}
}

func NegC128(a []complex128) {
	for i := range a {
		a[i] = -a[i]
	}
}

func InvI(a []int) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvI8(a []int8) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvI16(a []int16) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvI32(a []int32) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvI64(a []int64) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvU(a []uint) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvU8(a []uint8) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvU16(a []uint16) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvU32(a []uint32) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvU64(a []uint64) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvF32(a []float32) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvF64(a []float64) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvC64(a []complex64) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func InvC128(a []complex128) {
	for i := range a {
		a[i] = 1 / a[i]
	}
}

func SquareI(a []int) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareI8(a []int8) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareI16(a []int16) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareI32(a []int32) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareI64(a []int64) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareU(a []uint) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareU8(a []uint8) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareU16(a []uint16) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareU32(a []uint32) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareU64(a []uint64) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareF32(a []float32) {
	for i := range a {
		a[i] = a[i] * a[i]
	}
}

func SquareF64(a []float64) {
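	// Same in-place loop that genlib2 emits for every numeric Square*
	// variant above; multiplying in place avoids a math.Pow call per element.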
for i := range a { a[i] = a[i] * a[i] } } func SquareC64(a []complex64) { for i := range a { a[i] = a[i] * a[i] } } func SquareC128(a []complex128) { for i := range a { a[i] = a[i] * a[i] } } func CubeI(a []int) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeI8(a []int8) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeI16(a []int16) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeI32(a []int32) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeI64(a []int64) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeU(a []uint) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeU8(a []uint8) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeU16(a []uint16) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeU32(a []uint32) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeU64(a []uint64) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeF32(a []float32) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeF64(a []float64) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeC64(a []complex64) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func CubeC128(a []complex128) { for i := range a { a[i] = a[i] * a[i] * a[i] } } func ExpF32(a []float32) { for i := range a { a[i] = math32.Exp(a[i]) } } func ExpF64(a []float64) { for i := range a { a[i] = math.Exp(a[i]) } } func ExpC64(a []complex64) { for i := range a { a[i] = complex64(cmplx.Exp(complex128(a[i]))) } } func ExpC128(a []complex128) { for i := range a { a[i] = cmplx.Exp(a[i]) } } func TanhF32(a []float32) { for i := range a { a[i] = math32.Tanh(a[i]) } } func TanhF64(a []float64) { for i := range a { a[i] = math.Tanh(a[i]) } } func TanhC64(a []complex64) { for i := range a { a[i] = complex64(cmplx.Tanh(complex128(a[i]))) } } func TanhC128(a []complex128) { for i := range a { a[i] = cmplx.Tanh(a[i]) } } func LogF32(a []float32) { for i := range a { a[i] = math32.Log(a[i]) } } func LogF64(a []float64) { for i := range a { a[i] = math.Log(a[i]) } } func LogC64(a []complex64) { for i := range a { a[i] = complex64(cmplx.Log(complex128(a[i]))) } } func LogC128(a []complex128) { for i := range a { a[i] = cmplx.Log(a[i]) } } func Log2F32(a []float32) { for i := range a { a[i] = math32.Log2(a[i]) } } func Log2F64(a []float64) { for i := range a { a[i] = math.Log2(a[i]) } } func Log10F32(a []float32) { for i := range a { a[i] = math32.Log10(a[i]) } } func Log10F64(a []float64) { for i := range a { a[i] = math.Log10(a[i]) } } func Log10C64(a []complex64) { for i := range a { a[i] = complex64(cmplx.Log10(complex128(a[i]))) } } func Log10C128(a []complex128) { for i := range a { a[i] = cmplx.Log10(a[i]) } } func SqrtF32(a []float32) { for i := range a { a[i] = math32.Sqrt(a[i]) } } func SqrtF64(a []float64) { for i := range a { a[i] = math.Sqrt(a[i]) } } func SqrtC64(a []complex64) { for i := range a { a[i] = complex64(cmplx.Sqrt(complex128(a[i]))) } } func SqrtC128(a []complex128) { for i := range a { a[i] = cmplx.Sqrt(a[i]) } } func CbrtF32(a []float32) { for i := range a { a[i] = math32.Cbrt(a[i]) } } func CbrtF64(a []float64) { for i := range a { a[i] = math.Cbrt(a[i]) } } func InvSqrtF32(a []float32) { for i := range a { a[i] = float32(1) / math32.Sqrt(a[i]) } } func InvSqrtF64(a []float64) { for i := range a { a[i] = float64(1) / math.Sqrt(a[i]) } } func NegIterI(a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 
-a[i] } } return } func NegIterI8(a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterI16(a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterI32(a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterI64(a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterU(a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterU8(a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterU16(a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterU32(a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterU64(a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func NegIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = -a[i] } } return } func InvIterI(a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterI8(a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterI16(a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterI32(a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / 
a[i] } } return } func InvIterI64(a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterU(a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterU8(a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterU16(a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterU32(a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterU64(a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func InvIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 1 / a[i] } } return } func SquareIterI(a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterI8(a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterI16(a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterI32(a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterI64(a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterU(a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterU8(a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = 
ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterU16(a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterU32(a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterU64(a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func SquareIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] } } return } func CubeIterI(a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterI8(a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterI16(a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterI32(a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterI64(a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterU(a []uint, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterU8(a []uint8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterU16(a []uint16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterU32(a []uint32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { 
err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterU64(a []uint64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func CubeIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = a[i] * a[i] * a[i] } } return } func ExpIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Exp(a[i]) } } return } func ExpIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Exp(a[i]) } } return } func ExpIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = complex64(cmplx.Exp(complex128(a[i]))) } } return } func ExpIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = cmplx.Exp(a[i]) } } return } func TanhIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Tanh(a[i]) } } return } func TanhIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Tanh(a[i]) } } return } func TanhIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = complex64(cmplx.Tanh(complex128(a[i]))) } } return } func TanhIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = cmplx.Tanh(a[i]) } } return } func LogIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Log(a[i]) } } return } func LogIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Log(a[i]) } } return } func LogIterC64(a []complex64, ait Iterator) (err error) { var i int var validi 
bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = complex64(cmplx.Log(complex128(a[i]))) } } return } func LogIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = cmplx.Log(a[i]) } } return } func Log2IterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Log2(a[i]) } } return } func Log2IterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Log2(a[i]) } } return } func Log10IterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Log10(a[i]) } } return } func Log10IterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Log10(a[i]) } } return } func Log10IterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = complex64(cmplx.Log10(complex128(a[i]))) } } return } func Log10IterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = cmplx.Log10(a[i]) } } return } func SqrtIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Sqrt(a[i]) } } return } func SqrtIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Sqrt(a[i]) } } return } func SqrtIterC64(a []complex64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = complex64(cmplx.Sqrt(complex128(a[i]))) } } return } func SqrtIterC128(a []complex128, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = cmplx.Sqrt(a[i]) } } return } func CbrtIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Cbrt(a[i]) } } return } func CbrtIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Cbrt(a[i]) } } return } func InvSqrtIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = float32(1) / math32.Sqrt(a[i]) } } return } func InvSqrtIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = 
float64(1) / math.Sqrt(a[i]) } } return } func AbsI(a []int) { for i := range a { if a[i] < 0 { a[i] = -a[i] } } } func AbsI8(a []int8) { for i := range a { if a[i] < 0 { a[i] = -a[i] } } } func AbsI16(a []int16) { for i := range a { if a[i] < 0 { a[i] = -a[i] } } } func AbsI32(a []int32) { for i := range a { if a[i] < 0 { a[i] = -a[i] } } } func AbsI64(a []int64) { for i := range a { if a[i] < 0 { a[i] = -a[i] } } } func AbsF32(a []float32) { for i := range a { a[i] = math32.Abs(a[i]) } } func AbsF64(a []float64) { for i := range a { a[i] = math.Abs(a[i]) } } func SignI(a []int) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func SignI8(a []int8) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func SignI16(a []int16) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func SignI32(a []int32) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func SignI64(a []int64) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func SignF32(a []float32) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func SignF64(a []float64) { for i := range a { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } func AbsIterI(a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -a[i] } } } return } func AbsIterI8(a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -a[i] } } } return } func AbsIterI16(a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -a[i] } } } return } func AbsIterI32(a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -a[i] } } } return } func AbsIterI64(a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -a[i] } } } return } func AbsIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math32.Abs(a[i]) } } return } func AbsIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { a[i] = math.Abs(a[i]) } } return } func SignIterI(a []int, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } return } func SignIterI8(a []int8, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } return } func SignIterI16(a []int16, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] 
= -1 } else if a[i] > 0 { a[i] = 1 } } } return } func SignIterI32(a []int32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } return } func SignIterI64(a []int64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } return } func SignIterF32(a []float32, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } return } func SignIterF64(a []float64, ait Iterator) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < 0 { a[i] = -1 } else if a[i] > 0 { a[i] = 1 } } } return } func ClampI(a []int, min int, max int) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampI8(a []int8, min int8, max int8) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampI16(a []int16, min int16, max int16) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampI32(a []int32, min int32, max int32) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampI64(a []int64, min int64, max int64) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampU(a []uint, min uint, max uint) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampU8(a []uint8, min uint8, max uint8) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampU16(a []uint16, min uint16, max uint16) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampU32(a []uint32, min uint32, max uint32) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampU64(a []uint64, min uint64, max uint64) { for i := range a { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } func ClampF32(a []float32, min float32, max float32) { for i := range a { if a[i] < min || math32.IsInf(a[i], -1) { a[i] = min continue } if a[i] > max || math32.IsInf(a[i], 1) { a[i] = max } } } func ClampF64(a []float64, min float64, max float64) { for i := range a { if a[i] < min || math.IsInf(a[i], -1) { a[i] = min continue } if a[i] > max || math.IsInf(a[i], 1) { a[i] = max } } } func ClampIterI(a []int, ait Iterator, min int, max int) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterI8(a []int8, ait Iterator, min int8, max int8) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterI16(a []int16, ait Iterator, min int16, max int16) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { 
err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterI32(a []int32, ait Iterator, min int32, max int32) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterI64(a []int64, ait Iterator, min int64, max int64) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterU(a []uint, ait Iterator, min uint, max uint) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterU8(a []uint8, ait Iterator, min uint8, max uint8) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterU16(a []uint16, ait Iterator, min uint16, max uint16) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterU32(a []uint32, ait Iterator, min uint32, max uint32) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterU64(a []uint64, ait Iterator, min uint64, max uint64) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min { a[i] = min continue } if a[i] > max { a[i] = max } } } return } func ClampIterF32(a []float32, ait Iterator, min float32, max float32) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min || math32.IsInf(a[i], -1) { a[i] = min continue } if a[i] > max || math32.IsInf(a[i], 1) { a[i] = max } } } return } func ClampIterF64(a []float64, ait Iterator, min float64, max float64) (err error) { var i int var validi bool for { if i, validi, err = ait.NextValidity(); err != nil { err = handleNoOp(err) break } if validi { if a[i] < min || math.IsInf(a[i], -1) { a[i] = min continue } if a[i] > max || math.IsInf(a[i], 1) { a[i] = max } } } return } tensor-0.9.24/internal/execution/keepsync.go000066400000000000000000000010261426512615100211110ustar00rootroot00000000000000package execution // Iterator is the generic iterator interface type Iterator interface { Start() (int, error) Next() (int, error) NextValidity() (int, bool, error) NextValid() (int, int, error) NextInvalid() (int, int, error) Reset() SetReverse() SetForward() Coord() []int Done() bool } // NoOpError is a useful error for operations that have no op.
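// An illustrative sketch (not part of the original file): any error type whose
// NoOp() method reports true is treated as "nothing was done" rather than a real
// failure, and handleNoOp below filters such errors out, so the Iter functions in
// this package surface only genuine errors. The concrete implementation from
// keepsync_test.go shows the pattern:
//
//	type noopError struct{}
//
//	func (e noopError) NoOp() bool    { return true }
//	func (e noopError) Error() string { return "NoOp" }
//
//	err := handleNoOp(noopError{}) // err == nil: the no-op is swallowed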
type NoOpError interface { NoOp() bool } func handleNoOp(err error) error { if err == nil { return nil } if _, ok := err.(NoOpError); ok { return nil } return err } tensor-0.9.24/internal/execution/keepsync_test.go000066400000000000000000000006761426512615100221620ustar00rootroot00000000000000package execution import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "testing" ) type noopError struct{} func (e noopError) NoOp() bool { return true } func (e noopError) Error() string { return "NoOp" } func TestHandleNoOp(t *testing.T) { otherErr := errors.New("other error") assert.Equal(t, nil, handleNoOp(noopError{})) assert.Equal(t, nil, handleNoOp(nil)) assert.Equal(t, otherErr, handleNoOp(otherErr)) } tensor-0.9.24/internal/execution/reduction_specialization.go000066400000000000000000000125311426512615100243650ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package execution import ( "reflect" "github.com/pkg/errors" "gorgonia.org/tensor/internal/storage" ) func MonotonicSum(t reflect.Type, a *storage.Header) (retVal interface{}, err error) { switch t { case Int: retVal = SumI(a.Ints()) return case Int8: retVal = SumI8(a.Int8s()) return case Int16: retVal = SumI16(a.Int16s()) return case Int32: retVal = SumI32(a.Int32s()) return case Int64: retVal = SumI64(a.Int64s()) return case Uint: retVal = SumU(a.Uints()) return case Uint8: retVal = SumU8(a.Uint8s()) return case Uint16: retVal = SumU16(a.Uint16s()) return case Uint32: retVal = SumU32(a.Uint32s()) return case Uint64: retVal = SumU64(a.Uint64s()) return case Float32: retVal = SumF32(a.Float32s()) return case Float64: retVal = SumF64(a.Float64s()) return case Complex64: retVal = SumC64(a.Complex64s()) return case Complex128: retVal = SumC128(a.Complex128s()) return default: err = errors.Errorf("Cannot perform Sum on %v", t) return } } func SumMethods(t reflect.Type) (firstFn, lasFn, defaultFn interface{}, err error) { switch t { case Int: return VecAddI, SumI, AddI, nil case Int8: return VecAddI8, SumI8, AddI8, nil case Int16: return VecAddI16, SumI16, AddI16, nil case Int32: return VecAddI32, SumI32, AddI32, nil case Int64: return VecAddI64, SumI64, AddI64, nil case Uint: return VecAddU, SumU, AddU, nil case Uint8: return VecAddU8, SumU8, AddU8, nil case Uint16: return VecAddU16, SumU16, AddU16, nil case Uint32: return VecAddU32, SumU32, AddU32, nil case Uint64: return VecAddU64, SumU64, AddU64, nil case Float32: return VecAddF32, SumF32, AddF32, nil case Float64: return VecAddF64, SumF64, AddF64, nil case Complex64: return VecAddC64, SumC64, AddC64, nil case Complex128: return VecAddC128, SumC128, AddC128, nil default: return nil, nil, nil, errors.Errorf("No methods found for Sum for %v", t) } } func MonotonicMax(t reflect.Type, a *storage.Header) (retVal interface{}, err error) { switch t { case Int: retVal = SliceMaxI(a.Ints()) return case Int8: retVal = SliceMaxI8(a.Int8s()) return case Int16: retVal = SliceMaxI16(a.Int16s()) return case Int32: retVal = SliceMaxI32(a.Int32s()) return case Int64: retVal = SliceMaxI64(a.Int64s()) return case Uint: retVal = SliceMaxU(a.Uints()) return case Uint8: retVal = SliceMaxU8(a.Uint8s()) return case Uint16: retVal = SliceMaxU16(a.Uint16s()) return case Uint32: retVal = SliceMaxU32(a.Uint32s()) return case Uint64: retVal = SliceMaxU64(a.Uint64s()) return case Float32: retVal = SliceMaxF32(a.Float32s()) return case Float64: retVal = SliceMaxF64(a.Float64s()) return default: err = errors.Errorf("Cannot perform Max on %v", t) return } } func MaxMethods(t 
reflect.Type) (firstFn, lasFn, defaultFn interface{}, err error) { switch t { case Int: return VecMaxI, SliceMaxI, MaxI, nil case Int8: return VecMaxI8, SliceMaxI8, MaxI8, nil case Int16: return VecMaxI16, SliceMaxI16, MaxI16, nil case Int32: return VecMaxI32, SliceMaxI32, MaxI32, nil case Int64: return VecMaxI64, SliceMaxI64, MaxI64, nil case Uint: return VecMaxU, SliceMaxU, MaxU, nil case Uint8: return VecMaxU8, SliceMaxU8, MaxU8, nil case Uint16: return VecMaxU16, SliceMaxU16, MaxU16, nil case Uint32: return VecMaxU32, SliceMaxU32, MaxU32, nil case Uint64: return VecMaxU64, SliceMaxU64, MaxU64, nil case Float32: return VecMaxF32, SliceMaxF32, MaxF32, nil case Float64: return VecMaxF64, SliceMaxF64, MaxF64, nil default: return nil, nil, nil, errors.Errorf("No methods found for Max for %v", t) } } func MonotonicMin(t reflect.Type, a *storage.Header) (retVal interface{}, err error) { switch t { case Int: retVal = SliceMinI(a.Ints()) return case Int8: retVal = SliceMinI8(a.Int8s()) return case Int16: retVal = SliceMinI16(a.Int16s()) return case Int32: retVal = SliceMinI32(a.Int32s()) return case Int64: retVal = SliceMinI64(a.Int64s()) return case Uint: retVal = SliceMinU(a.Uints()) return case Uint8: retVal = SliceMinU8(a.Uint8s()) return case Uint16: retVal = SliceMinU16(a.Uint16s()) return case Uint32: retVal = SliceMinU32(a.Uint32s()) return case Uint64: retVal = SliceMinU64(a.Uint64s()) return case Float32: retVal = SliceMinF32(a.Float32s()) return case Float64: retVal = SliceMinF64(a.Float64s()) return default: err = errors.Errorf("Cannot perform Min on %v", t) return } } func MinMethods(t reflect.Type) (firstFn, lasFn, defaultFn interface{}, err error) { switch t { case Int: return VecMinI, SliceMinI, MinI, nil case Int8: return VecMinI8, SliceMinI8, MinI8, nil case Int16: return VecMinI16, SliceMinI16, MinI16, nil case Int32: return VecMinI32, SliceMinI32, MinI32, nil case Int64: return VecMinI64, SliceMinI64, MinI64, nil case Uint: return VecMinU, SliceMinU, MinU, nil case Uint8: return VecMinU8, SliceMinU8, MinU8, nil case Uint16: return VecMinU16, SliceMinU16, MinU16, nil case Uint32: return VecMinU32, SliceMinU32, MinU32, nil case Uint64: return VecMinU64, SliceMinU64, MinU64, nil case Float32: return VecMinF32, SliceMinF32, MinF32, nil case Float64: return VecMinF64, SliceMinF64, MinF64, nil default: return nil, nil, nil, errors.Errorf("No methods found for Min for %v", t) } } tensor-0.9.24/internal/serialization/000077500000000000000000000000001426512615100176145ustar00rootroot00000000000000tensor-0.9.24/internal/serialization/README.md000066400000000000000000000022031426512615100210700ustar00rootroot00000000000000# Serialization # This pseudopackage of sorts handles serialization. The "Canonical" serialized data structure is found in the `pb` subdirectory. # Protobuf generation Proteus needs to be installed, as do its dependencies. 1. `cd pb` 2. `rm generated*` 3. `proteus -f ../../IDLs -p gorgonia.org/tensor/internal/serialization/pb` 4. `cd ../../IDLs` 5. `find gorgonia.org/ -mindepth 2 -type f -exec mv -i '{}' . ';'` 6. `rm -rf gorgonia.org` # FlatBuffer generation 1. generate protobuf first 2. delete the `import "github.com/gogo/protobuf/gogoproto/gogo.proto";` line from the generated protobuf file 3. `flatc --proto PATH/TO/generated.proto` 4. place the `generated.fbs` file in the IDLs directory 5. restore the import line in the `generated.proto` file 6.
From this directory: `flatc --go-namespace fb -g PATH/TO/generated.fbs` # Notes # `find gorgonia.org/ -mindepth 2 -type f -exec mv -i '{}' . ';'` is used to flatten and put all the stuff in the root IDLs directory. # The Serialization Story # To serialize, we copy/convert/coerce the data to the internal/serialization data structures, then call the `Marshal` methods from theretensor-0.9.24/internal/serialization/doc.go000066400000000000000000000001361426512615100207100ustar00rootroot00000000000000// package serialization provides the data structures for serialization package serialization tensor-0.9.24/internal/serialization/fb/000077500000000000000000000000001426512615100202035ustar00rootroot00000000000000tensor-0.9.24/internal/serialization/fb/AP.go000066400000000000000000000050041426512615100210310ustar00rootroot00000000000000// automatically generated by the FlatBuffers compiler, do not modify package fb import ( flatbuffers "github.com/google/flatbuffers/go" ) type AP struct { _tab flatbuffers.Table } func GetRootAsAP(buf []byte, offset flatbuffers.UOffsetT) *AP { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &AP{} x.Init(buf, n+offset) return x } func (rcv *AP) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *AP) Table() flatbuffers.Table { return rcv._tab } func (rcv *AP) Shape(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) } return 0 } func (rcv *AP) ShapeLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *AP) Strides(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) } return 0 } func (rcv *AP) StridesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *AP) O() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *AP) MutateO(n uint32) bool { return rcv._tab.MutateUint32Slot(8, n) } func (rcv *AP) T() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *AP) MutateT(n int32) bool { return rcv._tab.MutateInt32Slot(10, n) } func APStart(builder *flatbuffers.Builder) { builder.StartObject(4) } func APAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(shape), 0) } func APStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func APAddStrides(builder *flatbuffers.Builder, strides flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(strides), 0) } func APStartStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func APAddO(builder *flatbuffers.Builder, o uint32) { builder.PrependUint32Slot(2, o, 0) } func APAddT(builder *flatbuffers.Builder, t int32) { builder.PrependInt32Slot(3, t, 0) } func APEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } tensor-0.9.24/internal/serialization/fb/Dense.go000066400000000000000000000072121426512615100215720ustar00rootroot00000000000000// automatically generated by the FlatBuffers compiler, do not modify package fb import (
flatbuffers "github.com/google/flatbuffers/go" ) type Dense struct { _tab flatbuffers.Table } func GetRootAsDense(buf []byte, offset flatbuffers.UOffsetT) *Dense { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &Dense{} x.Init(buf, n+offset) return x } func (rcv *Dense) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *Dense) Table() flatbuffers.Table { return rcv._tab } func (rcv *Dense) Shape(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) } return 0 } func (rcv *Dense) ShapeLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Dense) Strides(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) } return 0 } func (rcv *Dense) StridesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Dense) O() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *Dense) MutateO(n uint32) bool { return rcv._tab.MutateUint32Slot(8, n) } func (rcv *Dense) T() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *Dense) MutateT(n int32) bool { return rcv._tab.MutateInt32Slot(10, n) } func (rcv *Dense) Type() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Dense) Data(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *Dense) DataLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Dense) DataBytes() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func DenseStart(builder *flatbuffers.Builder) { builder.StartObject(6) } func DenseAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(shape), 0) } func DenseStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func DenseAddStrides(builder *flatbuffers.Builder, strides flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(strides), 0) } func DenseStartStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func DenseAddO(builder *flatbuffers.Builder, o uint32) { builder.PrependUint32Slot(2, o, 0) } func DenseAddT(builder *flatbuffers.Builder, t int32) { builder.PrependInt32Slot(3, t, 0) } func DenseAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(type_), 0) } func DenseAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(data), 0) } func DenseStartDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func DenseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } 
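// An illustrative sketch (not part of the generated file): how the Dense builder
// functions above fit together. Strings and vectors must be created before
// DenseStart is called, and FlatBuffers vectors are built by prepending, i.e.
// elements are written in reverse. The "float64" type name and the 2x3 shape
// below are assumptions made up for this example.
//
//	b := flatbuffers.NewBuilder(0)
//	typ := b.CreateString("float64")
//	DenseStartShapeVector(b, 2)
//	b.PrependInt32(3) // prepended in reverse so the shape reads [2, 3]
//	b.PrependInt32(2)
//	shape := b.EndVector(2)
//	DenseStart(b)
//	DenseAddShape(b, shape)
//	DenseAddType(b, typ)
//	b.Finish(DenseEnd(b))
//	d := GetRootAsDense(b.FinishedBytes(), 0) // d.ShapeLength() == 2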
tensor-0.9.24/internal/serialization/fb/MaskedDense.go000066400000000000000000000121521426512615100227160ustar00rootroot00000000000000// automatically generated by the FlatBuffers compiler, do not modify package fb import ( flatbuffers "github.com/google/flatbuffers/go" ) type MaskedDense struct { _tab flatbuffers.Table } func GetRootAsMaskedDense(buf []byte, offset flatbuffers.UOffsetT) *MaskedDense { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &MaskedDense{} x.Init(buf, n+offset) return x } func (rcv *MaskedDense) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *MaskedDense) Table() flatbuffers.Table { return rcv._tab } func (rcv *MaskedDense) Shape(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) } return 0 } func (rcv *MaskedDense) ShapeLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *MaskedDense) Strides(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) } return 0 } func (rcv *MaskedDense) StridesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *MaskedDense) O() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *MaskedDense) MutateO(n uint32) bool { return rcv._tab.MutateUint32Slot(8, n) } func (rcv *MaskedDense) T() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *MaskedDense) MutateT(n int32) bool { return rcv._tab.MutateInt32Slot(10, n) } func (rcv *MaskedDense) Type() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *MaskedDense) Data(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *MaskedDense) DataLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *MaskedDense) DataBytes() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *MaskedDense) Mask(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *MaskedDense) MaskLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *MaskedDense) MaskIsSoft(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *MaskedDense) MaskIsSoftLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func MaskedDenseStart(builder *flatbuffers.Builder) { builder.StartObject(8) } func MaskedDenseAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(shape), 0) } func MaskedDenseStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, 
numElems, 4) } func MaskedDenseAddStrides(builder *flatbuffers.Builder, strides flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(strides), 0) } func MaskedDenseStartStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func MaskedDenseAddO(builder *flatbuffers.Builder, o uint32) { builder.PrependUint32Slot(2, o, 0) } func MaskedDenseAddT(builder *flatbuffers.Builder, t int32) { builder.PrependInt32Slot(3, t, 0) } func MaskedDenseAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(type_), 0) } func MaskedDenseAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(data), 0) } func MaskedDenseStartDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MaskedDenseAddMask(builder *flatbuffers.Builder, mask flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(mask), 0) } func MaskedDenseStartMaskVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MaskedDenseAddMaskIsSoft(builder *flatbuffers.Builder, maskIsSoft flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(maskIsSoft), 0) } func MaskedDenseStartMaskIsSoftVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MaskedDenseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } tensor-0.9.24/internal/serialization/fb/Triangle.go000066400000000000000000000005261426512615100223020ustar00rootroot00000000000000// automatically generated by the FlatBuffers compiler, do not modify package fb const ( TriangleNOT_TRIANGLE = 0 TriangleUPPER = 1 TriangleLOWER = 2 TriangleSYMMETRIC = 3 ) var EnumNamesTriangle = map[int]string{ TriangleNOT_TRIANGLE:"NOT_TRIANGLE", TriangleUPPER:"UPPER", TriangleLOWER:"LOWER", TriangleSYMMETRIC:"SYMMETRIC", } tensor-0.9.24/internal/serialization/pb/000077500000000000000000000000001426512615100202155ustar00rootroot00000000000000tensor-0.9.24/internal/serialization/pb/dense.go000066400000000000000000000011641426512615100216440ustar00rootroot00000000000000package pb //proteus:generate type DataOrder byte // the states are spread out because proteus cannot handle non-iota states const ( RowMajorContiguous = iota RowMajorNonContiguous ColMajorContiguous ColMajorNonContiguous ) //proteus:generate type Triangle byte const ( NotTriangle Triangle = iota Upper Lower Symmetric ) //proteus:generate type AP struct { Shape []int32 Strides []int32 O DataOrder T Triangle } //proteus:generate type Dense struct { AP Type string // type name Data []byte } //proteus:generate type MaskedDense struct { Dense Mask []bool MaskIsSoft []bool } tensor-0.9.24/internal/serialization/pb/generated.pb.go000066400000000000000000001017011426512615100231020ustar00rootroot00000000000000// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: gorgonia.org/tensor/internal/serialization/pb/generated.proto /* Package pb is a generated protocol buffer package.
It is generated from these files: gorgonia.org/tensor/internal/serialization/pb/generated.proto It has these top-level messages: AP Dense MaskedDense */ package pb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package var Triangle_name = map[int32]string{ 0: "NOT_TRIANGLE", 1: "UPPER", 2: "LOWER", 3: "SYMMETRIC", } var Triangle_value = map[string]int32{ "NOT_TRIANGLE": 0, "UPPER": 1, "LOWER": 2, "SYMMETRIC": 3, } func (Triangle) EnumDescriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *AP) Reset() { *m = AP{} } func (m *AP) String() string { return proto.CompactTextString(m) } func (*AP) ProtoMessage() {} func (*AP) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *Dense) Reset() { *m = Dense{} } func (m *Dense) String() string { return proto.CompactTextString(m) } func (*Dense) ProtoMessage() {} func (*Dense) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *MaskedDense) Reset() { *m = MaskedDense{} } func (m *MaskedDense) String() string { return proto.CompactTextString(m) } func (*MaskedDense) ProtoMessage() {} func (*MaskedDense) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { proto.RegisterType((*AP)(nil), "gorgonia.org.tensor.internal.serialization.pb.AP") proto.RegisterType((*Dense)(nil), "gorgonia.org.tensor.internal.serialization.pb.Dense") proto.RegisterType((*MaskedDense)(nil), "gorgonia.org.tensor.internal.serialization.pb.MaskedDense") proto.RegisterEnum("gorgonia.org.tensor.internal.serialization.pb.Triangle", Triangle_name, Triangle_value) } func (m *AP) Marshal() (dAtA []byte, err error) { size := m.ProtoSize() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *AP) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Shape) > 0 { dAtA2 := make([]byte, len(m.Shape)*10) var j1 int for _, num1 := range m.Shape { num := uint64(num1) for num >= 1<<7 { dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j1++ } dAtA2[j1] = uint8(num) j1++ } dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(j1)) i += copy(dAtA[i:], dAtA2[:j1]) } if len(m.Strides) > 0 { dAtA4 := make([]byte, len(m.Strides)*10) var j3 int for _, num1 := range m.Strides { num := uint64(num1) for num >= 1<<7 { dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j3++ } dAtA4[j3] = uint8(num) j3++ } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(j3)) i += copy(dAtA[i:], dAtA4[:j3]) } if m.O != 0 { dAtA[i] = 0x18 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.O)) } if m.T != 0 { dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.T)) } return i, nil } func (m *Dense) Marshal() (dAtA []byte, err error) { size := m.ProtoSize() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Dense) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if 
len(m.Shape) > 0 { dAtA6 := make([]byte, len(m.Shape)*10) var j5 int for _, num1 := range m.Shape { num := uint64(num1) for num >= 1<<7 { dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j5++ } dAtA6[j5] = uint8(num) j5++ } dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(j5)) i += copy(dAtA[i:], dAtA6[:j5]) } if len(m.Strides) > 0 { dAtA8 := make([]byte, len(m.Strides)*10) var j7 int for _, num1 := range m.Strides { num := uint64(num1) for num >= 1<<7 { dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j7++ } dAtA8[j7] = uint8(num) j7++ } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(j7)) i += copy(dAtA[i:], dAtA8[:j7]) } if m.O != 0 { dAtA[i] = 0x18 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.O)) } if m.T != 0 { dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.T)) } if len(m.Type) > 0 { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i += copy(dAtA[i:], m.Type) } if len(m.Data) > 0 { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) i += copy(dAtA[i:], m.Data) } return i, nil } func (m *MaskedDense) Marshal() (dAtA []byte, err error) { size := m.ProtoSize() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MaskedDense) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Shape) > 0 { dAtA10 := make([]byte, len(m.Shape)*10) var j9 int for _, num1 := range m.Shape { num := uint64(num1) for num >= 1<<7 { dAtA10[j9] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j9++ } dAtA10[j9] = uint8(num) j9++ } dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(j9)) i += copy(dAtA[i:], dAtA10[:j9]) } if len(m.Strides) > 0 { dAtA12 := make([]byte, len(m.Strides)*10) var j11 int for _, num1 := range m.Strides { num := uint64(num1) for num >= 1<<7 { dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j11++ } dAtA12[j11] = uint8(num) j11++ } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(j11)) i += copy(dAtA[i:], dAtA12[:j11]) } if m.O != 0 { dAtA[i] = 0x18 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.O)) } if m.T != 0 { dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.T)) } if len(m.Type) > 0 { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i += copy(dAtA[i:], m.Type) } if len(m.Data) > 0 { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) i += copy(dAtA[i:], m.Data) } if len(m.Mask) > 0 { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Mask))) for _, b := range m.Mask { if b { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } } if len(m.MaskIsSoft) > 0 { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaskIsSoft))) for _, b := range m.MaskIsSoft { if b { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } } return i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *AP) ProtoSize() (n int) { var l int _ = l if len(m.Shape) > 0 { l = 0 for _, e := range m.Shape { l += sovGenerated(uint64(e)) } n += 1 + sovGenerated(uint64(l)) + l } if len(m.Strides) > 0 { l = 0 for _, e := range m.Strides { l += sovGenerated(uint64(e)) } n += 1 + sovGenerated(uint64(l)) + l } if m.O != 0 { n += 1 + sovGenerated(uint64(m.O)) } if m.T != 0 { n += 1 + sovGenerated(uint64(m.T)) } return n } func (m *Dense) ProtoSize() (n int) { var l int _ = l if len(m.Shape) > 0 { l = 0 for _, 
e := range m.Shape { l += sovGenerated(uint64(e)) } n += 1 + sovGenerated(uint64(l)) + l } if len(m.Strides) > 0 { l = 0 for _, e := range m.Strides { l += sovGenerated(uint64(e)) } n += 1 + sovGenerated(uint64(l)) + l } if m.O != 0 { n += 1 + sovGenerated(uint64(m.O)) } if m.T != 0 { n += 1 + sovGenerated(uint64(m.T)) } l = len(m.Type) if l > 0 { n += 1 + l + sovGenerated(uint64(l)) } l = len(m.Data) if l > 0 { n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *MaskedDense) ProtoSize() (n int) { var l int _ = l if len(m.Shape) > 0 { l = 0 for _, e := range m.Shape { l += sovGenerated(uint64(e)) } n += 1 + sovGenerated(uint64(l)) + l } if len(m.Strides) > 0 { l = 0 for _, e := range m.Strides { l += sovGenerated(uint64(e)) } n += 1 + sovGenerated(uint64(l)) + l } if m.O != 0 { n += 1 + sovGenerated(uint64(m.O)) } if m.T != 0 { n += 1 + sovGenerated(uint64(m.T)) } l = len(m.Type) if l > 0 { n += 1 + l + sovGenerated(uint64(l)) } l = len(m.Data) if l > 0 { n += 1 + l + sovGenerated(uint64(l)) } if len(m.Mask) > 0 { n += 1 + sovGenerated(uint64(len(m.Mask))) + len(m.Mask)*1 } if len(m.MaskIsSoft) > 0 { n += 1 + sovGenerated(uint64(len(m.MaskIsSoft))) + len(m.MaskIsSoft)*1 } return n } func sovGenerated(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *AP) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: AP: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: AP: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType == 0 { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Shape = append(m.Shape, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Shape = append(m.Shape, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Shape", wireType) } case 2: if wireType == 0 { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Strides = append(m.Strides, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 
0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Strides = append(m.Strides, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Strides", wireType) } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field O", wireType) } m.O = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.O |= (DataOrder(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field T", wireType) } m.T = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.T |= (Triangle(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Dense) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Dense: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Dense: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType == 0 { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Shape = append(m.Shape, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Shape = append(m.Shape, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Shape", wireType) } case 2: if wireType == 0 { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Strides = append(m.Strides, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { 
return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Strides = append(m.Strides, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Strides", wireType) } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field O", wireType) } m.O = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.O |= (DataOrder(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field T", wireType) } m.T = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.T |= (Triangle(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
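// note: appending into m.Data[:0] copies the wire bytes out of dAtA (reusing
// m.Data's existing backing array when it has enough capacity), so the decoded
// message does not alias the caller's input buffer.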
if m.Data == nil { m.Data = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MaskedDense) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MaskedDense: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MaskedDense: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType == 0 { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Shape = append(m.Shape, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Shape = append(m.Shape, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Shape", wireType) } case 2: if wireType == 0 { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Strides = append(m.Strides, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } m.Strides = append(m.Strides, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Strides", wireType) } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field O", wireType) } m.O = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.O |= (DataOrder(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field T", wireType) } m.T = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.T |= (Triangle(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) if m.Data == nil { m.Data = []byte{} } iNdEx = postIndex case 7: if wireType == 0 { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Mask = append(m.Mask, bool(v != 0)) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Mask = append(m.Mask, bool(v != 0)) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Mask", wireType) } case 8: if wireType == 0 { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.MaskIsSoft = append(m.MaskIsSoft, bool(v != 0)) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.MaskIsSoft = append(m.MaskIsSoft, bool(v != 0)) } } else { return fmt.Errorf("proto: wrong wireType = %d for field MaskIsSoft", wireType) } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { 
return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("gorgonia.org/tensor/internal/serialization/pb/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ // 482 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x93, 0x4f, 0x6b, 0x13, 0x41, 0x18, 0xc6, 0x77, 0x92, 0x6e, 0x9b, 0x4c, 0x13, 0x0d, 0x43, 0x0f, 0x43, 0x94, 0xcd, 0xd8, 0xd3, 0x22, 0x74, 0x17, 0xf4, 0x20, 0x08, 0x1e, 0x5a, 0x1b, 0x24, 0x90, 0x7f, 0x4c, 0x52, 0x44, 0x2f, 0x61, 0xb6, 0x3b, 0xd9, 0x0e, 0x4d, 0x76, 0x96, 0x99, 0x29, 0x52, 0xef, 0x42, 0xcd, 0x27, 0xf0, 0x12, 0xa8, 0xda, 0x83, 0x1f, 0xc3, 0xa3, 0x17, 0xc1, 0x4f, 0x20, 0x92, 0x7e, 0x01, 0xcf, 0x9e, 0x64, 0x27, 0x44, 0xe2, 0xd1, 0x9b, 0x3d, 0xcd, 0xf3, 0xfc, 0x66, 0x9e, 0x77, 0xde, 0x97, 0x61, 0xe0, 0x93, 0x44, 0xaa, 0x44, 0xa6, 0x82, 0x05, 0x52, 0x25, 0xa1, 0xe1, 0xa9, 0x96, 0x2a, 0x14, 0xa9, 0xe1, 0x2a, 0x65, 0x93, 0x50, 0x73, 0x25, 0xd8, 0x44, 0xbc, 0x66, 0x46, 0xc8, 0x34, 0xcc, 0xa2, 0x30, 0xe1, 0x29, 0x57, 0xcc, 0xf0, 0x38, 0xc8, 0x94, 0x34, 0x12, 0xed, 0xad, 0xc7, 0x83, 0x65, 0x3c, 0x58, 0xc5, 0x83, 0xbf, 0xe2, 0x41, 0x16, 0xd5, 0xf7, 0x12, 0x61, 0x4e, 0xce, 0xa2, 0xe0, 0x58, 0x4e, 0xc3, 0x44, 0x26, 0x32, 0xb4, 0x55, 0xa2, 0xb3, 0xb1, 0x75, 0xd6, 0x58, 0xb5, 0xac, 0xbe, 0xfb, 0x01, 0xc0, 0xc2, 0x7e, 0x1f, 0xed, 0x40, 0x57, 0x9f, 0xb0, 0x8c, 0x63, 0x40, 0x8a, 0xbe, 0x4b, 0x97, 0x06, 0x61, 0xb8, 0xa5, 0x8d, 0x12, 0x31, 0xd7, 0xb8, 0x60, 0xf9, 0xca, 0xa2, 0x3b, 0x10, 0x48, 0x5c, 0x24, 0xc0, 0xaf, 0x1e, 0x54, 0x7f, 0x7d, 0x6f, 0x94, 0x0f, 0x99, 0x61, 0x3d, 0x15, 0x73, 0x45, 0x81, 0x44, 0x4d, 0x08, 0x0c, 0xde, 0x20, 0xc0, 0xbf, 
0xf5, 0xe0, 0x51, 0xf0, 0x4f, 0xdd, 0x07, 0x43, 0x25, 0x58, 0x9a, 0x4c, 0x38, 0x05, 0xe6, 0x71, 0xe9, 0xe2, 0xb2, 0xe1, 0xfc, 0x7c, 0xdf, 0x70, 0x76, 0xbf, 0x02, 0xe8, 0x1e, 0xf2, 0x54, 0xf3, 0xff, 0xb1, 0x4f, 0x84, 0xe0, 0x86, 0x39, 0xcf, 0x38, 0x76, 0x09, 0xf0, 0xcb, 0xd4, 0xea, 0x9c, 0xc5, 0xcc, 0x30, 0xbc, 0x49, 0x80, 0x5f, 0xa1, 0x56, 0xaf, 0xcd, 0xf3, 0xb6, 0x00, 0xb7, 0x3b, 0x4c, 0x9f, 0xf2, 0xf8, 0xc6, 0x4f, 0x95, 0xb3, 0x29, 0xd3, 0xa7, 0x78, 0x8b, 0x14, 0xfd, 0x12, 0xb5, 0x1a, 0x11, 0x58, 0xc9, 0xd7, 0x91, 0xd0, 0x23, 0x2d, 0xc7, 0x06, 0x97, 0xec, 0x1e, 0xcc, 0x59, 0x4b, 0x0f, 0xe4, 0x78, 0xed, 0x6d, 0xef, 0xbf, 0x01, 0xb0, 0xb4, 0xba, 0x17, 0xdd, 0x83, 0x95, 0x6e, 0x6f, 0x38, 0x1a, 0xd2, 0xd6, 0x7e, 0xf7, 0x59, 0xbb, 0x59, 0x73, 0xea, 0xb7, 0x67, 0x73, 0xb2, 0xdd, 0x95, 0xe6, 0xcf, 0x91, 0x1d, 0xe8, 0x1e, 0xf5, 0xfb, 0x4d, 0x5a, 0x03, 0xf5, 0xf2, 0x6c, 0x4e, 0xdc, 0xa3, 0x2c, 0xe3, 0x2a, 0xa7, 0xed, 0xde, 0xf3, 0x26, 0xad, 0x15, 0x96, 0xb4, 0x2d, 0x5f, 0x71, 0x85, 0xee, 0xc2, 0xf2, 0xe0, 0x45, 0xa7, 0xd3, 0x1c, 0xd2, 0xd6, 0xd3, 0x5a, 0xb1, 0x5e, 0x9d, 0xcd, 0x49, 0x79, 0x70, 0x3e, 0x9d, 0x72, 0xa3, 0xc4, 0x71, 0xbd, 0x72, 0xf1, 0xd1, 0x73, 0x3e, 0x5d, 0x79, 0xce, 0xe7, 0x2b, 0xcf, 0x39, 0xc0, 0x5f, 0x16, 0x1e, 0xf8, 0xb6, 0xf0, 0xc0, 0x8f, 0x85, 0xe7, 0xbc, 0xbb, 0xf6, 0x9c, 0xcb, 0x6b, 0x0f, 0xbc, 0x2c, 0x64, 0x51, 0xb4, 0x69, 0x7f, 0xca, 0xc3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xff, 0xbb, 0x8f, 0xc8, 0x03, 0x00, 0x00, } tensor-0.9.24/internal/storage/000077500000000000000000000000001426512615100164035ustar00rootroot00000000000000tensor-0.9.24/internal/storage/consts.go000066400000000000000000000017001426512615100202410ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package storage import ( "reflect" "unsafe" ) var ( bType = reflect.TypeOf(bool(false)) iType = reflect.TypeOf(int(0)) i8Type = reflect.TypeOf(int8(0)) i16Type = reflect.TypeOf(int16(0)) i32Type = reflect.TypeOf(int32(0)) i64Type = reflect.TypeOf(int64(0)) uType = reflect.TypeOf(uint(0)) u8Type = reflect.TypeOf(uint8(0)) u16Type = reflect.TypeOf(uint16(0)) u32Type = reflect.TypeOf(uint32(0)) u64Type = reflect.TypeOf(uint64(0)) uintptrType = reflect.TypeOf(uintptr(0)) f32Type = reflect.TypeOf(float32(0)) f64Type = reflect.TypeOf(float64(0)) c64Type = reflect.TypeOf(complex64(0)) c128Type = reflect.TypeOf(complex128(0)) strType = reflect.TypeOf(string("")) unsafePointerType = reflect.TypeOf(unsafe.Pointer(nil)) ) tensor-0.9.24/internal/storage/getset.go000066400000000000000000000117441426512615100202340ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. 
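// getset.go provides, for each supported element type, a typed view over the
// raw byte storage (e.g. Bools() []bool, Float64s() []float64), together with
// the Set*/Get* helpers that index into those views.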
package storage import "unsafe" /* bool */ func (h *Header) Bools() []bool { return (*(*[]bool)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(bType):h.TypedLen(bType)] } func (h *Header) SetB(i int, x bool) { h.Bools()[i] = x } func (h *Header) GetB(i int) bool { return h.Bools()[i] } /* int */ func (h *Header) Ints() []int { return (*(*[]int)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(iType):h.TypedLen(iType)] } func (h *Header) SetI(i int, x int) { h.Ints()[i] = x } func (h *Header) GetI(i int) int { return h.Ints()[i] } /* int8 */ func (h *Header) Int8s() []int8 { return (*(*[]int8)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i8Type):h.TypedLen(i8Type)] } func (h *Header) SetI8(i int, x int8) { h.Int8s()[i] = x } func (h *Header) GetI8(i int) int8 { return h.Int8s()[i] } /* int16 */ func (h *Header) Int16s() []int16 { return (*(*[]int16)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i16Type):h.TypedLen(i16Type)] } func (h *Header) SetI16(i int, x int16) { h.Int16s()[i] = x } func (h *Header) GetI16(i int) int16 { return h.Int16s()[i] } /* int32 */ func (h *Header) Int32s() []int32 { return (*(*[]int32)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i32Type):h.TypedLen(i32Type)] } func (h *Header) SetI32(i int, x int32) { h.Int32s()[i] = x } func (h *Header) GetI32(i int) int32 { return h.Int32s()[i] } /* int64 */ func (h *Header) Int64s() []int64 { return (*(*[]int64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(i64Type):h.TypedLen(i64Type)] } func (h *Header) SetI64(i int, x int64) { h.Int64s()[i] = x } func (h *Header) GetI64(i int) int64 { return h.Int64s()[i] } /* uint */ func (h *Header) Uints() []uint { return (*(*[]uint)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(uType):h.TypedLen(uType)] } func (h *Header) SetU(i int, x uint) { h.Uints()[i] = x } func (h *Header) GetU(i int) uint { return h.Uints()[i] } /* uint8 */ func (h *Header) Uint8s() []uint8 { return (*(*[]uint8)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u8Type):h.TypedLen(u8Type)] } func (h *Header) SetU8(i int, x uint8) { h.Uint8s()[i] = x } func (h *Header) GetU8(i int) uint8 { return h.Uint8s()[i] } /* uint16 */ func (h *Header) Uint16s() []uint16 { return (*(*[]uint16)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u16Type):h.TypedLen(u16Type)] } func (h *Header) SetU16(i int, x uint16) { h.Uint16s()[i] = x } func (h *Header) GetU16(i int) uint16 { return h.Uint16s()[i] } /* uint32 */ func (h *Header) Uint32s() []uint32 { return (*(*[]uint32)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u32Type):h.TypedLen(u32Type)] } func (h *Header) SetU32(i int, x uint32) { h.Uint32s()[i] = x } func (h *Header) GetU32(i int) uint32 { return h.Uint32s()[i] } /* uint64 */ func (h *Header) Uint64s() []uint64 { return (*(*[]uint64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(u64Type):h.TypedLen(u64Type)] } func (h *Header) SetU64(i int, x uint64) { h.Uint64s()[i] = x } func (h *Header) GetU64(i int) uint64 { return h.Uint64s()[i] } /* uintptr */ func (h *Header) Uintptrs() []uintptr { return (*(*[]uintptr)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(uintptrType):h.TypedLen(uintptrType)] } func (h *Header) SetUintptr(i int, x uintptr) { h.Uintptrs()[i] = x } func (h *Header) GetUintptr(i int) uintptr { return h.Uintptrs()[i] } /* float32 */ func (h *Header) Float32s() []float32 { return (*(*[]float32)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(f32Type):h.TypedLen(f32Type)] } func (h *Header) SetF32(i int, x float32) { h.Float32s()[i] = x } func (h *Header) GetF32(i int) float32 { return h.Float32s()[i] } /* float64 */ func (h *Header) Float64s() []float64 { return 
(*(*[]float64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(f64Type):h.TypedLen(f64Type)] } func (h *Header) SetF64(i int, x float64) { h.Float64s()[i] = x } func (h *Header) GetF64(i int) float64 { return h.Float64s()[i] } /* complex64 */ func (h *Header) Complex64s() []complex64 { return (*(*[]complex64)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(c64Type):h.TypedLen(c64Type)] } func (h *Header) SetC64(i int, x complex64) { h.Complex64s()[i] = x } func (h *Header) GetC64(i int) complex64 { return h.Complex64s()[i] } /* complex128 */ func (h *Header) Complex128s() []complex128 { return (*(*[]complex128)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(c128Type):h.TypedLen(c128Type)] } func (h *Header) SetC128(i int, x complex128) { h.Complex128s()[i] = x } func (h *Header) GetC128(i int) complex128 { return h.Complex128s()[i] } /* string */ func (h *Header) Strings() []string { return (*(*[]string)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(strType):h.TypedLen(strType)] } func (h *Header) SetStr(i int, x string) { h.Strings()[i] = x } func (h *Header) GetStr(i int) string { return h.Strings()[i] } /* unsafe.Pointer */ func (h *Header) UnsafePointers() []unsafe.Pointer { return (*(*[]unsafe.Pointer)(unsafe.Pointer(&h.Raw)))[:h.TypedLen(unsafePointerType):h.TypedLen(unsafePointerType)] } func (h *Header) SetUnsafePointer(i int, x unsafe.Pointer) { h.UnsafePointers()[i] = x } func (h *Header) GetUnsafePointer(i int) unsafe.Pointer { return h.UnsafePointers()[i] } tensor-0.9.24/internal/storage/header.go000066400000000000000000000052251426512615100201660ustar00rootroot00000000000000package storage // import "gorgonia.org/tensor/internal/storage" import ( "reflect" "unsafe" ) // Header is runtime representation of a slice. It's a cleaner version of reflect.SliceHeader. // With this, we wouldn't need to keep the uintptr. 
// This usually means additional pressure for the GC though, especially when passing around Headers type Header struct { Raw []byte } // TypedLen returns the length of data as if it was a slice of type t func (h *Header) TypedLen(t reflect.Type) int { return len(h.Raw) / int(t.Size()) } func Copy(t reflect.Type, dst, src *Header) int { copied := copy(dst.Raw, src.Raw) return copied / int(t.Size()) } func CopySliced(t reflect.Type, dst *Header, dstart, dend int, src *Header, sstart, send int) int { dstBA := dst.Raw srcBA := src.Raw size := int(t.Size()) ds := dstart * size de := dend * size ss := sstart * size se := send * size copied := copy(dstBA[ds:de], srcBA[ss:se]) return copied / size } func SwapCopy(a, b *Header) { for i := range a.Raw { a.Raw[i], b.Raw[i] = b.Raw[i], a.Raw[i] } } func Fill(t reflect.Type, dst, src *Header) int { dstBA := dst.Raw srcBA := src.Raw size := int(t.Size()) lenSrc := len(srcBA) dstart := 0 for { copied := copy(dstBA[dstart:], srcBA) dstart += copied if copied < lenSrc { break } } return dstart / size } func CopyIter(t reflect.Type, dst, src *Header, diter, siter Iterator) int { dstBA := dst.Raw srcBA := src.Raw size := int(t.Size()) var idx, jdx, i, j, count int var err error for { if idx, err = diter.Next(); err != nil { if err = handleNoOp(err); err != nil { panic(err) } break } if jdx, err = siter.Next(); err != nil { if err = handleNoOp(err); err != nil { panic(err) } break } i = idx * size j = jdx * size copy(dstBA[i:i+size], srcBA[j:j+size]) // dstBA[i : i+size] = srcBA[j : j+size] count++ } return count } // ElementAt gets the pointer of the ith element func ElementAt(i int, base unsafe.Pointer, typeSize uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + uintptr(i)*typeSize) } // AsByteSlice takes a slice of anything and returns a casted-as-byte-slice view of it. // This function panics if input is not a slice. 
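//
// A minimal usage sketch (illustrative only, not part of the original docs;
// it assumes a []float64 input):
//
//	xs := []float64{1, 2, 3}
//	raw := AsByteSlice(xs)
//	_ = len(raw) // 24: three float64 elements of 8 bytes each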
func AsByteSlice(x interface{}) []byte { xV := reflect.ValueOf(x) xT := reflect.TypeOf(x).Elem() // expects a []T hdr := reflect.SliceHeader{ Data: xV.Pointer(), Len: xV.Len() * int(xT.Size()), Cap: xV.Cap() * int(xT.Size()), } return *(*[]byte)(unsafe.Pointer(&hdr)) } func FromMemory(ptr uintptr, memsize uintptr) []byte { hdr := reflect.SliceHeader{ Data: ptr, Len: int(memsize), Cap: int(memsize), } return *(*[]byte)(unsafe.Pointer(&hdr)) } tensor-0.9.24/internal/storage/header_test.go000066400000000000000000000031771426512615100212310ustar00rootroot00000000000000package storage import ( "github.com/stretchr/testify/assert" "reflect" "testing" ) func TestCopy(t *testing.T) { // A longer than B a := headerFromSlice([]int{0, 1, 2, 3, 4}) b := headerFromSlice([]int{10, 11}) copied := Copy(reflect.TypeOf(1), &a, &b) assert.Equal(t, 2, copied) assert.Equal(t, []int{10, 11, 2, 3, 4}, a.Ints()) // B longer than A a = headerFromSlice([]int{10, 11}) b = headerFromSlice([]int{0, 1, 2, 3, 4}) copied = Copy(reflect.TypeOf(1), &a, &b) assert.Equal(t, 2, copied) assert.Equal(t, []int{0, 1}, a.Ints()) // A is empty a = headerFromSlice([]int{}) b = headerFromSlice([]int{0, 1, 2, 3, 4}) copied = Copy(reflect.TypeOf(1), &a, &b) assert.Equal(t, 0, copied) // B is empty a = headerFromSlice([]int{0, 1, 2, 3, 4}) b = headerFromSlice([]int{}) copied = Copy(reflect.TypeOf(1), &a, &b) assert.Equal(t, 0, copied) assert.Equal(t, []int{0, 1, 2, 3, 4}, a.Ints()) } func TestFill(t *testing.T) { // A longer than B a := headerFromSlice([]int{0, 1, 2, 3, 4}) b := headerFromSlice([]int{10, 11}) copied := Fill(reflect.TypeOf(1), &a, &b) assert.Equal(t, 5, copied) assert.Equal(t, []int{10, 11, 10, 11, 10}, a.Ints()) // B longer than A a = headerFromSlice([]int{10, 11}) b = headerFromSlice([]int{0, 1, 2, 3, 4}) copied = Fill(reflect.TypeOf(1), &a, &b) assert.Equal(t, 2, copied) assert.Equal(t, []int{0, 1}, a.Ints()) } func headerFromSlice(x interface{}) Header { xT := reflect.TypeOf(x) if xT.Kind() != reflect.Slice { panic("Expected a slice") } xV := reflect.ValueOf(x) size := uintptr(xV.Len()) * xT.Elem().Size() return Header{ Raw: FromMemory(xV.Pointer(), size), } } tensor-0.9.24/internal/storage/keepsync.go000066400000000000000000000010241426512615100205500ustar00rootroot00000000000000package storage // Iterator is the generic iterator interface type Iterator interface { Start() (int, error) Next() (int, error) NextValidity() (int, bool, error) NextValid() (int, int, error) NextInvalid() (int, int, error) Reset() SetReverse() SetForward() Coord() []int Done() bool } // NoOpError is useful for operations that have no op. 
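// A hedged sketch of the intended caller-side pattern (mirroring how CopyIter
// in header.go consumes iterators; iter stands for any value satisfying Iterator):
//
//	if _, err := iter.Next(); err != nil {
//		if err = handleNoOp(err); err != nil {
//			panic(err) // a genuine failure
//		}
//		// a NoOp error merely means iteration is exhausted
//	}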
type NoOpError interface { NoOp() bool } func handleNoOp(err error) error { if err == nil { return nil } if _, ok := err.(NoOpError); ok { return nil } return err } tensor-0.9.24/internal/storage/keepsync_test.go000066400000000000000000000006741426512615100216210ustar00rootroot00000000000000package storage import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "testing" ) type noopError struct{} func (e noopError) NoOp() bool { return true } func (e noopError) Error() string { return "NoOp" } func TestHandleNoOp(t *testing.T) { otherErr := errors.New("other error") assert.Equal(t, nil, handleNoOp(noopError{})) assert.Equal(t, nil, handleNoOp(nil)) assert.Equal(t, otherErr, handleNoOp(otherErr)) } tensor-0.9.24/iterator.go000066400000000000000000000377471426512615100153250ustar00rootroot00000000000000package tensor import ( "runtime" ) func requiresOrderedIterator(e Engine, t Tensor) bool { if t.IsScalar() { return false } if t.RequiresIterator() { return true } switch tt := t.(type) { case DenseTensor: return !e.WorksWith(tt.DataOrder()) case SparseTensor: return true } panic("Unreachable") } // Iterator is the generic iterator interface. // It's used to iterate across multi-dimensional slices, no matter the underlying data arrangement type Iterator interface { // Start returns the first index Start() (int, error) // Next returns the next index. Next is defined as the next value in the coordinates // For example: let x be a (5,5) matrix that is row-major. Current index is for the coordinate (3,3). // Next() returns the index of (3,4). // // If there is no underlying data store for (3,4) - say for example, the matrix is a sparse matrix, it return an error. // If however, there is an underlying data store for (3,4), but it's not valid (for example, masked tensors), it will not return an error. // // Second example: let x be a (5,5) matrix that is col-major. Current index is for coordinate (3,3). // Next() returns the index of (4,3). Next() (int, error) // NextValidity is like Next, but returns the validity of the value at the index as well. NextValidity() (int, bool, error) // NextValid returns the next valid index, as well as a skip count. NextValid() (int, int, error) // NextInvalid returns the next invalid index, as well as a skip count. NextInvalid() (int, int, error) // Reset resets the iterator Reset() // SetReverse tells the iterator to iterate in reverse SetReverse() // SetForward tells the iterator to iterate forwards SetForward() // Coord returns the coordinates Coord() []int // Done returns true when the iterator is done iterating. Done() bool // Shape returns the shape of the multidimensional tensor it's iterating on. Shape() Shape } // NewIterator creates a new Iterator from an ap. The type of iterator depends on number of // aps passed, and whether they are masked or not func NewIterator(aps ...*AP) Iterator { switch len(aps) { case 0: return nil case 1: return newFlatIterator(aps[0]) default: return NewMultIterator(aps...) } } // IteratorFromDense creates a new Iterator from a list of dense tensors func IteratorFromDense(tts ...DenseTensor) Iterator { switch len(tts) { case 0: return nil case 1: if mt, ok := tts[0].(MaskedTensor); ok && mt.IsMasked() { return FlatMaskedIteratorFromDense(mt) } return FlatIteratorFromDense(tts[0]) default: return MultIteratorFromDense(tts...) 
} } func destroyIterator(it Iterator) { switch itt := it.(type) { case *MultIterator: destroyMultIterator(itt) } } func iteratorLoadAP(it Iterator, ap *AP) { switch itt := it.(type) { case *FlatIterator: itt.AP = ap case *FlatMaskedIterator: itt.AP = ap case *MultIterator: // Do nothing, TODO: perhaps add something here } } /* FLAT ITERATOR */ // FlatIterator is an iterator that iterates over Tensors according to the data's layout. // It utilizes the *AP of a Tensor to determine what the next index is. // This data structure is similar to Numpy's flatiter, with some standard Go based restrictions of course // (such as, not allowing negative indices) type FlatIterator struct { *AP //state track []int nextIndex int lastIndex int size int done bool veclikeDim int // the dimension of a vectorlike shape that is not a 1. reverse bool // if true, iterator starts at end of array and runs backwards isScalar bool isVector bool outerFirst bool } // newFlatIterator creates a new FlatIterator. func newFlatIterator(ap *AP) *FlatIterator { var dim int if ap.IsVectorLike() { for d, i := range ap.shape { if i != 1 { dim = d break } } } return &FlatIterator{ AP: ap, track: make([]int, len(ap.shape)), size: ap.shape.TotalSize(), veclikeDim: dim, isScalar: ap.IsScalar(), isVector: ap.IsVectorLike(), } } // FlatIteratorFromDense creates a new FlatIterator from a dense tensor func FlatIteratorFromDense(tt DenseTensor) *FlatIterator { return newFlatIterator(tt.Info()) } // SetReverse initializes iterator to run backwards func (it *FlatIterator) SetReverse() { it.reverse = true it.Reset() return } // SetForward initializes iterator to run forwards func (it *FlatIterator) SetForward() { it.reverse = false it.Reset() return } //Start begins iteration func (it *FlatIterator) Start() (int, error) { it.Reset() return it.Next() } //Done checks whether iterators are done func (it *FlatIterator) Done() bool { return it.done } // Next returns the index of the current coordinate. func (it *FlatIterator) Next() (int, error) { if it.done { return -1, noopError{} } switch { case it.isScalar: it.done = true return 0, nil case it.isVector: if it.reverse { return it.singlePrevious() } return it.singleNext() default: if it.reverse { return it.ndPrevious() } if it.outerFirst { return it.colMajorNDNext() } return it.ndNext() } } // NextValidity returns the index of the current coordinate, and whether or not it's valid. Identical to Next() func (it *FlatIterator) NextValidity() (int, bool, error) { i, err := it.Next() return i, true, err } // NextValid returns the index of the current coordinate. Identical to Next for FlatIterator // Also returns the number of increments to get to next element ( 1, or -1 in reverse case). This is to maintain // consistency with the masked iterator, for which the step between valid elements can be more than 1 func (it *FlatIterator) NextValid() (int, int, error) { if it.done { return -1, 1, noopError{} } switch { case it.isScalar: it.done = true return 0, 0, nil case it.isVector: if it.reverse { a, err := it.singlePrevious() return a, -1, err } a, err := it.singleNext() return a, 1, err default: if it.reverse { a, err := it.ndPrevious() return a, -1, err } if it.outerFirst { a, err := it.colMajorNDNext() return a, 1, err } a, err := it.ndNext() return a, 1, err } } // NextInvalid returns the index of the current coordinate. Identical to Next for FlatIterator // also returns the number of increments to get to next invalid element (1 or -1 in reverse case). 
// Like NextValid, this method's purpose is to maintain consistency with the masked iterator, // for which the step between invalid elements can be anywhere from 0 to the tensor's length func (it *FlatIterator) NextInvalid() (int, int, error) { if it.reverse { return -1, -it.lastIndex, noopError{} } return -1, it.Size() - it.lastIndex, noopError{} } func (it *FlatIterator) singleNext() (int, error) { it.lastIndex = it.nextIndex it.nextIndex++ var tracked int it.track[it.veclikeDim]++ tracked = it.track[it.veclikeDim] if tracked >= it.size { it.done = true } return it.lastIndex, nil } func (it *FlatIterator) singlePrevious() (int, error) { it.lastIndex = it.nextIndex it.nextIndex-- var tracked int it.track[it.veclikeDim]-- tracked = it.track[it.veclikeDim] if tracked < 0 { it.done = true } return it.lastIndex, nil } func (it *FlatIterator) ndNext() (int, error) { // the reason for this weird-looking bit of code is that the SSA compiler doesn't // know how to optimize for this bit of code, not keeping things in registers correctly; // @stuartcarnie optimized this to great effect v := len(it.shape) - 1 nextIndex := it.nextIndex it.lastIndex = nextIndex // the following 3 lines cause the compiler to perform the bounds check here, // instead of it being done in the loop coord := it.shape[:v+1] track := it.track[:v+1] strides := it.strides[:v+1] for i := v; i >= 0; i-- { track[i]++ shapeI := coord[i] strideI := strides[i] if track[i] == shapeI { if i == 0 { it.done = true } track[i] = 0 nextIndex -= (shapeI - 1) * strideI continue } nextIndex += strideI break } it.nextIndex = nextIndex return it.lastIndex, nil } func (it *FlatIterator) colMajorNDNext() (int, error) { // the reason for this weird-looking bit of code is that the SSA compiler doesn't // know how to optimize for this bit of code, not keeping things in registers correctly; // @stuartcarnie optimized this to great effect v := len(it.shape) - 1 nextIndex := it.nextIndex it.lastIndex = nextIndex // the following 3 lines cause the compiler to perform the bounds check here, // instead of it being done in the loop coord := it.shape[:v+1] track := it.track[:v+1] strides := it.strides[:v+1] for i := 0; i <= v; i++ { track[i]++ shapeI := coord[i] strideI := strides[i] if track[i] == shapeI { if i == v { it.done = true } track[i] = 0 nextIndex -= (shapeI - 1) * strideI continue } nextIndex += strideI break } it.nextIndex = nextIndex return it.lastIndex, nil } func (it *FlatIterator) ndPrevious() (int, error) { it.lastIndex = it.nextIndex for i := len(it.shape) - 1; i >= 0; i-- { it.track[i]-- if it.track[i] < 0 { if i == 0 { it.done = true } it.track[i] = it.shape[i] - 1 it.nextIndex += (it.shape[i] - 1) * it.strides[i] continue } it.nextIndex -= it.strides[i] break } return it.lastIndex, nil } // TODO v0.9.0 func (it *FlatIterator) colMajorNDPrevious() (int, error) { return 0, nil } // Coord returns the next coordinate. // When Next() is called, the coordinates are updated AFTER Next() has returned. // See example for more details. // // The returned coordinates are mutable. 
Changing any values in the return value will // change the state of the iterator func (it *FlatIterator) Coord() []int { return it.track } // Slice is a convenience function that augments func (it *FlatIterator) Slice(sli Slice) (retVal []int, err error) { var next int var nexts []int for next, err = it.Next(); err == nil; next, err = it.Next() { nexts = append(nexts, next) } if _, ok := err.(NoOpError); err != nil && !ok { return } if sli == nil { retVal = nexts return } start := sli.Start() end := sli.End() step := sli.Step() // sanity checks if err = CheckSlice(sli, len(nexts)); err != nil { return } if step < 0 { // reverse the nexts for i := len(nexts)/2 - 1; i >= 0; i-- { j := len(nexts) - 1 - i nexts[i], nexts[j] = nexts[j], nexts[i] } step = -step } // cleanup before loop if end > len(nexts) { end = len(nexts) } // nexts = nexts[:end] for i := start; i < end; i += step { retVal = append(retVal, nexts[i]) } err = nil return } // Reset resets the iterator state. func (it *FlatIterator) Reset() { it.done = false if it.reverse { for i := range it.track { it.track[i] = it.shape[i] - 1 } switch { case it.IsScalar(): it.nextIndex = 0 case it.isVector: it.nextIndex = (it.shape[0] - 1) * it.strides[0] // case it.IsRowVec(): // it.nextIndex = (it.shape[1] - 1) * it.strides[1] // case it.IsColVec(): // it.nextIndex = (it.shape[0] - 1) * it.strides[0] default: it.nextIndex = 0 for i := range it.track { it.nextIndex += (it.shape[i] - 1) * it.strides[i] } } } else { it.nextIndex = 0 for i := range it.track { it.track[i] = 0 } } } // Chan returns a channel of ints. This is useful for iterating multiple Tensors at the same time. func (it *FlatIterator) Chan() (retVal chan int) { retVal = make(chan int) go func() { for next, err := it.Next(); err == nil; next, err = it.Next() { retVal <- next } close(retVal) }() return } /* FLAT MASKED ITERATOR */ // FlatMaskedIterator is an iterator that iterates over simple masked Tensors. 
// It is used when the mask stride is identical to data stride with the exception of trailing zeros, // in which case the data index is always a perfect integer multiple of the mask index type FlatMaskedIterator struct { *FlatIterator mask []bool } // FlatMaskedIteratorFromDense creates a new FlatMaskedIterator from dense tensor func FlatMaskedIteratorFromDense(tt MaskedTensor) *FlatMaskedIterator { it := new(FlatMaskedIterator) runtime.SetFinalizer(it, destroyIterator) it.FlatIterator = FlatIteratorFromDense(tt) it.mask = tt.Mask() return it } func (it *FlatMaskedIterator) NextValidity() (int, bool, error) { if len(it.mask) == 0 { return it.FlatIterator.NextValidity() } var i int var err error if i, err = it.Next(); err == nil { return i, !it.mask[i], err } return -1, false, err } // NextValid returns the index of the next valid element, // as well as the number of increments to get to next element func (it *FlatMaskedIterator) NextValid() (int, int, error) { if len(it.mask) == 0 { return it.FlatIterator.NextValid() } var count int var mult = 1 if it.reverse { mult = -1 } for i, err := it.Next(); err == nil; i, err = it.Next() { count++ if !(it.mask[i]) { return i, mult * count, err } } return -1, mult * count, noopError{} } // NextInvalid returns the index of the next invalid element // as well as the number of increments to get to next invalid element func (it *FlatMaskedIterator) NextInvalid() (int, int, error) { if it.mask == nil { return it.FlatIterator.NextInvalid() } var count int var mult = 1 if it.reverse { mult = -1 } for i, err := it.Next(); err == nil; i, err = it.Next() { count++ if it.mask[i] { return i, mult * count, err } } return -1, mult * count, noopError{} } // FlatSparseIterator is an iterator that works very much in the same way as flatiterator, except for sparse tensors type FlatSparseIterator struct { *CS //state nextIndex int lastIndex int track []int done bool reverse bool } func NewFlatSparseIterator(t *CS) *FlatSparseIterator { it := new(FlatSparseIterator) it.CS = t it.track = BorrowInts(len(t.s)) return it } func (it *FlatSparseIterator) Start() (int, error) { it.Reset() return it.Next() } func (it *FlatSparseIterator) Next() (int, error) { if it.done { return -1, noopError{} } // var ok bool it.lastIndex, _ = it.at(it.track...) 
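// at() resolves the current coordinate to a flat index into the sparse
// backing data, yielding -1 when the coordinate has no stored entry (the
// sentinel that NextValid and NextInvalid branch on); the boolean "found"
// result is deliberately discarded here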
// increment the coordinates for i := len(it.s) - 1; i >= 0; i-- { it.track[i]++ if it.track[i] == it.s[i] { if i == 0 { it.done = true } it.track[i] = 0 continue } break } return it.lastIndex, nil } func (it *FlatSparseIterator) NextValidity() (int, bool, error) { i, err := it.Next() if i == -1 { return i, false, err } return i, true, err } func (it *FlatSparseIterator) NextValid() (int, int, error) { var i int var err error for i, err = it.Next(); err == nil && i == -1; i, err = it.Next() { } return i, -1, err } func (it *FlatSparseIterator) NextInvalid() (int, int, error) { var i int var err error for i, err = it.Next(); err == nil && i != -1; i, err = it.Next() { } return i, -1, err } func (it *FlatSparseIterator) Reset() { if it.reverse { for i := range it.track { it.track[i] = it.s[i] - 1 } } else { it.nextIndex = 0 for i := range it.track { it.track[i] = 0 } } it.done = false } func (it *FlatSparseIterator) SetReverse() { it.reverse = true it.Reset() } func (it *FlatSparseIterator) SetForward() { it.reverse = false it.Reset() } func (it *FlatSparseIterator) Coord() []int { return it.track } func (it *FlatSparseIterator) Done() bool { return it.done } /* TEMPORARILY REMOVED // SortedMultiStridePerm takes multiple input strides, and creates a sorted stride permutation. // It's based very closely on Numpy's PyArray_CreateMultiSortedStridePerm, where a stable insertion sort is used // to create the permutations. func SortedMultiStridePerm(dims int, aps []*AP) (retVal []int) { retVal = BorrowInts(dims) for i := 0; i < dims; i++ { retVal[i] = i } for i := 1; i < dims; i++ { ipos := i axisi := retVal[i] for j := i - 1; j >= 0; j-- { var ambig, swap bool ambig = true axisj := retVal[j] for _, ap := range aps { if ap.shape[axisi] != 1 && ap.shape[axisj] != 1 { if ap.strides[axisi] <= ap.strides[axisj] { swap = true } else if ambig { swap = true } ambig = false } } if !ambig && swap { ipos = j } else { break } } if ipos != i { for j := i; j > ipos; j-- { retVal[j] = retVal[j-1] } retVal[ipos] = axisi } } return } */ tensor-0.9.24/iterator_mult.go000066400000000000000000000162451426512615100163540ustar00rootroot00000000000000package tensor import ( "runtime" ) // MultIterator is an iterator that iterates over multiple tensors, including masked tensors. // It utilizes the *AP of a Tensor to determine what the next index is. 
// This data structure is similar to Numpy's flatiter, with some standard Go based restrictions of course // (such as, not allowing negative indices) type MultIterator struct { *AP // Uses AP of the largest tensor in list fit0 *FlatIterator //largest fit in fitArr (by AP total size) mask []bool numMasked int lastIndexArr []int shape Shape whichBlock []int fitArr []*FlatIterator strides []int size int done bool reverse bool } func genIterator(m map[int]int, strides []int, idx int) (int, bool) { key := hashIntArray(strides) f, ok := m[key] if !ok { m[key] = idx return idx, ok } return f, ok } // NewMultIterator creates a new MultIterator from a list of APs func NewMultIterator(aps ...*AP) *MultIterator { nit := len(aps) if nit < 1 { return nil } for _, ap := range aps { if ap == nil { panic("ap is nil") //TODO: Probably remove this panic } } var maxDims int var maxShape = aps[0].shape for i := range aps { if aps[i].Dims() >= maxDims { maxDims = aps[i].Dims() if aps[i].Size() > maxShape.TotalSize() { maxShape = aps[i].shape } } } it := new(MultIterator) it.whichBlock = BorrowInts(nit) it.lastIndexArr = BorrowInts(nit) it.strides = BorrowInts(nit * maxDims) shape := BorrowInts(len(maxShape)) copy(shape, maxShape) it.shape = shape for _, ap := range aps { _, err := BroadcastStrides(shape, ap.shape, it.strides[:maxDims], ap.strides) if err != nil { panic("can not broadcast strides") } } for i := range it.strides { it.strides[i] = 0 } it.fitArr = make([]*FlatIterator, nit) //TODO: Convert this make to Borrow perhaps? m := make(map[int]int) nBlocks := 0 offset := 0 for i, ap := range aps { f, ok := genIterator(m, ap.strides, nBlocks) if !ok { offset = nBlocks * maxDims apStrides, _ := BroadcastStrides(shape, ap.shape, it.strides[offset:offset+maxDims], ap.strides) copy(it.strides[offset:offset+maxDims], apStrides) ReturnInts(apStrides) // Borrowed in BroadcastStrides but returned here - dangerous pattern? nBlocks++ } ap2 := MakeAP(it.shape[:maxDims], it.strides[offset:offset+maxDims], ap.o, ap.Δ) it.whichBlock[i] = f it.fitArr[nBlocks-1] = newFlatIterator(&ap2) } it.fitArr = it.fitArr[:nBlocks] it.strides = it.strides[:nBlocks*maxDims] // fill 0s with 1s for i := range it.strides { if it.strides[i] == 0 { it.strides[i] = 1 } } it.fit0 = it.fitArr[0] for _, f := range it.fitArr { if it.fit0.size < f.size { it.fit0 = f it.AP = f.AP } } return it } // MultIteratorFromDense creates a new MultIterator from a list of dense tensors func MultIteratorFromDense(tts ...DenseTensor) *MultIterator { aps := make([]*AP, len(tts)) hasMask := BorrowBools(len(tts)) defer ReturnBools(hasMask) var masked = false numMasked := 0 for i, tt := range tts { aps[i] = tt.Info() if mt, ok := tt.(MaskedTensor); ok { hasMask[i] = mt.IsMasked() } masked = masked || hasMask[i] if hasMask[i] { numMasked++ } } it := NewMultIterator(aps...) 
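// the finalizer below routes the iterator through destroyIterator, so the
// borrowed whichBlock, lastIndexArr, strides (and, when present, mask) slices
// are returned to their pools once the iterator becomes unreachable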
runtime.SetFinalizer(it, destroyIterator) if masked { // create new mask slice if more than tensor is masked if numMasked > 1 { it.mask = BorrowBools(it.shape.TotalSize()) memsetBools(it.mask, false) for i, err := it.Start(); err == nil; i, err = it.Next() { for j, k := range it.lastIndexArr { if hasMask[j] { it.mask[i] = it.mask[i] || tts[j].(MaskedTensor).Mask()[k] } } } } } it.numMasked = numMasked return it } // destroyMultIterator returns any borrowed objects back to pool func destroyMultIterator(it *MultIterator) { if cap(it.whichBlock) > 0 { ReturnInts(it.whichBlock) it.whichBlock = nil } if cap(it.lastIndexArr) > 0 { ReturnInts(it.lastIndexArr) it.lastIndexArr = nil } if cap(it.strides) > 0 { ReturnInts(it.strides) it.strides = nil } if it.numMasked > 1 { if cap(it.mask) > 0 { ReturnBools(it.mask) it.mask = nil } } } // SetReverse initializes iterator to run backward func (it *MultIterator) SetReverse() { for _, f := range it.fitArr { f.SetReverse() } } // SetForward initializes iterator to run forward func (it *MultIterator) SetForward() { for _, f := range it.fitArr { f.SetForward() } } //Start begins iteration func (it *MultIterator) Start() (int, error) { it.Reset() return it.Next() } //Done checks whether iterators are done func (it *MultIterator) Done() bool { for _, f := range it.fitArr { if !f.done { it.done = false return false } } it.done = true return true } // Next returns the index of the next coordinate func (it *MultIterator) Next() (int, error) { if it.done { return -1, noopError{} } it.done = false for _, f := range it.fitArr { if _, err := f.Next(); err != nil { return -1, err } it.done = it.done || f.done } for i, j := range it.whichBlock { it.lastIndexArr[i] = it.fitArr[j].lastIndex } return it.fit0.lastIndex, nil } func (it *MultIterator) NextValidity() (int, bool, error) { i, err := it.Next() if err != nil { return i, false, err } if len(it.mask) == 0 { return i, true, err } return i, it.mask[i], err } // NextValid returns the index of the next valid coordinate func (it *MultIterator) NextValid() (int, int, error) { var invalid = true var count int var mult = 1 if it.reverse { mult = -1 } for invalid { if it.done { for i, j := range it.whichBlock { it.lastIndexArr[i] = it.fitArr[j].lastIndex } return -1, 0, noopError{} } for _, f := range it.fitArr { f.Next() it.done = it.done || f.done } count++ invalid = !it.mask[it.fit0.lastIndex] } return it.fit0.lastIndex, mult * count, nil } // NextInvalid returns the index of the next invalid coordinate func (it *MultIterator) NextInvalid() (int, int, error) { var valid = true var count = 0 var mult = 1 if it.reverse { mult = -1 } for valid { if it.done { for i, j := range it.whichBlock { it.lastIndexArr[i] = it.fitArr[j].lastIndex } return -1, 0, noopError{} } for _, f := range it.fitArr { f.Next() it.done = it.done || f.done } count++ valid = !it.mask[it.fit0.lastIndex] } return it.fit0.lastIndex, mult * count, nil } // Coord returns the next coordinate. // When Next() is called, the coordinates are updated AFTER the Next() returned. // See example for more details. func (it *MultIterator) Coord() []int { return it.fit0.track } // Reset resets the iterator state. func (it *MultIterator) Reset() { for _, f := range it.fitArr { f.Reset() } for i, j := range it.whichBlock { it.lastIndexArr[i] = it.fitArr[j].lastIndex } it.done = false } // LastIndex returns index of requested iterator func (it *MultIterator) LastIndex(j int) int { return it.lastIndexArr[j] } /* // Chan returns a channel of ints. 
This is useful for iterating multiple Tensors at the same time. func (it *FlatIterator) Chan() (retVal chan int) { retVal = make(chan int) go func() { for next, err := it.Next(); err == nil; next, err = it.Next() { retVal <- next } close(retVal) }() return } */ tensor-0.9.24/iterator_test.go000066400000000000000000000302531426512615100163450ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) // newAP is a helper function now func newAP(shape Shape, strides []int) *AP { ap := MakeAP(shape, strides, 0, 0) return &ap } var flatIterTests1 = []struct { shape Shape strides []int correct []int }{ {ScalarShape(), []int{}, []int{0}}, // scalar {Shape{5}, []int{1}, []int{0, 1, 2, 3, 4}}, // vector {Shape{5, 1}, []int{1, 1}, []int{0, 1, 2, 3, 4}}, // colvec {Shape{1, 5}, []int{5, 1}, []int{0, 1, 2, 3, 4}}, // rowvec {Shape{2, 3}, []int{3, 1}, []int{0, 1, 2, 3, 4, 5}}, // basic mat {Shape{3, 2}, []int{1, 3}, []int{0, 3, 1, 4, 2, 5}}, // basic mat, transposed {Shape{2}, []int{2}, []int{0, 2}}, // basic 2x2 mat, sliced: Mat[:, 1] {Shape{2, 2}, []int{5, 1}, []int{0, 1, 5, 6}}, // basic 5x5, sliced: Mat[1:3, 2,4] {Shape{2, 2}, []int{1, 5}, []int{0, 5, 1, 6}}, // basic 5x5, sliced: Mat[1:3, 2,4] then transposed {Shape{2, 3, 4}, []int{12, 4, 1}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}}, // basic 3-Tensor {Shape{2, 4, 3}, []int{12, 1, 4}, []int{0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}}, // basic 3-Tensor (under (0, 2, 1) transpose) {Shape{4, 2, 3}, []int{1, 12, 4}, []int{0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}}, // basic 3-Tensor (under (2, 0, 1) transpose) {Shape{3, 2, 4}, []int{4, 12, 1}, []int{0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}}, // basic 3-Tensor (under (1, 0, 2) transpose) {Shape{4, 3, 2}, []int{1, 4, 12}, []int{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}}, // basic 3-Tensor (under (2, 1, 0) transpose) // ARTIFICIAL CASES - TODO // These cases should be impossible to reach in normal operation // You would have to specially construct these // {Shape{1, 5}, []int{1}, []int{0, 1, 2, 3, 4}}, // rowvec - NEARLY IMPOSSIBLE CASE- TODO } var flatIterSlices = []struct { slices []Slice corrects [][]int }{ {[]Slice{nil}, [][]int{{0}}}, {[]Slice{rs{0, 3, 1}, rs{0, 5, 2}, rs{0, 6, -1}}, [][]int{{0, 1, 2}, {0, 2, 4}, {4, 3, 2, 1, 0}}}, } func TestFlatIterator(t *testing.T) { assert := assert.New(t) var ap *AP var it *FlatIterator var err error var nexts []int // basic stuff for i, fit := range flatIterTests1 { nexts = nexts[:0] err = nil ap = newAP(fit.shape, fit.strides) it = newFlatIterator(ap) for next, err := it.Next(); err == nil; next, err = it.Next() { nexts = append(nexts, next) } if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } assert.Equal(fit.correct, nexts, "Test %d", i) } } func TestFlatIteratorReverse(t *testing.T) { assert := assert.New(t) var ap *AP var it *FlatIterator var err error var nexts []int // basic stuff for i, fit := range flatIterTests1 { nexts = nexts[:0] err = nil ap = newAP(fit.shape, fit.strides) it = newFlatIterator(ap) it.SetReverse() for next, err := it.Next(); err == nil; next, err = it.Next() { nexts = append(nexts, next) } if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } // reverse slice for i, j := 0, len(nexts)-1; i < j; i, j = i+1, j-1 { nexts[i], nexts[j] = 
nexts[j], nexts[i] } // and then check assert.Equal(fit.correct, nexts, "Test %d", i) } } func TestMultIterator(t *testing.T) { assert := assert.New(t) var ap []*AP var it *MultIterator var err error var nexts [][]int doReverse := []bool{false, true} for _, reverse := range doReverse { ap = make([]*AP, 6) nexts = make([][]int, 6) // Repeat flat tests for i, fit := range flatIterTests1 { nexts[0] = nexts[0][:0] err = nil ap[0] = newAP(fit.shape, fit.strides) it = NewMultIterator(ap[0]) if reverse { it.SetReverse() } for next, err := it.Next(); err == nil; next, err = it.Next() { nexts[0] = append(nexts[0], next) } if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } if reverse { for i, j := 0, len(nexts[0])-1; i < j; i, j = i+1, j-1 { nexts[0][i], nexts[0][j] = nexts[0][j], nexts[0][i] } } assert.Equal(fit.correct, nexts[0], "Repeating flat test %d. Reverse? %v", i, reverse) } // Test multiple iterators simultaneously /* var choices = []int{0, 0, 9, 9, 0, 9} for j := 0; j < 6; j++ { fit := flatIterTests1[choices[j]] nexts[j] = nexts[j][:0] err = nil ap[j] = newAP(fit.shape, fit.strides) } it = NewMultIterator(ap...) if reverse { it.SetReverse() } for _, err := it.Next(); err == nil; _, err = it.Next() { for j := 0; j < 6; j++ { nexts[j] = append(nexts[j], it.LastIndex(j)) } if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } } for j := 0; j < 6; j++ { fit := flatIterTests1[choices[j]] if reverse { for i, k := 0, len(nexts[j])-1; i < k; i, k = i+1, k-1 { nexts[j][i], nexts[j][k] = nexts[j][k], nexts[j][i] } } if ap[j].IsScalar() { assert.Equal(fit.correct, nexts[j][:1], "Test multiple iterators %d", j) } else { assert.Equal(fit.correct, nexts[j], "Test multiple iterators %d", j) } } */ } } func TestIteratorInterface(t *testing.T) { assert := assert.New(t) var ap *AP var it Iterator var err error var nexts []int // basic stuff for i, fit := range flatIterTests1 { nexts = nexts[:0] err = nil ap = newAP(fit.shape, fit.strides) it = NewIterator(ap) for next, err := it.Start(); err == nil; next, err = it.Next() { nexts = append(nexts, next) } if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } assert.Equal(fit.correct, nexts, "Test %d", i) } } func TestMultIteratorFromDense(t *testing.T) { assert := assert.New(t) T1 := New(Of(Int), WithShape(3, 20)) data1 := T1.Data().([]int) T2 := New(Of(Int), WithShape(3, 20)) data2 := T2.Data().([]int) T3 := New(Of(Int), FromScalar(7)) data3 := T3.Data().(int) for i := 0; i < 60; i++ { data1[i] = i data2[i] = 7 * i } it := MultIteratorFromDense(T1, T2, T3) for _, err := it.Next(); err == nil; _, err = it.Next() { x := data1[it.LastIndex(0)] y := data2[it.LastIndex(1)] z := data3 assert.True(y == x*z) } } func TestFlatIterator_Chan(t *testing.T) { assert := assert.New(t) var ap *AP var it *FlatIterator var nexts []int // basic stuff for i, fit := range flatIterTests1 { nexts = nexts[:0] ap = newAP(fit.shape, fit.strides) it = newFlatIterator(ap) ch := it.Chan() for next := range ch { nexts = append(nexts, next) } assert.Equal(fit.correct, nexts, "Test %d", i) } } func TestFlatIterator_Slice(t *testing.T) { assert := assert.New(t) var ap *AP var it *FlatIterator var err error var nexts []int for i, fit := range flatIterTests1 { ap = newAP(fit.shape, fit.strides) it = newFlatIterator(ap) nexts, err = it.Slice(nil) if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } assert.Equal(fit.correct, nexts, "Test %d", i) if i < len(flatIterSlices) { fis := flatIterSlices[i] for j, sli := range fis.slices { it.Reset() nexts, 
err = it.Slice(sli) if _, ok := err.(NoOpError); err != nil && !ok { t.Error(err) } assert.Equal(fis.corrects[j], nexts, "Test %d", i) } } } } func TestFlatIterator_Coord(t *testing.T) { assert := assert.New(t) var ap *AP var it *FlatIterator var err error // var nexts []int var donecount int ap = newAP(Shape{2, 3, 4}, []int{12, 4, 1}) it = newFlatIterator(ap) var correct = [][]int{ {0, 0, 1}, {0, 0, 2}, {0, 0, 3}, {0, 1, 0}, {0, 1, 1}, {0, 1, 2}, {0, 1, 3}, {0, 2, 0}, {0, 2, 1}, {0, 2, 2}, {0, 2, 3}, {1, 0, 0}, {1, 0, 1}, {1, 0, 2}, {1, 0, 3}, {1, 1, 0}, {1, 1, 1}, {1, 1, 2}, {1, 1, 3}, {1, 2, 0}, {1, 2, 1}, {1, 2, 2}, {1, 2, 3}, {0, 0, 0}, } for _, err = it.Next(); err == nil; _, err = it.Next() { assert.Equal(correct[donecount], it.Coord()) donecount++ } } // really this is just for completeness sake func TestFlatIterator_Reset(t *testing.T) { assert := assert.New(t) ap := newAP(Shape{2, 3, 4}, []int{12, 4, 1}) it := newFlatIterator(ap) it.Next() it.Next() it.Reset() assert.Equal(0, it.nextIndex) assert.Equal(false, it.done) assert.Equal([]int{0, 0, 0}, it.track) for _, err := it.Next(); err == nil; _, err = it.Next() { } it.Reset() assert.Equal(0, it.nextIndex) assert.Equal(false, it.done) assert.Equal([]int{0, 0, 0}, it.track) } func TestDestroyIterator(t *testing.T) { it := new(MultIterator) destroyIterator(it) } /* BENCHMARK */ type oldFlatIterator struct { *AP //state lastIndex int track []int done bool } // newFlatIterator creates a new FlatIterator func newOldFlatIterator(ap *AP) *oldFlatIterator { return &oldFlatIterator{ AP: ap, track: make([]int, len(ap.shape)), } } func (it *oldFlatIterator) Next() (int, error) { if it.done { return -1, noopError{} } retVal, err := Ltoi(it.shape, it.strides, it.track...) it.lastIndex = retVal if it.IsScalar() { it.done = true return retVal, err } for d := len(it.shape) - 1; d >= 0; d-- { if d == 0 && it.track[0]+1 >= it.shape[0] { it.done = true it.track[d] = 0 // overflow it break } if it.track[d] < it.shape[d]-1 { it.track[d]++ break } // overflow it.track[d] = 0 } return retVal, err } func (it *oldFlatIterator) Reset() { it.done = false it.lastIndex = 0 if it.done { return } for i := range it.track { it.track[i] = 0 } } func BenchmarkOldFlatIterator(b *testing.B) { var err error // as if T = NewTensor(WithShape(30, 1000, 1000)) // then T[:, 0:900:15, 250:750:50] ap := newAP(Shape{30, 60, 10}, []int{1000000, 15000, 50}) it := newOldFlatIterator(ap) for n := 0; n < b.N; n++ { for _, err := it.Next(); err == nil; _, err = it.Next() { } if _, ok := err.(NoOpError); err != nil && !ok { b.Error(err) } it.Reset() } } func BenchmarkFlatIterator(b *testing.B) { var err error // as if T = NewTensor(WithShape(30, 1000, 1000)) // then T[:, 0:900:15, 250:750:50] ap := newAP(Shape{30, 60, 10}, []int{1000000, 15000, 50}) it := newFlatIterator(ap) for n := 0; n < b.N; n++ { for _, err := it.Next(); err == nil; _, err = it.Next() { } if _, ok := err.(NoOpError); err != nil && !ok { b.Error(err) } it.Reset() } } func BenchmarkFlatIteratorParallel6(b *testing.B) { var err error // as if T = NewTensor(WithShape(30, 1000, 1000)) // then T[:, 0:900:15, 250:750:50] ap := make([]*AP, 6) it := make([]*FlatIterator, 6) for j := 0; j < 6; j++ { ap[j] = newAP(Shape{30, 60, 10}, []int{1000000, 15000, 50}) it[j] = newFlatIterator(ap[j]) } for n := 0; n < b.N; n++ { for _, err := it[0].Next(); err == nil; _, err = it[0].Next() { for j := 1; j < 6; j++ { it[j].Next() } } if _, ok := err.(NoOpError); err != nil && !ok { b.Error(err) } for j := 0; j < 6; j++ { 
it[j].Reset() } } } func BenchmarkFlatIteratorMulti1(b *testing.B) { var err error // as if T = NewTensor(WithShape(30, 1000, 1000)) // then T[:, 0:900:15, 250:750:50] ap := newAP(Shape{30, 60, 10}, []int{1000000, 15000, 50}) it := NewMultIterator(ap) for n := 0; n < b.N; n++ { for _, err := it.Next(); err == nil; _, err = it.Next() { } if _, ok := err.(NoOpError); err != nil && !ok { b.Error(err) } it.Reset() } } func BenchmarkFlatIteratorGeneric1(b *testing.B) { var err error // as if T = NewTensor(WithShape(30, 1000, 1000)) // then T[:, 0:900:15, 250:750:50] ap := newAP(Shape{30, 60, 10}, []int{1000000, 15000, 50}) it := NewIterator(ap) for n := 0; n < b.N; n++ { for _, err := it.Next(); err == nil; _, err = it.Next() { } if _, ok := err.(NoOpError); err != nil && !ok { b.Error(err) } it.Reset() } } func BenchmarkFlatIteratorMulti6(b *testing.B) { var err error // as if T = NewTensor(WithShape(30, 1000, 1000)) // then T[:, 0:900:15, 250:750:50] ap := make([]*AP, 6) for j := 0; j < 6; j++ { ap[j] = newAP(Shape{30, 60, 10}, []int{1000000, 15000, 50}) } it := NewMultIterator(ap...) for n := 0; n < b.N; n++ { for _, err := it.Next(); err == nil; _, err = it.Next() { } if _, ok := err.(NoOpError); err != nil && !ok { b.Error(err) } it.Reset() } } tensor-0.9.24/iterator_utils.go000066400000000000000000000013231426512615100165220ustar00rootroot00000000000000package tensor import ( "encoding/binary" "hash/fnv" ) // hashIntArray uses fnv to generate an int func hashIntArray(in []int) int { tmp := make([]byte, 8*len(in)) for i := 0; i < len(in); i++ { binary.LittleEndian.PutUint64(tmp[i*8:i*8+8], uint64(in[i])) } h := fnv.New64a() h.Write(tmp) // fnv's Write never returns an error // the hash is the 64-bit sum; returning Write's byte count would make all // same-length inputs collide return int(h.Sum64()) } // func hashIntArrayPair(in1, in2 []int) int { // n := len(in1) + len(in2) // tmp := make([]byte, 8*n) // i := 0 // for ; i < len(in1); i++ { // binary.LittleEndian.PutUint64(tmp[i*8:i*8+8], uint64(in1[i])) // } // for j := 0; j < len(in2); j++ { // binary.LittleEndian.PutUint64(tmp[i*8:i*8+8], uint64(in2[j])) // i++ // } // h := fnv.New64a() // h.Write(tmp) // return int(h.Sum64()) // } tensor-0.9.24/junkyard_test.go000066400000000000000000000007341426512615100163440ustar00rootroot00000000000000package tensor import ( "reflect" "testing" ) // junkyard tests the miscellaneous things func TestRandom(t *testing.T) { const size = 50 for _, typ := range numberTypes.set { r := Random(typ, size) typR := reflect.TypeOf(r).Elem() valR := reflect.ValueOf(r) if typR != typ.Type { t.Errorf("Expected typR to be %v. Got %v instead", typ, typR) } if valR.Len() != size { t.Errorf("Expected length to be %v. 
Got %v instead", size, valR.Len()) } } } tensor-0.9.24/known_issues_test.go000066400000000000000000000075131426512615100172460ustar00rootroot00000000000000package tensor import ( "testing" "testing/quick" "github.com/stretchr/testify/assert" ) func TestIssue70(t *testing.T) { a := 2.0 b := NewDense(Float64, Shape{1, 1}, WithBacking([]float64{3})) var correct interface{} = []float64{6.0} res, err := Mul(a, b) if err != nil { t.Fatalf("Error: %v", err) } assert.Equal(t, correct, res.Data()) t.Logf("a %v b %v, res %v", a, b, res) } func TestIssue72(t *testing.T) { a := New(FromScalar(3.14)) b := 0.0 bsa, err := Sub(b, a) if err != nil { t.Fatal(err) } t.Logf("%v", bsa) ret, err := Sub(b, bsa, UseUnsafe()) if err != nil { t.Fatal(err) } t.Logf("%v %v", ret, bsa) invReuseScalar := func(q *Dense) bool { a := q.Clone().(*Dense) //if !a.Shape().IsScalarEquiv() { // return true //} b := identityVal(0, q.t) reuse := New(Of(a.t), WithShape(a.Shape().Clone()...)) correct := a.Clone().(*Dense) we, willFailEq := willerr(a, numberTypes, unsignedTypes) _, ok := q.Engine().(Suber) we = we || !ok //log.Printf("b-a(r) | b:%v, a %v, r %v", b, a.Shape(), reuse.Shape()) ret, err := Sub(b, a, WithReuse(reuse)) if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly { if err != nil { t.Logf("err %v", err) return false } return true } //log.Printf("b-a(r) | b:%v, a %v, r %v, ret %v", b, a.Shape(), reuse.Shape(), ret.Shape()) ret, err = Sub(b, ret, UseUnsafe()) if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) { t.Errorf("a %v ", a.Shape()) return false } if reuse != ret { t.Errorf("Expected reuse to be the same as retVal") return false } return true } if err := quick.Check(invReuseScalar, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil { t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err) } } func TestIssue83(t *testing.T) { backing := []float64{-1, 0, 1} var TT Tensor TT = New( WithShape(1, 3), WithBacking(backing)) TT, _ = T(TT) it := IteratorFromDense(TT.(*Dense)) for i, ierr := it.Next(); ierr == nil; i, ierr = it.Next() { if ierr != nil { t.Error(ierr) } if i >= len(backing) { t.Errorf("Iterator should not return an `i` greater than %v", i) } } backing = []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1} TT = New(WithShape(10, 1, 1, 1), WithBacking(backing)) it = IteratorFromDense(TT.(*Dense)) var vals []float64 for i, ierr := it.Next(); ierr == nil; i, ierr = it.Next() { if ierr != nil { t.Error(ierr) } v := TT.Data().([]float64)[i] vals = append(vals, v) } t.Logf("%v", vals) } func TestIssue88(t *testing.T) { a := New(WithShape(4, 2), WithBacking([]float64{1, 1, 1, 1, 1, 1, 1, 1})) b := New(WithShape(2, 4), WithBacking([]float64{0, 1, 0, 1, 0, 1, 0, 1})) c, _ := a.MatMul(b) _, err := Div(c, 2) if err == nil { t.Fatal("Expected an error") } } var ltoiTestCases = []struct { name string shape Shape strides []int coordinates []int correct int willErr bool }{ {"\"scalar\" - scalarshape", Shape{}, nil, []int{0}, 0, false}, {"\"scalar\" - scalarshape, non empty strides", Shape{}, []int{1}, []int{0}, 0, false}, {"\"scalar\" - scalarlike", Shape{1, 1, 1}, []int{1, 1, 1}, []int{0, 0, 0}, 0, false}, {"vector", Shape{10}, []int{1}, []int{1}, 1, false}, {"rowvec", Shape{1, 10}, []int{10, 1}, []int{0, 1}, 1, false}, {"colvec", Shape{10, 1}, []int{1, 1}, []int{1, 0}, 1, false}, {"rowvec- funny strides", Shape{1, 10}, []int{1}, []int{0, 1}, 1, false}, {"colvec - funny strides", Shape{10, 1}, []int{1}, []int{1, 0}, 1, false}, } func TestIssue90(t *testing.T) { 
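// As a worked illustration of the arithmetic Ltoi performs on the table above (commentary only,
// not an original test case): for the "rowvec" case, shape (1, 10) with strides [10, 1] maps the
// coordinate (0, 1) to the linear index 0*10 + 1*1 = 1, which is the `correct` value the table expects.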
for i, c := range ltoiTestCases { at, err := Ltoi(c.shape, c.strides, c.coordinates...) if !checkErr(t, c.willErr, err, c.name, i) { continue } if at != c.correct { t.Errorf("Expected Ltoi(%v, %v, %v) to be %v. Got %v instead", c.shape, c.strides, c.coordinates, c.correct, at) } } } tensor-0.9.24/known_race_test.go000066400000000000000000000021041426512615100166360ustar00rootroot00000000000000// +build ignore // +build !race package tensor import ( "testing" "unsafe" "github.com/stretchr/testify/assert" ) // This test will fail under `go test -race`. // // This is because FromMemory() will use uintptr in a way that is incorrect according to the checkptr directive of Go 1.14+ // // Though it's incorrect, it's the only way to use heterogeneous, readable memory (e.g. CUDA). func TestFromMemory(t *testing.T) { // dummy memory - this could be an externally malloc'd memory, or a mmap'ed file. // but here we're just gonna let Go manage memory. s := make([]float64, 100) ptr := uintptr(unsafe.Pointer(&s[0])) size := uintptr(100 * 8) T := New(Of(Float32), WithShape(50, 4), FromMemory(ptr, size)) if len(T.Float32s()) != 200 { t.Error("expected 200 Float32s") } assert.Equal(t, make([]float32, 200), T.Data()) assert.True(t, T.IsManuallyManaged(), "Unmanaged %v |%v | q: %v", ManuallyManaged, T.flag, (T.flag>>ManuallyManaged)&MemoryFlag(1)) fail := func() { New(FromMemory(ptr, size), Of(Float32)) } assert.Panics(t, fail, "Expected bad New() call to panic") } tensor-0.9.24/mathutils.go000066400000000000000000000001111426512615100154570ustar00rootroot00000000000000// +build amd64,!noasm package tensor func divmod(a, b int) (q, r int) tensor-0.9.24/mathutils_go.go000066400000000000000000000001531426512615100161500ustar00rootroot00000000000000// +build !amd64 noasm package tensor func divmod(a, b int) (q, r int) { q = a / b r = a % b return } tensor-0.9.24/mathutils_test.go000066400000000000000000000011251426512615100165220ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) func TestDivmod(t *testing.T) { as := []int{0, 1, 2, 3, 4, 5} bs := []int{1, 2, 3, 3, 2, 3} qs := []int{0, 0, 0, 1, 2, 1} rs := []int{0, 1, 2, 0, 0, 2} for i, a := range as { b := bs[i] eq := qs[i] er := rs[i] q, r := divmod(a, b) if q != eq { t.Errorf("Expected %d / %d to equal %d. Got %d instead", a, b, eq, q) } if r != er { t.Errorf("Expected %d %% %d to equal %d.
Got %d instead", a, b, er, r) } } assert := assert.New(t) fail := func() { divmod(1, 0) } assert.Panics(fail) }
tensor-0.9.24/media/000077500000000000000000000000001426512615100142025ustar00rootroot00000000000000
[binary media omitted: media/dense.dia, media/dense.png, media/dense_v1.png, media/slice.dia and media/slice.png are Dia diagram and PNG image data that do not render as text]
tensor-0.9.24/native/000077500000000000000000000000001426512615100144115ustar00rootroot00000000000000
tensor-0.9.24/native/doc.go000066400000000000000000000007251426512615100155110ustar00rootroot00000000000000// package native is a utility package for gorgonia.org/tensor. // // Amongst other things, it provides iterators that use Go slice semantics, while keeping a reference to the underlying memory. // This means you can update the slices and the changes will be reflected back into the original tensor. // // There is of course a cost of using the native iterators and selectors - allocation costs. // For best performance, don't use these in a tight loop. package native
tensor-0.9.24/native/example_test.go000066400000000000000000000057051426512615100174410ustar00rootroot00000000000000package native import ( "fmt" . "gorgonia.org/tensor" ) // There are times where it is more effective to use native Go slice semantics to do work (for example, when performing batch work over kernels). // Iterators are useful for this purpose. This package provides iterators for the standard types // However, custom types are also available. See Vector, Matrix and Tensor3 examples. func Example_iterator() { var T *Dense T = New(WithShape(2, 3), WithBacking(Range(Float64, 0, 6))) x, err := MatrixF64(T) if err != nil { fmt.Printf("ERR: %v", err) } for _, row := range x { fmt.Printf("%v\n", row) } // Output: // [0 1 2] // [3 4 5] } // The NativeSelect function squashes the dimensions, and returns an iterator in native Go slice semantics. func Example_select() { // Selection is a bit of an interesting use case.
Sometimes you don't want to iterate through the layers. // // For example, in a number of use cases where you have a 4-Tensor, you'd typically reshape it to some // 2D matrix which can then be plugged into BLAS algorithms directly. Sometimes you wouldn't need to reshape. // All you have to do is squash the dimensions inwards. This function does that. // // The best way to explain the Select functions is through concrete examples. // Imagine a tensor with (2,3,4,5) shape. Arbitrarily, we call them (NCHW) - Batch Size, Channel Count, Height, Width. // If we want to select all the channels, across all batches, then `NativeSelectX(T, 1)` would yield all channels. The resulting matrix will be (6, 20). // If we want to select all the heights, across all channels and batches, then `NativeSelectX(T, 2)` will yield all heights. The resulting matrix will be (24, 5). // // If for some reason the format was in NHWC, then you would need to reshape. This wouldn't be useful. var T *Dense T = New(WithShape(2, 3, 4, 5), WithBacking(Range(Float64, 0, 2*3*4*5))) x, err := SelectF64(T, 1) if err != nil { fmt.Printf("ERR %v", err) } for _, row := range x { fmt.Printf("%3.0f\n", row) } // Output: // [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19] // [ 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39] // [ 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59] // [ 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79] // [ 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99] // [100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119] } // The iterators are iterators in the truest sense. The data isn't copied, as this example shows. func Example_clobber() { var T *Dense T = New(WithShape(2, 3), WithBacking(Range(Float64, 0, 6))) fmt.Printf("Before :\n%v", T) xx, _ := MatrixF64(T) xx[1][1] = 10000 fmt.Printf("After :\n%v", T) // Output: // Before : // ⎡0 1 2⎤ // ⎣3 4 5⎦ // After : // ⎡ 0 1 2⎤ // ⎣ 3 10000 5⎦ } tensor-0.9.24/native/generic.go000066400000000000000000000033771426512615100163660ustar00rootroot00000000000000package native import ( "reflect" "unsafe" .
"gorgonia.org/tensor" ) func Vector(t *Dense) (interface{}, error) { if err := checkNativeIterable(t, 1, t.Dtype()); err != nil { return nil, err } return t.Data(), nil } func Matrix(t *Dense) (interface{}, error) { if err := checkNativeIterable(t, 2, t.Dtype()); err != nil { return nil, err } shape := t.Shape() strides := t.Strides() typ := t.Dtype().Type rows := shape[0] cols := shape[1] rowStride := strides[0] retVal := reflect.MakeSlice(reflect.SliceOf(reflect.SliceOf(typ)), rows, rows) ptr := t.Uintptr() for i := 0; i < rows; i++ { e := retVal.Index(i) sh := (*reflect.SliceHeader)(unsafe.Pointer(e.Addr().Pointer())) sh.Data = uintptr(i*rowStride)*typ.Size() + ptr sh.Len = cols sh.Cap = cols } return retVal.Interface(), nil } func Tensor3(t *Dense) (interface{}, error) { if err := checkNativeIterable(t, 3, t.Dtype()); err != nil { return nil, err } shape := t.Shape() strides := t.Strides() typ := t.Dtype().Type layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal := reflect.MakeSlice(reflect.SliceOf(reflect.SliceOf(reflect.SliceOf(typ))), layers, layers) ptr := t.Uintptr() for i := 0; i < layers; i++ { el := retVal.Index(i) inner := reflect.MakeSlice(reflect.SliceOf(reflect.SliceOf(typ)), rows, rows) for j := 0; j < rows; j++ { e := inner.Index(j) sh := (*reflect.SliceHeader)(unsafe.Pointer(e.Addr().Pointer())) sh.Data = uintptr(i*layerStride+j*rowStride)*typ.Size() + ptr sh.Len = cols sh.Cap = cols } sh := (*reflect.SliceHeader)(unsafe.Pointer(el.Addr().Pointer())) sh.Data = inner.Index(0).Addr().Pointer() sh.Len = rows sh.Cap = rows } return retVal.Interface(), nil } tensor-0.9.24/native/generic_test.go000066400000000000000000000021431426512615100174130ustar00rootroot00000000000000package native_test import ( "fmt" "gorgonia.org/tensor" . "gorgonia.org/tensor/native" ) type MyType int func Example_vector() { backing := []MyType{ 0, 1, 2, 3, } T := tensor.New(tensor.WithShape(4), tensor.WithBacking(backing)) val, err := Vector(T) if err != nil { fmt.Printf("error: %v", err) } it := val.([]MyType) fmt.Println(it) // Output: // [0 1 2 3] } func Example_matrix() { backing := []MyType{ 0, 1, 2, 3, 4, 5, } T := tensor.New(tensor.WithShape(3, 2), tensor.WithBacking(backing)) val, err := Matrix(T) if err != nil { fmt.Printf("error: %v", err) } it := val.([][]MyType) fmt.Println(it) // Output: // [[0 1] [2 3] [4 5]] } func Example_tensor3() { backing := []MyType{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, } T := tensor.New(tensor.WithShape(2, 3, 4), tensor.WithBacking(backing)) val, err := Tensor3(T) if err != nil { fmt.Printf("error: %v", err) } it := val.([][][]MyType) fmt.Println(it) //Output: // [[[0 1 2 3] [4 5 6 7] [8 9 10 11]] [[12 13 14 15] [16 17 18 19] [20 21 22 23]]] } tensor-0.9.24/native/iterator_native.go000066400000000000000000000732501426512615100201460ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package native import ( "reflect" "unsafe" "github.com/pkg/errors" . "gorgonia.org/tensor" ) func checkNativeIterable(t *Dense, dims int, dt Dtype) error { // checks: if !t.IsNativelyAccessible() { return errors.Errorf("Cannot convert *Dense to *mat.Dense. Data is inaccessible") } if t.Shape().Dims() != dims { return errors.Errorf("Cannot convert *Dense to native iterator. 
Expected number of dimension: %d, T has got %d dimensions (Shape: %v)", dims, t.Dims(), t.Shape()) } if t.F() || t.RequiresIterator() { return errors.Errorf("Not yet implemented: native matrix for colmajor or unpacked matrices") } if t.Dtype() != dt { return errors.Errorf("Conversion to native iterable only works on %v. Got %v", dt, t.Dtype()) } return nil } /* Native Iterables for bool */ // VectorB converts a *Dense into a []bool // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorB(t *Dense) (retVal []bool, err error) { if err = checkNativeIterable(t, 1, Bool); err != nil { return nil, err } return t.Bools(), nil } // MatrixB converts a *Dense into a [][]bool // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixB(t *Dense) (retVal [][]bool, err error) { if err = checkNativeIterable(t, 2, Bool); err != nil { return nil, err } data := t.Bools() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]bool, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]bool, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3B converts a *Dense into a [][][]bool. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3B(t *Dense) (retVal [][][]bool, err error) { if err = checkNativeIterable(t, 3, Bool); err != nil { return nil, err } data := t.Bools() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]bool, layers) for i := range retVal { retVal[i] = make([][]bool, rows) for j := range retVal[i] { retVal[i][j] = make([]bool, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for int */ // VectorI converts a *Dense into a []int // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorI(t *Dense) (retVal []int, err error) { if err = checkNativeIterable(t, 1, Int); err != nil { return nil, err } return t.Ints(), nil } // MatrixI converts a *Dense into a [][]int // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixI(t *Dense) (retVal [][]int, err error) { if err = checkNativeIterable(t, 2, Int); err != nil { return nil, err } data := t.Ints() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]int, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]int, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3I converts a *Dense into a [][][]int. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. 
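// An illustrative sketch from a caller's perspective (the shape and backing here are assumed
// for the example, not taken from this file):
//
//	T := tensor.New(tensor.WithShape(2, 3, 4), tensor.WithBacking(tensor.Range(tensor.Int, 0, 24)))
//	t3, _ := native.Tensor3I(T)
//	fmt.Println(t3[1][2][3]) // 23, the last element in row-major order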
func Tensor3I(t *Dense) (retVal [][][]int, err error) { if err = checkNativeIterable(t, 3, Int); err != nil { return nil, err } data := t.Ints() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]int, layers) for i := range retVal { retVal[i] = make([][]int, rows) for j := range retVal[i] { retVal[i][j] = make([]int, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for int8 */ // VectorI8 converts a *Dense into a []int8 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorI8(t *Dense) (retVal []int8, err error) { if err = checkNativeIterable(t, 1, Int8); err != nil { return nil, err } return t.Int8s(), nil } // MatrixI8 converts a *Dense into a [][]int8 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixI8(t *Dense) (retVal [][]int8, err error) { if err = checkNativeIterable(t, 2, Int8); err != nil { return nil, err } data := t.Int8s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]int8, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]int8, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3I8 converts a *Dense into a [][][]int8. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3I8(t *Dense) (retVal [][][]int8, err error) { if err = checkNativeIterable(t, 3, Int8); err != nil { return nil, err } data := t.Int8s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]int8, layers) for i := range retVal { retVal[i] = make([][]int8, rows) for j := range retVal[i] { retVal[i][j] = make([]int8, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for int16 */ // VectorI16 converts a *Dense into a []int16 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorI16(t *Dense) (retVal []int16, err error) { if err = checkNativeIterable(t, 1, Int16); err != nil { return nil, err } return t.Int16s(), nil } // MatrixI16 converts a *Dense into a [][]int16 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixI16(t *Dense) (retVal [][]int16, err error) { if err = checkNativeIterable(t, 2, Int16); err != nil { return nil, err } data := t.Int16s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]int16, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]int16, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3I16 converts a *Dense into a [][][]int16. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. 
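// Like the other converters above, this relies on checkNativeIterable, so it errors for tensors
// that are not natively accessible, not 3-dimensional, col-major or iterator-requiring, or of a
// Dtype other than Int16.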
func Tensor3I16(t *Dense) (retVal [][][]int16, err error) { if err = checkNativeIterable(t, 3, Int16); err != nil { return nil, err } data := t.Int16s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]int16, layers) for i := range retVal { retVal[i] = make([][]int16, rows) for j := range retVal[i] { retVal[i][j] = make([]int16, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for int32 */ // VectorI32 converts a *Dense into a []int32 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorI32(t *Dense) (retVal []int32, err error) { if err = checkNativeIterable(t, 1, Int32); err != nil { return nil, err } return t.Int32s(), nil } // MatrixI32 converts a *Dense into a [][]int32 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixI32(t *Dense) (retVal [][]int32, err error) { if err = checkNativeIterable(t, 2, Int32); err != nil { return nil, err } data := t.Int32s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]int32, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]int32, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3I32 converts a *Dense into a [][][]int32. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3I32(t *Dense) (retVal [][][]int32, err error) { if err = checkNativeIterable(t, 3, Int32); err != nil { return nil, err } data := t.Int32s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]int32, layers) for i := range retVal { retVal[i] = make([][]int32, rows) for j := range retVal[i] { retVal[i][j] = make([]int32, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for int64 */ // VectorI64 converts a *Dense into a []int64 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorI64(t *Dense) (retVal []int64, err error) { if err = checkNativeIterable(t, 1, Int64); err != nil { return nil, err } return t.Int64s(), nil } // MatrixI64 converts a *Dense into a [][]int64 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixI64(t *Dense) (retVal [][]int64, err error) { if err = checkNativeIterable(t, 2, Int64); err != nil { return nil, err } data := t.Int64s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]int64, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]int64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3I64 converts a *Dense into a [][][]int64. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. 
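// Note that the returned slices alias the tensor's memory rather than copying it (see the
// package doc), so writes are visible in the tensor. A hedged sketch (names assumed):
//
//	t3, _ := native.Tensor3I64(T)
//	t3[0][0][0] = 42 // T's first element is now 42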
func Tensor3I64(t *Dense) (retVal [][][]int64, err error) { if err = checkNativeIterable(t, 3, Int64); err != nil { return nil, err } data := t.Int64s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]int64, layers) for i := range retVal { retVal[i] = make([][]int64, rows) for j := range retVal[i] { retVal[i][j] = make([]int64, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for uint */ // VectorU converts a *Dense into a []uint // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorU(t *Dense) (retVal []uint, err error) { if err = checkNativeIterable(t, 1, Uint); err != nil { return nil, err } return t.Uints(), nil } // MatrixU converts a *Dense into a [][]uint // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixU(t *Dense) (retVal [][]uint, err error) { if err = checkNativeIterable(t, 2, Uint); err != nil { return nil, err } data := t.Uints() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]uint, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]uint, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3U converts a *Dense into a [][][]uint. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3U(t *Dense) (retVal [][][]uint, err error) { if err = checkNativeIterable(t, 3, Uint); err != nil { return nil, err } data := t.Uints() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]uint, layers) for i := range retVal { retVal[i] = make([][]uint, rows) for j := range retVal[i] { retVal[i][j] = make([]uint, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for uint8 */ // VectorU8 converts a *Dense into a []uint8 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorU8(t *Dense) (retVal []uint8, err error) { if err = checkNativeIterable(t, 1, Uint8); err != nil { return nil, err } return t.Uint8s(), nil } // MatrixU8 converts a *Dense into a [][]uint8 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixU8(t *Dense) (retVal [][]uint8, err error) { if err = checkNativeIterable(t, 2, Uint8); err != nil { return nil, err } data := t.Uint8s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]uint8, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]uint8, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3U8 converts a *Dense into a [][][]uint8. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. 
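// A plausible use for this particular converter, purely as an illustration: treating a
// (H, W, C) uint8 tensor as image-like data.
//
//	img := tensor.New(tensor.WithShape(4, 4, 3), tensor.WithBacking(make([]uint8, 48)))
//	px, _ := native.Tensor3U8(img) // px[y][x] is the 3-byte pixel at column x, row y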
func Tensor3U8(t *Dense) (retVal [][][]uint8, err error) { if err = checkNativeIterable(t, 3, Uint8); err != nil { return nil, err } data := t.Uint8s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]uint8, layers) for i := range retVal { retVal[i] = make([][]uint8, rows) for j := range retVal[i] { retVal[i][j] = make([]uint8, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for uint16 */ // VectorU16 converts a *Dense into a []uint16 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorU16(t *Dense) (retVal []uint16, err error) { if err = checkNativeIterable(t, 1, Uint16); err != nil { return nil, err } return t.Uint16s(), nil } // MatrixU16 converts a *Dense into a [][]uint16 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixU16(t *Dense) (retVal [][]uint16, err error) { if err = checkNativeIterable(t, 2, Uint16); err != nil { return nil, err } data := t.Uint16s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]uint16, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]uint16, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3U16 converts a *Dense into a [][][]uint16. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3U16(t *Dense) (retVal [][][]uint16, err error) { if err = checkNativeIterable(t, 3, Uint16); err != nil { return nil, err } data := t.Uint16s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]uint16, layers) for i := range retVal { retVal[i] = make([][]uint16, rows) for j := range retVal[i] { retVal[i][j] = make([]uint16, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for uint32 */ // VectorU32 converts a *Dense into a []uint32 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorU32(t *Dense) (retVal []uint32, err error) { if err = checkNativeIterable(t, 1, Uint32); err != nil { return nil, err } return t.Uint32s(), nil } // MatrixU32 converts a *Dense into a [][]uint32 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixU32(t *Dense) (retVal [][]uint32, err error) { if err = checkNativeIterable(t, 2, Uint32); err != nil { return nil, err } data := t.Uint32s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]uint32, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]uint32, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3U32 converts a *Dense into a [][][]uint32. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. 
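// Because the slice headers built below point directly into t's backing array, the *Dense must
// be kept reachable for as long as the returned slices are in use; nothing is copied.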
func Tensor3U32(t *Dense) (retVal [][][]uint32, err error) { if err = checkNativeIterable(t, 3, Uint32); err != nil { return nil, err } data := t.Uint32s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]uint32, layers) for i := range retVal { retVal[i] = make([][]uint32, rows) for j := range retVal[i] { retVal[i][j] = make([]uint32, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for uint64 */ // VectorU64 converts a *Dense into a []uint64 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorU64(t *Dense) (retVal []uint64, err error) { if err = checkNativeIterable(t, 1, Uint64); err != nil { return nil, err } return t.Uint64s(), nil } // MatrixU64 converts a *Dense into a [][]uint64 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixU64(t *Dense) (retVal [][]uint64, err error) { if err = checkNativeIterable(t, 2, Uint64); err != nil { return nil, err } data := t.Uint64s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]uint64, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]uint64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3U64 converts a *Dense into a [][][]uint64. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3U64(t *Dense) (retVal [][][]uint64, err error) { if err = checkNativeIterable(t, 3, Uint64); err != nil { return nil, err } data := t.Uint64s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]uint64, layers) for i := range retVal { retVal[i] = make([][]uint64, rows) for j := range retVal[i] { retVal[i][j] = make([]uint64, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for float32 */ // VectorF32 converts a *Dense into a []float32 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorF32(t *Dense) (retVal []float32, err error) { if err = checkNativeIterable(t, 1, Float32); err != nil { return nil, err } return t.Float32s(), nil } // MatrixF32 converts a *Dense into a [][]float32 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixF32(t *Dense) (retVal [][]float32, err error) { if err = checkNativeIterable(t, 2, Float32); err != nil { return nil, err } data := t.Float32s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]float32, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]float32, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3F32 converts a *Dense into a [][][]float32. 
// If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3F32(t *Dense) (retVal [][][]float32, err error) { if err = checkNativeIterable(t, 3, Float32); err != nil { return nil, err } data := t.Float32s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]float32, layers) for i := range retVal { retVal[i] = make([][]float32, rows) for j := range retVal[i] { retVal[i][j] = make([]float32, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for float64 */ // VectorF64 converts a *Dense into a []float64 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorF64(t *Dense) (retVal []float64, err error) { if err = checkNativeIterable(t, 1, Float64); err != nil { return nil, err } return t.Float64s(), nil } // MatrixF64 converts a *Dense into a [][]float64 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixF64(t *Dense) (retVal [][]float64, err error) { if err = checkNativeIterable(t, 2, Float64); err != nil { return nil, err } data := t.Float64s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]float64, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]float64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3F64 converts a *Dense into a [][][]float64. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3F64(t *Dense) (retVal [][][]float64, err error) { if err = checkNativeIterable(t, 3, Float64); err != nil { return nil, err } data := t.Float64s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]float64, layers) for i := range retVal { retVal[i] = make([][]float64, rows) for j := range retVal[i] { retVal[i][j] = make([]float64, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for complex64 */ // VectorC64 converts a *Dense into a []complex64 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorC64(t *Dense) (retVal []complex64, err error) { if err = checkNativeIterable(t, 1, Complex64); err != nil { return nil, err } return t.Complex64s(), nil } // MatrixC64 converts a *Dense into a [][]complex64 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. 
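// An illustrative sketch (shape and values assumed): for a 2-by-2 complex64 tensor backed by
// []complex64{1, 2, 3, 4},
//
//	m, _ := native.MatrixC64(T)
//	_ = m[1][0] // 3, since the data is laid out row-major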
func MatrixC64(t *Dense) (retVal [][]complex64, err error) { if err = checkNativeIterable(t, 2, Complex64); err != nil { return nil, err } data := t.Complex64s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]complex64, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]complex64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3C64 converts a *Dense into a [][][]complex64. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3C64(t *Dense) (retVal [][][]complex64, err error) { if err = checkNativeIterable(t, 3, Complex64); err != nil { return nil, err } data := t.Complex64s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]complex64, layers) for i := range retVal { retVal[i] = make([][]complex64, rows) for j := range retVal[i] { retVal[i][j] = make([]complex64, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for complex128 */ // VectorC128 converts a *Dense into a []complex128 // If the *Dense does not represent a vector of the wanted type, it will return // an error. func VectorC128(t *Dense) (retVal []complex128, err error) { if err = checkNativeIterable(t, 1, Complex128); err != nil { return nil, err } return t.Complex128s(), nil } // MatrixC128 converts a *Dense into a [][]complex128 // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixC128(t *Dense) (retVal [][]complex128, err error) { if err = checkNativeIterable(t, 2, Complex128); err != nil { return nil, err } data := t.Complex128s() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]complex128, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]complex128, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3C128 converts a *Dense into a [][][]complex128. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3C128(t *Dense) (retVal [][][]complex128, err error) { if err = checkNativeIterable(t, 3, Complex128); err != nil { return nil, err } data := t.Complex128s() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]complex128, layers) for i := range retVal { retVal[i] = make([][]complex128, rows) for j := range retVal[i] { retVal[i][j] = make([]complex128, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } /* Native Iterables for string */ // VectorStr converts a *Dense into a []string // If the *Dense does not represent a vector of the wanted type, it will return // an error. 
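// The vector converters are the cheapest of the three: no slice headers are built and the
// backing slice is returned as-is. Sketch (names assumed):
//
//	s, _ := native.VectorStr(T) // s shares T's backing []string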
func VectorStr(t *Dense) (retVal []string, err error) { if err = checkNativeIterable(t, 1, String); err != nil { return nil, err } return t.Strings(), nil } // MatrixStr converts a *Dense into a [][]string // If the *Dense does not represent a matrix of the wanted type, it // will return an error. func MatrixStr(t *Dense) (retVal [][]string, err error) { if err = checkNativeIterable(t, 2, String); err != nil { return nil, err } data := t.Strings() shape := t.Shape() strides := t.Strides() rows := shape[0] cols := shape[1] rowStride := strides[0] retVal = make([][]string, rows) for i := range retVal { start := i * rowStride retVal[i] = make([]string, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } return } // Tensor3Str converts a *Dense into a [][][]string. // If the *Dense does not represent a 3-tensor of the wanted type, it will return an error. func Tensor3Str(t *Dense) (retVal [][][]string, err error) { if err = checkNativeIterable(t, 3, String); err != nil { return nil, err } data := t.Strings() shape := t.Shape() strides := t.Strides() layers := shape[0] rows := shape[1] cols := shape[2] layerStride := strides[0] rowStride := strides[1] retVal = make([][][]string, layers) for i := range retVal { retVal[i] = make([][]string, rows) for j := range retVal[i] { retVal[i][j] = make([]string, 0) start := i*layerStride + j*rowStride hdr := (*reflect.SliceHeader)(unsafe.Pointer(&retVal[i][j])) hdr.Data = uintptr(unsafe.Pointer(&data[start])) hdr.Cap = cols hdr.Len = cols } } return } tensor-0.9.24/native/iterator_native2.go000066400000000000000000000350271426512615100202300ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package native import ( "reflect" "unsafe" "github.com/pkg/errors" . "gorgonia.org/tensor" ) func checkNativeSelectable(t *Dense, axis int, dt Dtype) error { if !t.IsNativelyAccessible() { return errors.New("Cannot select on non-natively accessible data") } if axis >= t.Shape().Dims() && !(t.IsScalar() && axis == 0) { return errors.Errorf("Cannot select on axis %d. Shape is %v", axis, t.Shape()) } if t.F() || t.RequiresIterator() { return errors.Errorf("Not yet implemented: native select for colmajor or unpacked matrices") } if t.Dtype() != dt { return errors.Errorf("Native selection only works on %v. Got %v", dt, t.Dtype()) } return nil } /* Native Select for bool */ // SelectB creates a slice of flat data types. See Example of NativeSelectF64. func SelectB(t *Dense, axis int) (retVal [][]bool, err error) { if err := checkNativeSelectable(t, axis, Bool); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]bool, 1) retVal[0] = t.Bools() case 2: if axis == 0 { return MatrixB(t) } fallthrough default: // size := t.Shape()[axis] data := t.Bools() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]bool, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]bool, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for int */ // SelectI creates a slice of flat data types. See Example of NativeSelectF64. 
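// To make the axis semantics concrete, reusing the numbers from Example_select earlier in this
// package: for a (2, 3, 4, 5) tensor, axis 1 yields 2*3 = 6 rows of length 20, and axis 2 yields
// 2*3*4 = 24 rows of length 5. In general there are prod(shape[:axis+1]) rows, each of length
// strides[axis], exactly as computed in the function bodies below.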
func SelectI(t *Dense, axis int) (retVal [][]int, err error) { if err := checkNativeSelectable(t, axis, Int); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]int, 1) retVal[0] = t.Ints() case 2: if axis == 0 { return MatrixI(t) } fallthrough default: // size := t.Shape()[axis] data := t.Ints() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]int, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]int, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for int8 */ // SelectI8 creates a slice of flat data types. See Example of NativeSelectF64. func SelectI8(t *Dense, axis int) (retVal [][]int8, err error) { if err := checkNativeSelectable(t, axis, Int8); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]int8, 1) retVal[0] = t.Int8s() case 2: if axis == 0 { return MatrixI8(t) } fallthrough default: // size := t.Shape()[axis] data := t.Int8s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]int8, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]int8, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for int16 */ // SelectI16 creates a slice of flat data types. See Example of NativeSelectF64. func SelectI16(t *Dense, axis int) (retVal [][]int16, err error) { if err := checkNativeSelectable(t, axis, Int16); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]int16, 1) retVal[0] = t.Int16s() case 2: if axis == 0 { return MatrixI16(t) } fallthrough default: // size := t.Shape()[axis] data := t.Int16s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]int16, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]int16, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for int32 */ // SelectI32 creates a slice of flat data types. See Example of NativeSelectF64. func SelectI32(t *Dense, axis int) (retVal [][]int32, err error) { if err := checkNativeSelectable(t, axis, Int32); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]int32, 1) retVal[0] = t.Int32s() case 2: if axis == 0 { return MatrixI32(t) } fallthrough default: // size := t.Shape()[axis] data := t.Int32s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]int32, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]int32, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for int64 */ // SelectI64 creates a slice of flat data types. See Example of NativeSelectF64. 
func SelectI64(t *Dense, axis int) (retVal [][]int64, err error) { if err := checkNativeSelectable(t, axis, Int64); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]int64, 1) retVal[0] = t.Int64s() case 2: if axis == 0 { return MatrixI64(t) } fallthrough default: // size := t.Shape()[axis] data := t.Int64s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]int64, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]int64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for uint */ // SelectU creates a slice of flat data types. See Example of NativeSelectF64. func SelectU(t *Dense, axis int) (retVal [][]uint, err error) { if err := checkNativeSelectable(t, axis, Uint); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]uint, 1) retVal[0] = t.Uints() case 2: if axis == 0 { return MatrixU(t) } fallthrough default: // size := t.Shape()[axis] data := t.Uints() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]uint, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]uint, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for uint8 */ // SelectU8 creates a slice of flat data types. See Example of NativeSelectF64. func SelectU8(t *Dense, axis int) (retVal [][]uint8, err error) { if err := checkNativeSelectable(t, axis, Uint8); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]uint8, 1) retVal[0] = t.Uint8s() case 2: if axis == 0 { return MatrixU8(t) } fallthrough default: // size := t.Shape()[axis] data := t.Uint8s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]uint8, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]uint8, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for uint16 */ // SelectU16 creates a slice of flat data types. See Example of NativeSelectF64. func SelectU16(t *Dense, axis int) (retVal [][]uint16, err error) { if err := checkNativeSelectable(t, axis, Uint16); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]uint16, 1) retVal[0] = t.Uint16s() case 2: if axis == 0 { return MatrixU16(t) } fallthrough default: // size := t.Shape()[axis] data := t.Uint16s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]uint16, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]uint16, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for uint32 */ // SelectU32 creates a slice of flat data types. See Example of NativeSelectF64. 
func SelectU32(t *Dense, axis int) (retVal [][]uint32, err error) { if err := checkNativeSelectable(t, axis, Uint32); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]uint32, 1) retVal[0] = t.Uint32s() case 2: if axis == 0 { return MatrixU32(t) } fallthrough default: // size := t.Shape()[axis] data := t.Uint32s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]uint32, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]uint32, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for uint64 */ // SelectU64 creates a slice of flat data types. See Example of NativeSelectF64. func SelectU64(t *Dense, axis int) (retVal [][]uint64, err error) { if err := checkNativeSelectable(t, axis, Uint64); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]uint64, 1) retVal[0] = t.Uint64s() case 2: if axis == 0 { return MatrixU64(t) } fallthrough default: // size := t.Shape()[axis] data := t.Uint64s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]uint64, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]uint64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for float32 */ // SelectF32 creates a slice of flat data types. See Example of NativeSelectF64. func SelectF32(t *Dense, axis int) (retVal [][]float32, err error) { if err := checkNativeSelectable(t, axis, Float32); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]float32, 1) retVal[0] = t.Float32s() case 2: if axis == 0 { return MatrixF32(t) } fallthrough default: // size := t.Shape()[axis] data := t.Float32s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]float32, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]float32, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for float64 */ // SelectF64 creates a slice of flat data types. See Example of NativeSelectF64. func SelectF64(t *Dense, axis int) (retVal [][]float64, err error) { if err := checkNativeSelectable(t, axis, Float64); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]float64, 1) retVal[0] = t.Float64s() case 2: if axis == 0 { return MatrixF64(t) } fallthrough default: // size := t.Shape()[axis] data := t.Float64s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]float64, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]float64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for complex64 */ // SelectC64 creates a slice of flat data types. See Example of NativeSelectF64. 
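// Illustrative sketch, an editor's addition: as with the Matrix and Tensor3
// conversions earlier in this package, the slices returned by the Select
// functions alias the tensor's memory (the slice headers are pointed into the
// backing array, not copied), so they are views:
//
//	T := tensor.New(tensor.WithBacking([]float64{1, 2, 3, 4, 5, 6}), tensor.WithShape(2, 3))
//	rows, _ := native.SelectF64(T, 0)
//	rows[0][1] = -2 // T's data is now [1 -2 3 4 5 6]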
func SelectC64(t *Dense, axis int) (retVal [][]complex64, err error) { if err := checkNativeSelectable(t, axis, Complex64); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]complex64, 1) retVal[0] = t.Complex64s() case 2: if axis == 0 { return MatrixC64(t) } fallthrough default: // size := t.Shape()[axis] data := t.Complex64s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]complex64, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]complex64, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for complex128 */ // SelectC128 creates a slice of flat data types. See Example of NativeSelectF64. func SelectC128(t *Dense, axis int) (retVal [][]complex128, err error) { if err := checkNativeSelectable(t, axis, Complex128); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]complex128, 1) retVal[0] = t.Complex128s() case 2: if axis == 0 { return MatrixC128(t) } fallthrough default: // size := t.Shape()[axis] data := t.Complex128s() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]complex128, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]complex128, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } /* Native Select for string */ // SelectStr creates a slice of flat data types. See Example of NativeSelectF64. func SelectStr(t *Dense, axis int) (retVal [][]string, err error) { if err := checkNativeSelectable(t, axis, String); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]string, 1) retVal[0] = t.Strings() case 2: if axis == 0 { return MatrixStr(t) } fallthrough default: // size := t.Shape()[axis] data := t.Strings() stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]string, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]string, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } tensor-0.9.24/native/iterator_native2_test.go000066400000000000000000000430651426512615100212700ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package native import ( "testing" "github.com/stretchr/testify/assert" . 
"gorgonia.org/tensor" ) func TestSelectB(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]bool T = New(Of(Bool), WithShape(2, 3, 4, 5)) if x, err = SelectB(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Bool), WithShape(2, 3, 4, 5)) if x, err = SelectB(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Bool), WithShape(2, 3, 4, 5)) if x, err = SelectB(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Bool), WithShape(2, 3)) if x, err = SelectB(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Bool), WithShape(2, 3)) if x, err = SelectB(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(false)) if x, err = SelectB(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectB(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectI(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]int T = New(Of(Int), WithShape(2, 3, 4, 5)) if x, err = SelectI(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Int), WithShape(2, 3, 4, 5)) if x, err = SelectI(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Int), WithShape(2, 3, 4, 5)) if x, err = SelectI(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Int), WithShape(2, 3)) if x, err = SelectI(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Int), WithShape(2, 3)) if x, err = SelectI(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(int(0))) if x, err = SelectI(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectI(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectI8(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]int8 T = New(Of(Int8), WithShape(2, 3, 4, 5)) if x, err = SelectI8(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Int8), WithShape(2, 3, 4, 5)) if x, err = SelectI8(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Int8), WithShape(2, 3, 4, 5)) if x, err = SelectI8(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Int8), WithShape(2, 3)) if x, err = SelectI8(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Int8), WithShape(2, 3)) if x, err = SelectI8(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(int8(0))) if x, err = SelectI8(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectI8(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectI16(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]int16 T = New(Of(Int16), WithShape(2, 3, 4, 5)) if x, err = SelectI16(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Int16), WithShape(2, 3, 4, 5)) if x, err = SelectI16(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Int16), 
WithShape(2, 3, 4, 5)) if x, err = SelectI16(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Int16), WithShape(2, 3)) if x, err = SelectI16(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Int16), WithShape(2, 3)) if x, err = SelectI16(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(int16(0))) if x, err = SelectI16(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectI16(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectI32(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]int32 T = New(Of(Int32), WithShape(2, 3, 4, 5)) if x, err = SelectI32(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Int32), WithShape(2, 3, 4, 5)) if x, err = SelectI32(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Int32), WithShape(2, 3, 4, 5)) if x, err = SelectI32(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Int32), WithShape(2, 3)) if x, err = SelectI32(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Int32), WithShape(2, 3)) if x, err = SelectI32(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(int32(0))) if x, err = SelectI32(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectI32(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectI64(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]int64 T = New(Of(Int64), WithShape(2, 3, 4, 5)) if x, err = SelectI64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Int64), WithShape(2, 3, 4, 5)) if x, err = SelectI64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Int64), WithShape(2, 3, 4, 5)) if x, err = SelectI64(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Int64), WithShape(2, 3)) if x, err = SelectI64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Int64), WithShape(2, 3)) if x, err = SelectI64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(int64(0))) if x, err = SelectI64(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectI64(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectU(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]uint T = New(Of(Uint), WithShape(2, 3, 4, 5)) if x, err = SelectU(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Uint), WithShape(2, 3, 4, 5)) if x, err = SelectU(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Uint), WithShape(2, 3, 4, 5)) if x, err = SelectU(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Uint), WithShape(2, 3)) if x, err = SelectU(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Uint), WithShape(2, 3)) if x, err = SelectU(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) 
assert.Equal(1, len(x[0])) T = New(FromScalar(uint(0))) if x, err = SelectU(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectU(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectU8(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]uint8 T = New(Of(Uint8), WithShape(2, 3, 4, 5)) if x, err = SelectU8(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Uint8), WithShape(2, 3, 4, 5)) if x, err = SelectU8(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Uint8), WithShape(2, 3, 4, 5)) if x, err = SelectU8(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Uint8), WithShape(2, 3)) if x, err = SelectU8(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Uint8), WithShape(2, 3)) if x, err = SelectU8(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(uint8(0))) if x, err = SelectU8(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectU8(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectU16(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]uint16 T = New(Of(Uint16), WithShape(2, 3, 4, 5)) if x, err = SelectU16(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Uint16), WithShape(2, 3, 4, 5)) if x, err = SelectU16(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Uint16), WithShape(2, 3, 4, 5)) if x, err = SelectU16(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Uint16), WithShape(2, 3)) if x, err = SelectU16(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Uint16), WithShape(2, 3)) if x, err = SelectU16(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(uint16(0))) if x, err = SelectU16(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectU16(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectU32(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]uint32 T = New(Of(Uint32), WithShape(2, 3, 4, 5)) if x, err = SelectU32(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Uint32), WithShape(2, 3, 4, 5)) if x, err = SelectU32(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Uint32), WithShape(2, 3, 4, 5)) if x, err = SelectU32(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Uint32), WithShape(2, 3)) if x, err = SelectU32(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Uint32), WithShape(2, 3)) if x, err = SelectU32(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(uint32(0))) if x, err = SelectU32(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectU32(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectU64(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]uint64 T = New(Of(Uint64), WithShape(2, 3, 4, 5)) 
if x, err = SelectU64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Uint64), WithShape(2, 3, 4, 5)) if x, err = SelectU64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Uint64), WithShape(2, 3, 4, 5)) if x, err = SelectU64(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Uint64), WithShape(2, 3)) if x, err = SelectU64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Uint64), WithShape(2, 3)) if x, err = SelectU64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(uint64(0))) if x, err = SelectU64(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectU64(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectF32(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]float32 T = New(Of(Float32), WithShape(2, 3, 4, 5)) if x, err = SelectF32(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Float32), WithShape(2, 3, 4, 5)) if x, err = SelectF32(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Float32), WithShape(2, 3, 4, 5)) if x, err = SelectF32(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Float32), WithShape(2, 3)) if x, err = SelectF32(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Float32), WithShape(2, 3)) if x, err = SelectF32(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(float32(0))) if x, err = SelectF32(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectF32(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectF64(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]float64 T = New(Of(Float64), WithShape(2, 3, 4, 5)) if x, err = SelectF64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Float64), WithShape(2, 3, 4, 5)) if x, err = SelectF64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Float64), WithShape(2, 3, 4, 5)) if x, err = SelectF64(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Float64), WithShape(2, 3)) if x, err = SelectF64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Float64), WithShape(2, 3)) if x, err = SelectF64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(float64(0))) if x, err = SelectF64(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectF64(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectC64(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]complex64 T = New(Of(Complex64), WithShape(2, 3, 4, 5)) if x, err = SelectC64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Complex64), WithShape(2, 3, 4, 5)) if x, err = SelectC64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Complex64), WithShape(2, 3, 4, 5)) if x, err = SelectC64(T, 3); err != 
nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Complex64), WithShape(2, 3)) if x, err = SelectC64(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Complex64), WithShape(2, 3)) if x, err = SelectC64(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(complex64(0))) if x, err = SelectC64(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectC64(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectC128(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]complex128 T = New(Of(Complex128), WithShape(2, 3, 4, 5)) if x, err = SelectC128(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(Complex128), WithShape(2, 3, 4, 5)) if x, err = SelectC128(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(Complex128), WithShape(2, 3, 4, 5)) if x, err = SelectC128(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(Complex128), WithShape(2, 3)) if x, err = SelectC128(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(Complex128), WithShape(2, 3)) if x, err = SelectC128(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar(complex128(0))) if x, err = SelectC128(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectC128(T, 10); err == nil { t.Fatal("Expected errors") } } func TestSelectStr(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]string T = New(Of(String), WithShape(2, 3, 4, 5)) if x, err = SelectStr(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of(String), WithShape(2, 3, 4, 5)) if x, err = SelectStr(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(60, len(x[0])) T = New(Of(String), WithShape(2, 3, 4, 5)) if x, err = SelectStr(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of(String), WithShape(2, 3)) if x, err = SelectStr(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of(String), WithShape(2, 3)) if x, err = SelectStr(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar("")) if x, err = SelectStr(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = SelectStr(T, 10); err == nil { t.Fatal("Expected errors") } } tensor-0.9.24/native/iterator_native_test.go000066400000000000000000000277261426512615100212140ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT. package native import ( "testing" "github.com/stretchr/testify/assert" . 
"gorgonia.org/tensor" ) func Test_VectorB(t *testing.T) { assert := assert.New(t) var T *Dense T = New(Of(Bool), WithShape(6)) it, err := VectorB(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixB(t *testing.T) { assert := assert.New(t) var T *Dense T = New(Of(Bool), WithShape(2, 3)) it, err := MatrixB(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3B(t *testing.T) { assert := assert.New(t) var T *Dense T = New(Of(Bool), WithShape(2, 3, 4)) it, err := Tensor3B(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorI(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int, 0, 6)), WithShape(6)) it, err := VectorI(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixI(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int, 0, 6)), WithShape(2, 3)) it, err := MatrixI(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3I(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3I(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorI8(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int8, 0, 6)), WithShape(6)) it, err := VectorI8(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixI8(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int8, 0, 6)), WithShape(2, 3)) it, err := MatrixI8(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3I8(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int8, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3I8(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorI16(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int16, 0, 6)), WithShape(6)) it, err := VectorI16(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixI16(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int16, 0, 6)), WithShape(2, 3)) it, err := MatrixI16(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3I16(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int16, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3I16(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorI32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int32, 0, 6)), WithShape(6)) it, err := VectorI32(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixI32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int32, 0, 6)), WithShape(2, 3)) it, err := MatrixI32(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3I32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int32, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3I32(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, 
len(it[0][0])) } func Test_VectorI64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int64, 0, 6)), WithShape(6)) it, err := VectorI64(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixI64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int64, 0, 6)), WithShape(2, 3)) it, err := MatrixI64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3I64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Int64, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3I64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorU(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint, 0, 6)), WithShape(6)) it, err := VectorU(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixU(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint, 0, 6)), WithShape(2, 3)) it, err := MatrixU(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3U(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3U(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorU8(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint8, 0, 6)), WithShape(6)) it, err := VectorU8(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixU8(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint8, 0, 6)), WithShape(2, 3)) it, err := MatrixU8(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3U8(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint8, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3U8(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorU16(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint16, 0, 6)), WithShape(6)) it, err := VectorU16(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixU16(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint16, 0, 6)), WithShape(2, 3)) it, err := MatrixU16(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3U16(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint16, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3U16(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorU32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint32, 0, 6)), WithShape(6)) it, err := VectorU32(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixU32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint32, 0, 6)), WithShape(2, 3)) it, err := MatrixU32(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3U32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint32, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3U32(T) if err != nil { 
t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorU64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint64, 0, 6)), WithShape(6)) it, err := VectorU64(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixU64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint64, 0, 6)), WithShape(2, 3)) it, err := MatrixU64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3U64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Uint64, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3U64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorF32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Float32, 0, 6)), WithShape(6)) it, err := VectorF32(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixF32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Float32, 0, 6)), WithShape(2, 3)) it, err := MatrixF32(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3F32(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3F32(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorF64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Float64, 0, 6)), WithShape(6)) it, err := VectorF64(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixF64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Float64, 0, 6)), WithShape(2, 3)) it, err := MatrixF64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3F64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Float64, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3F64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorC64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Complex64, 0, 6)), WithShape(6)) it, err := VectorC64(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixC64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Complex64, 0, 6)), WithShape(2, 3)) it, err := MatrixC64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3C64(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Complex64, 0, 24)), WithShape(2, 3, 4)) it, err := Tensor3C64(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) assert.Equal(4, len(it[0][0])) } func Test_VectorC128(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Complex128, 0, 6)), WithShape(6)) it, err := VectorC128(T) if err != nil { t.Fatal(err) } assert.Equal(6, len(it)) } func Test_MatrixC128(t *testing.T) { assert := assert.New(t) var T *Dense T = New(WithBacking(Range(Complex128, 0, 6)), WithShape(2, 3)) it, err := MatrixC128(T) if err != nil { t.Fatal(err) } assert.Equal(2, len(it)) assert.Equal(3, len(it[0])) } func Test_Tensor3C128(t *testing.T) { assert 
:= assert.New(t)
	var T *Dense
	T = New(WithBacking(Range(Complex128, 0, 24)), WithShape(2, 3, 4))
	it, err := Tensor3C128(T)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(2, len(it))
	assert.Equal(3, len(it[0]))
	assert.Equal(4, len(it[0][0]))
}

func Test_VectorStr(t *testing.T) {
	assert := assert.New(t)
	var T *Dense
	T = New(Of(String), WithShape(6))
	it, err := VectorStr(T)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(6, len(it))
}

func Test_MatrixStr(t *testing.T) {
	assert := assert.New(t)
	var T *Dense
	T = New(Of(String), WithShape(2, 3))
	it, err := MatrixStr(T)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(2, len(it))
	assert.Equal(3, len(it[0]))
}

func Test_Tensor3Str(t *testing.T) {
	assert := assert.New(t)
	var T *Dense
	T = New(Of(String), WithShape(2, 3, 4))
	it, err := Tensor3Str(T)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(2, len(it))
	assert.Equal(3, len(it[0]))
	assert.Equal(4, len(it[0][0]))
}
tensor-0.9.24/optimizations_test.go000066400000000000000000000005561426512615100174270ustar00rootroot00000000000000package tensor

import (
	"testing"
)

// this file contains tests to make sure certain algorithms/optimizations aren't crazy

func TestRequiresIterator(t *testing.T) {
	T := New(Of(Int), WithBacking([]int{1, 2, 3, 4}))
	sliced, _ := T.Slice(makeRS(1, 3))
	if sliced.RequiresIterator() {
		t.Errorf("Slicing on rows should not require Iterator")
	}
}
tensor-0.9.24/perf.go000066400000000000000000000123041426512615100144060ustar00rootroot00000000000000package tensor

import (
	"runtime"
	"sync"

	"gorgonia.org/tensor/internal/storage"
)

var habbo sync.Mutex
var usePool = true

// tensorPool is a pool of *Tensor grouped by size. It's guarded by poolsClosed
const (
	maxAPDims = 8
	maxDims   = 8
	PoolSize  = 4096
)

// UsePool enables the use of the pool of *Tensors provided in the package. This is the default option.
func UsePool() {
	habbo.Lock()
	usePool = true
	habbo.Unlock()
}

// DontUsePool makes sure the functions don't use the tensor pool provided.
// This is useful as certain applications don't lend themselves well to use of the pool.
// Examples of such applications would be one where many tensors of wildly different sizes are created all the time.
func DontUsePool() {
	habbo.Lock()
	usePool = false
	habbo.Unlock()
}

// headerPool should only ever be used by scalarToHeader
var headerPool = make(chan *storage.Header, PoolSize)

func borrowHeader() *storage.Header {
	select {
	case hdr := <-headerPool:
		return hdr
	default:
		hdr := new(storage.Header)
		runtime.SetFinalizer(hdr, destroyHeader)
		return hdr
	}
}

func returnHeader(hdr *storage.Header) {
	destroyHeader(hdr)
	if len(headerPool) < cap(headerPool) {
		headerPool <- hdr
	}
}

func destroyHeader(hdr *storage.Header) { hdr.Raw = nil }

var densePool = make(chan *Dense, PoolSize)

func borrowDense() *Dense {
	select {
	case t := <-densePool:
		return t
	default:
		t := new(Dense)
		t.e = StdEng{}
		// t.oe = StdEng{}
		return t
	}
	// return densePool.Get().(*Dense)
}
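// Illustrative sketch, an editor's addition: the intended borrowing
// discipline for the exported pool helpers is borrow, use, return, with the
// return deferred so it also runs on error paths. For example:
//
//	shp := tensor.BorrowInts(3)
//	defer tensor.ReturnInts(shp)
//	copy(shp, []int{2, 3, 4})
//
// Applications that allocate tensors of wildly varying sizes can opt out
// globally with tensor.DontUsePool (see above).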
// ReturnTensor returns a Tensor to its respective pool. USE WITH CAUTION.
func ReturnTensor(t Tensor) {
	if !usePool {
		return
	}
	switch tt := t.(type) {
	case *Dense:
		tt.AP.zero()

		if tt.transposeWith != nil {
			ReturnInts(tt.transposeWith)
			tt.transposeWith = nil
		}

		// array reset
		tt.t = Dtype{}
		tt.array.Header.Raw = nil

		// engine and flag reset
		tt.e = StdEng{}
		tt.oe = nil
		tt.flag = 0

		// other reset
		tt.old.zero()
		tt.viewOf = 0
		tt.transposeWith = nil

		// mask related stuff - TODO: deprecate
		tt.mask = nil
		tt.maskIsSoft = false

		// densePool.Put(tt)
		if len(densePool) < cap(densePool) {
			densePool <- tt
		}
	}
}

/* ----------------------------------------------------------------
------------------ Create Pools
------------------------------------------------------------------*/

/* APLIST POOL */

// init pre-populates intsPool: each size-indexed pool gets a New function that allocates a []int of that size.
func init() {
	for i := range intsPool {
		size := i
		intsPool[i].New = func() interface{} { return make([]int, size) }
	}
	// for i := range boolsPool {
	// 	size := i
	// 	boolsPool[i].New = func() interface{} { return make([]bool, size) }
	// }
}

/* INTS POOL */

var intsPool [maxDims + 1]sync.Pool

// var intsPool = make(chan []int, PoolSize)

/* BOOLS POOL */

var boolsPool = make(chan []bool, PoolSize)

// var boolsPool [PoolSize]sync.Pool

// BorrowInts borrows a slice of ints from the pool. USE WITH CAUTION.
func BorrowInts(size int) []int {
	if size > maxDims {
		return make([]int, size, size)
	}

	// select {
	// case ints := <-intsPool:
	// 	ints = ints[:size]
	// 	return ints
	// default:
	// 	ints := make([]int, size, 8)
	// 	return ints
	// }
	retVal := intsPool[size].Get()
	if retVal == nil {
		return make([]int, size)
	}
	// log.Printf("Borrowing %p. Called by %v", retVal, string(debug.Stack()))
	return retVal.([]int)[:size]
}

// ReturnInts returns a slice of ints to the pool. USE WITH CAUTION.
func ReturnInts(is []int) {
	// log.Printf("Returning %p. Called by %v", is, string(debug.Stack()))
	if is == nil {
		return
	}
	// if len(is) == 2 && is[0] == 52 && is[1] == 10 {
	// 	log.Printf("ints %v", is)
	// 	pc, _, _, _ := runtime.Caller(3)
	// 	log.Printf("Called: %v", runtime.FuncForPC(pc).Name())
	// }
	size := cap(is)
	if size > maxDims {
		return
	}
	is = is[:cap(is)]
	for i := range is {
		is[i] = 0
	}
	// if len(intsPool) < cap(intsPool) {
	// 	intsPool <- is
	// }
	intsPool[size].Put(is)
}

// BorrowBools borrows a slice of bools from the pool. USE WITH CAUTION.
func BorrowBools(size int) []bool {
	if size >= 8 {
		return make([]bool, size)
	}

	select {
	case bools := <-boolsPool:
		return bools
	default:
		bools := make([]bool, 8)
		bools = bools[:size]
		return bools
	}
	// retVal := boolsPool[size].Get()
	// if retVal == nil {
	// 	return make([]bool, size)
	// }
	// return retVal.([]bool)
}

// ReturnBools returns a slice of bools to the pool. USE WITH CAUTION.
func ReturnBools(is []bool) { if is == nil { return } size := cap(is) if size >= 8 { return } is = is[:cap(is)] for i := range is { is[i] = false } if len(boolsPool) < cap(boolsPool) { boolsPool <- is } // boolsPool[size].Put(is) } // var optPool = make(chan *OpOpt, PoolSize) // var optPool = newRingbuffer(PoolSize) var optPool = &sync.Pool{ New: func() interface{} { return new(OpOpt) }, } func borrowOpOpt() *OpOpt { // select { // case fo := <-optPool: // return fo // default: // return new(OpOpt) // } return optPool.Get().(*OpOpt) // if fo, err := optPool.Get(); err == nil { // return (*OpOpt)(fo) // } // return new(OpOpt) } func returnOpOpt(oo *OpOpt) { oo.reuse = nil oo.incr = nil oo.unsafe = false oo.same = false oo.t = Dtype{} // if len(optPool) < cap(optPool) { // optPool <- oo // } optPool.Put(oo) // optPool.Put(unsafe.Pointer(oo)) } tensor-0.9.24/release.go000066400000000000000000000004761426512615100151010ustar00rootroot00000000000000// +build !debug package tensor const DEBUG = false var TABCOUNT uint32 = 0 func tabcount() int { return 0 } func enterLoggingContext() {} func leaveLoggingContext() {} func logf(format string, others ...interface{}) {} func loggc() {} tensor-0.9.24/shape.go000066400000000000000000000220001426512615100145440ustar00rootroot00000000000000package tensor import ( "fmt" "github.com/pkg/errors" ) var scalarShape = Shape{} // ScalarShape represents a scalar. It has no dimensions, no sizes func ScalarShape() Shape { return scalarShape } // Shape represents the dimensions of a Tensor. A (2,3) matrix has a shape of (2,3) - 2 rows, 3 columns. // Likewise, a shape of (2,3,4) means a Tensor has 3 dimensions: 2 layers, 3 rows, 4 columns. // // Vectors are of particular note. This package defines a shape of (x, 1) as a column vector and // a (1, x) as a row vector. Row vectors and column vectors are matrices as well. It is important to note that // row and column vectors and vanilla vectors are comparable under some circumstances type Shape []int // TotalSize returns the number of elements expected in a Tensor of a certain shape func (s Shape) TotalSize() int { return ProdInts([]int(s)) } // CalcStrides calculates the default strides for a shape func (s Shape) CalcStrides() []int { if s.IsScalar() { return nil } retVal := BorrowInts(len(s)) // if s.IsVector() { // retVal[0] = 1 // retVal = retVal[:1] // return retVal // } acc := 1 for i := len(s) - 1; i >= 0; i-- { retVal[i] = acc d := s[i] if d < 0 { panic("negative dimension size does not make sense") } acc *= d } return retVal } // CalcStridesWithMask is similar to CalcStrides, except that it has an argument, masks. 
It is used to mask out given dimensions
// during the calculation of strides.
func (s Shape) CalcStridesWithMask(mask []bool) []int {
	if s.IsScalarEquiv() {
		return nil
	}

	retVal := BorrowInts(len(s))
	if s.IsVector() {
		retVal[0] = 1
		retVal = retVal[:1]
		return retVal
	}

	if len(mask) != s.Dims() {
		panic("mask length must be equal to number of shape dimensions")
	}
	acc := 1
	for i := len(s) - 1; i >= 0; i-- {
		if mask[i] {
			retVal[i] = acc
		} else {
			retVal[i] = 0
		}
		d := s[i]
		if d < 0 {
			panic("negative dimension size does not make sense")
		}
		if mask[i] {
			acc *= d
		}
	}

	return retVal
}

// CalcStridesColMajor is like CalcStrides, but assumes a column-major layout.
func (s Shape) CalcStridesColMajor() []int {
	if s.IsScalarEquiv() {
		return nil
	}

	retVal := BorrowInts(len(s))
	if s.IsVector() {
		retVal[0] = 1
		retVal = retVal[:1]
		return retVal
	}

	acc := 1
	for i := 0; i < len(s); i++ {
		retVal[i] = acc
		d := s[i]
		if d < 0 {
			panic("negative dimension size does not make sense")
		}
		acc *= d
	}
	return retVal
}

// Eq indicates if a shape is equal to another. There is a soft concept of equality when it comes to vectors.
//
// If s is a column vector and other is a vanilla vector, they're considered equal if the size of the column dimension is the same as the vector size;
// if s is a row vector and other is a vanilla vector, they're considered equal if the size of the row dimension is the same as the vector size.
func (s Shape) Eq(other Shape) bool {
	if s.IsScalar() && other.IsScalar() {
		return true
	}

	if s.IsVector() && other.IsVector() {
		switch {
		case len(s) == 2 && len(other) == 1:
			if (s.IsColVec() && s[0] == other[0]) || (s.IsRowVec() && s[1] == other[0]) {
				return true
			}
			return false
		case len(s) == 1 && len(other) == 2:
			if (other.IsColVec() && other[0] == s[0]) || (other.IsRowVec() && other[1] == s[0]) {
				return true
			}
			return false
		}
	}

	if len(s) != len(other) {
		return false
	}

	for i, v := range s {
		if other[i] != v {
			return false
		}
	}
	return true
}

// Clone clones a shape.
func (s Shape) Clone() Shape {
	retVal := BorrowInts(len(s))
	copy(retVal, s)
	return retVal
}

// IsScalar returns true if the access pattern indicates it's a scalar value.
func (s Shape) IsScalar() bool { return len(s) == 0 }

// IsScalarEquiv returns true if the access pattern indicates it's a scalar-like value.
func (s Shape) IsScalarEquiv() bool {
	if len(s) == 0 {
		return true
	}
	isEquiv := true
	for i := range s {
		if s[i] != 1 {
			return false
		}
	}
	return isEquiv
}

// IsVector returns whether the access pattern falls into one of three possible definitions of vectors:
// vanilla vector (not a row or a col)
// column vector
// row vector
func (s Shape) IsVector() bool { return s.IsColVec() || s.IsRowVec() || (len(s) == 1) }

// IsColVec returns true when the access pattern has the shape (x, 1).
func (s Shape) IsColVec() bool { return len(s) == 2 && (s[1] == 1 && s[0] > 1) }

// IsRowVec returns true when the access pattern has the shape (1, x).
func (s Shape) IsRowVec() bool { return len(s) == 2 && (s[0] == 1 && s[1] > 1) }

// IsVectorLike returns true when the shape looks like a vector,
// e.g. a number that is surrounded by 1s:
// (1, 1, ... 1, 10, 1, 1... 1)
func (s Shape) IsVectorLike() bool {
	var nonOnes int
	for _, i := range s {
		if i != 1 {
			nonOnes++
		}
	}
	return nonOnes == 1 || nonOnes == 0 // if there is only one non-one then it's a vector or a scalar-like.
}
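// Illustrative sketch, an editor's addition: the soft vector equality that Eq
// describes, and the row-major strides from CalcStrides, in concrete terms
// (values mirrored from shape_test.go below):
//
//	tensor.Shape{2, 1}.Eq(tensor.Shape{2})    // true: col vector ~ vanilla vector of size 2
//	tensor.Shape{2}.Eq(tensor.Shape{1, 2})    // true: vanilla vector ~ row vector of size 2
//	tensor.Shape{2, 1}.Eq(tensor.Shape{1, 2}) // false: col vector vs row vector
//	tensor.Shape{2, 3, 4}.CalcStrides()       // [12 4 1]: the innermost dimension moves fastest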
// IsMatrix returns true if it's a matrix. This is mostly a convenience method. RowVec and ColVecs are also considered matrices.
func (s Shape) IsMatrix() bool { return len(s) == 2 }

// Dims returns the number of dimensions in the shape.
func (s Shape) Dims() int { return len(s) }

// DimSize returns the size of the dimension wanted.
//
// This method implements the DimSizer interface in Gorgonia.
func (s Shape) DimSize(d int) (size int, err error) {
	if (s.IsScalar() && d != 0) || (!s.IsScalar() && d >= len(s)) {
		err = errors.Errorf(dimMismatch, len(s), d)
		return
	}

	switch {
	case s.IsScalar():
		return 0, nil
	default:
		return s[d], nil
	}
}

// S gives the new shape after a shape has been sliced. It's repeated from the AP S() method mainly because there are other functions in Gorgonia that use only shape.
func (s Shape) S(slices ...Slice) (retVal Shape, err error) {
	opDims := len(s)
	if len(slices) > opDims {
		err = errors.Errorf(dimMismatch, opDims, len(slices))
		return
	}

	retVal = s.Clone()

	for d, size := range s {
		var sl Slice // default is a nil Slice
		if d <= len(slices)-1 {
			sl = slices[d]
		}

		var start, end, step int
		if start, end, step, err = SliceDetails(sl, size); err != nil {
			return
		}

		if step > 0 {
			retVal[d] = (end - start) / step

			// fix: a sliced dimension is at least 1
			if retVal[d] <= 0 {
				retVal[d] = 1
			}
		} else {
			retVal[d] = (end - start)
		}
	}

	// drop any dimension with size 1, except the last dimension
	offset := 0
	dims := s.Dims()
	for d := 0; d < dims; d++ {
		if retVal[d] == 1 && offset+d <= len(slices)-1 && slices[offset+d] != nil /*&& d != t.dims-1 && dims > 2*/ {
			retVal = append(retVal[:d], retVal[d+1:]...)
			d--
			dims--
			offset++
		}
	}

	if retVal.IsScalar() {
		ReturnInts(retVal)
		return ScalarShape(), nil
	}

	return
}

// Repeat returns the expected new shape given the repetition parameters.
func (s Shape) Repeat(axis int, repeats ...int) (newShape Shape, finalRepeats []int, size int, err error) {
	switch {
	case axis == AllAxes:
		size = s.TotalSize()
		newShape = Shape{size}
		axis = 0
	case s.IsScalar():
		size = 1
		// special case for row vecs
		if axis == 1 {
			newShape = Shape{1, 0}
		} else {
			// otherwise it will be repeated into a vanilla vector
			newShape = Shape{0}
		}
	case s.IsVector() && !s.IsRowVec() && !s.IsColVec() && axis == 1:
		size = 1
		newShape = s.Clone()
		newShape = append(newShape, 1)
	default:
		if axis >= len(s) {
			// error
			err = errors.Errorf(invalidAxis, axis, s.Dims())
			return
		}
		size = s[axis]
		newShape = s.Clone()
	}

	// special case to allow generic repeats
	if len(repeats) == 1 {
		rep := repeats[0]
		repeats = make([]int, size)
		for i := range repeats {
			repeats[i] = rep
		}
	}
	reps := len(repeats)
	if reps != size {
		err = errors.Errorf(broadcastError, size, reps)
		return
	}

	newSize := SumInts(repeats)
	newShape[axis] = newSize
	finalRepeats = repeats

	return
}

// Concat returns the expected new shape given the concatenation parameters.
func (s Shape) Concat(axis int, ss ...Shape) (newShape Shape, err error) {
	dims := s.Dims()

	// check that all the shapes being concatenated have the same number of dimensions
	for _, shp := range ss {
		if shp.Dims() != dims {
			err = errors.Errorf(dimMismatch, dims, shp.Dims())
			return
		}
	}

	// special case
	if axis == AllAxes {
		axis = 0
	}

	// nope... no negative indexing here.
if axis < 0 {
		err = errors.Errorf(invalidAxis, axis, len(s))
		return
	}
	if axis >= dims {
		err = errors.Errorf(invalidAxis, axis, len(s))
		return
	}

	newShape = Shape(BorrowInts(dims))
	copy(newShape, s)

	for _, shp := range ss {
		for d := 0; d < dims; d++ {
			if d == axis {
				newShape[d] += shp[d]
			} else {
				// validate that the rest of the dimensions match up
				if newShape[d] != shp[d] {
					err = errors.Wrapf(errors.Errorf(dimMismatch, newShape[d], shp[d]), "Axis: %d, dimension it failed at: %d", axis, d)
					return
				}
			}
		}
	}
	return
}

// Format implements fmt.Formatter, and formats a shape nicely.
func (s Shape) Format(st fmt.State, r rune) {
	switch r {
	case 'v', 's':
		st.Write([]byte("("))
		for i, v := range s {
			fmt.Fprintf(st, "%d", v)
			if i < len(s)-1 {
				st.Write([]byte(", "))
			}
		}
		st.Write([]byte(")"))
	default:
		fmt.Fprintf(st, "%v", []int(s))
	}
}
tensor-0.9.24/shape_test.go000066400000000000000000000175251426512615100156210ustar00rootroot00000000000000package tensor

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestShapeBasics(t *testing.T) {
	var s Shape
	var ds int
	var err error
	s = Shape{1, 2}
	if ds, err = s.DimSize(0); err != nil {
		t.Error(err)
	}
	if ds != 1 {
		t.Error("Expected DimSize(0) to be 1")
	}
	if ds, err = s.DimSize(2); err == nil {
		t.Error("Expected a DimensionMismatch error")
	}

	s = ScalarShape()
	if ds, err = s.DimSize(0); err != nil {
		t.Error(err)
	}
	if ds != 0 {
		t.Error("Expected DimSize(0) of a scalar to be 0")
	}

	// format, for completeness' sake
	s = Shape{2, 1}
	if fmt.Sprintf("%d", s) != "[2 1]" {
		t.Error("Shape.Format() error")
	}
}

func TestShapeIsX(t *testing.T) {
	assert := assert.New(t)
	var s Shape

	// scalar shape
	s = Shape{}
	assert.True(s.IsScalar())
	assert.True(s.IsScalarEquiv())
	assert.False(s.IsVector())
	assert.False(s.IsColVec())
	assert.False(s.IsRowVec())

	// vectors

	// scalar-equiv vector
	s = Shape{1}
	assert.False(s.IsScalar())
	assert.True(s.IsScalarEquiv())
	assert.True(s.IsVector())
	assert.True(s.IsVectorLike())
	assert.True(s.IsVector())
	assert.False(s.IsColVec())
	assert.False(s.IsRowVec())

	// vanilla vector
	s = Shape{2}
	assert.False(s.IsScalar())
	assert.True(s.IsVector())
	assert.False(s.IsColVec())
	assert.False(s.IsRowVec())

	// col vec
	s = Shape{2, 1}
	assert.False(s.IsScalar())
	assert.True(s.IsVector())
	assert.True(s.IsVectorLike())
	assert.True(s.IsColVec())
	assert.False(s.IsRowVec())

	// row vec
	s = Shape{1, 2}
	assert.False(s.IsScalar())
	assert.True(s.IsVector())
	assert.True(s.IsVectorLike())
	assert.False(s.IsColVec())
	assert.True(s.IsRowVec())

	// matrix and up
	s = Shape{2, 2}
	assert.False(s.IsScalar())
	assert.False(s.IsVector())
	assert.False(s.IsColVec())
	assert.False(s.IsRowVec())

	// scalar equiv matrix
	s = Shape{1, 1}
	assert.False(s.IsScalar())
	assert.True(s.IsScalarEquiv())
	assert.True(s.IsVectorLike())
	assert.False(s.IsVector())
}

func TestShapeCalcStride(t *testing.T) {
	assert := assert.New(t)
	var s Shape

	// scalar shape
	s = Shape{}
	assert.Nil(s.CalcStrides())

	// vector shape
	s = Shape{1}
	assert.Equal([]int{1}, s.CalcStrides())

	s = Shape{2, 1}
	assert.Equal([]int{1, 1}, s.CalcStrides())

	s = Shape{1, 2}
	assert.Equal([]int{2, 1}, s.CalcStrides())

	s = Shape{2}
	assert.Equal([]int{1}, s.CalcStrides())

	// matrix strides
	s = Shape{2, 2}
	assert.Equal([]int{2, 1}, s.CalcStrides())

	s = Shape{5, 2}
	assert.Equal([]int{2, 1}, s.CalcStrides())

	// 3D strides
	s = Shape{2, 3, 4}
	assert.Equal([]int{12, 4, 1}, s.CalcStrides())

	// stupid shape
	s = Shape{-2, 1, 2}
	fail := func() { s.CalcStrides() }
	assert.Panics(fail)
}

func TestShapeEquality(t *testing.T) {
	assert := assert.New(t)
	var
s1, s2 Shape // scalar s1 = Shape{} s2 = Shape{} assert.True(s1.Eq(s2)) assert.True(s2.Eq(s1)) // scalars and scalar equiv are not the same! s1 = Shape{1} s2 = Shape{} assert.False(s1.Eq(s2)) assert.False(s2.Eq(s1)) // vector s1 = Shape{3} s2 = Shape{5} assert.False(s1.Eq(s2)) assert.False(s2.Eq(s1)) s1 = Shape{2, 1} s2 = Shape{2, 1} assert.True(s1.Eq(s2)) assert.True(s2.Eq(s1)) s2 = Shape{2} assert.True(s1.Eq(s2)) assert.True(s2.Eq(s1)) s2 = Shape{1, 2} assert.False(s1.Eq(s2)) assert.False(s2.Eq(s1)) s1 = Shape{2} assert.True(s1.Eq(s2)) assert.True(s2.Eq(s1)) s2 = Shape{2, 3} assert.False(s1.Eq(s2)) assert.False(s2.Eq(s1)) // matrix s1 = Shape{2, 3} assert.True(s1.Eq(s2)) assert.True(s2.Eq(s1)) s2 = Shape{3, 2} assert.False(s1.Eq(s2)) assert.False(s2.Eq(s1)) // just for that green coloured code s1 = Shape{2} s2 = Shape{1, 3} assert.False(s1.Eq(s2)) assert.False(s2.Eq(s1)) } var shapeSliceTests = []struct { name string s Shape sli []Slice expected Shape err bool }{ {"slicing a scalar shape", ScalarShape(), nil, ScalarShape(), false}, {"slicing a scalar shape", ScalarShape(), []Slice{rs{0, 0, 0}}, nil, true}, {"vec[0]", Shape{2}, []Slice{rs{0, 1, 0}}, ScalarShape(), false}, {"vec[3]", Shape{2}, []Slice{rs{3, 4, 0}}, nil, true}, {"vec[:, 0]", Shape{2}, []Slice{nil, rs{0, 1, 0}}, nil, true}, {"vec[1:4:2]", Shape{5}, []Slice{rs{1, 4, 2}}, ScalarShape(), false}, {"tensor[0, :, :]", Shape{1, 2, 2}, []Slice{rs{0, 1, 1}, nil, nil}, Shape{2, 2}, false}, {"tensor[:, 0, :]", Shape{1, 2, 2}, []Slice{nil, rs{0, 1, 1}, nil}, Shape{1, 2}, false}, {"tensor[0, :, :, :]", Shape{1, 1, 2, 2}, []Slice{rs{0, 1, 1}, nil, nil, nil}, Shape{1, 2, 2}, false}, {"tensor[0,]", Shape{1, 1, 2, 2}, []Slice{rs{0, 1, 1}}, Shape{1, 2, 2}, false}, } func TestShape_Slice(t *testing.T) { for i, ssts := range shapeSliceTests { newShape, err := ssts.s.S(ssts.sli...) if checkErr(t, ssts.err, err, "Shape slice", i) { continue } if !ssts.expected.Eq(newShape) { t.Errorf("Test %q: Expected shape %v. 
Got %v instead", ssts.name, ssts.expected, newShape) } } } var shapeRepeatTests = []struct { name string s Shape repeats []int axis int expected Shape expectedRepeats []int expectedSize int err bool }{ {"scalar repeat on axis 0", ScalarShape(), []int{3}, 0, Shape{3}, []int{3}, 1, false}, {"scalar repeat on axis 1", ScalarShape(), []int{3}, 1, Shape{1, 3}, []int{3}, 1, false}, {"vector repeat on axis 0", Shape{2}, []int{3}, 0, Shape{6}, []int{3, 3}, 2, false}, {"vector repeat on axis 1", Shape{2}, []int{3}, 1, Shape{2, 3}, []int{3}, 1, false}, {"colvec repeats on axis 0", Shape{2, 1}, []int{3}, 0, Shape{6, 1}, []int{3, 3}, 2, false}, {"colvec repeats on axis 1", Shape{2, 1}, []int{3}, 1, Shape{2, 3}, []int{3}, 1, false}, {"rowvec repeats on axis 0", Shape{1, 2}, []int{3}, 0, Shape{3, 2}, []int{3}, 1, false}, {"rowvec repeats on axis 1", Shape{1, 2}, []int{3}, 1, Shape{1, 6}, []int{3, 3}, 2, false}, {"3-Tensor repeats", Shape{2, 3, 2}, []int{1, 2, 1}, 1, Shape{2, 4, 2}, []int{1, 2, 1}, 3, false}, {"3-Tensor generic repeats", Shape{2, 3, 2}, []int{2}, AllAxes, Shape{24}, []int{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, 12, false}, {"3-Tensor generic repeat, axis specified", Shape{2, 3, 2}, []int{2}, 2, Shape{2, 3, 4}, []int{2, 2}, 2, false}, // stupids {"nonexisting axis 2", Shape{2, 1}, []int{3}, 2, nil, nil, 0, true}, {"mismatching repeats", Shape{2, 3, 2}, []int{3, 1, 2}, 0, nil, nil, 0, true}, } func TestShape_Repeat(t *testing.T) { assert := assert.New(t) for _, srts := range shapeRepeatTests { newShape, reps, size, err := srts.s.Repeat(srts.axis, srts.repeats...) switch { case srts.err: if err == nil { t.Error("Expected an error") } continue case !srts.err && err != nil: t.Error(err) continue } assert.True(srts.expected.Eq(newShape), "Test %q: Want: %v. Got %v", srts.name, srts.expected, newShape) assert.Equal(srts.expectedRepeats, reps, "Test %q: ", srts.name) assert.Equal(srts.expectedSize, size, "Test %q: ", srts.name) } } var shapeConcatTests = []struct { name string s Shape axis int ss []Shape expected Shape err bool }{ {"standard, axis 0 ", Shape{2, 2}, 0, []Shape{{2, 2}, {2, 2}}, Shape{6, 2}, false}, {"standard, axis 1 ", Shape{2, 2}, 1, []Shape{{2, 2}, {2, 2}}, Shape{2, 6}, false}, {"standard, axis AllAxes ", Shape{2, 2}, -1, []Shape{{2, 2}, {2, 2}}, Shape{6, 2}, false}, {"concat to empty", Shape{2}, 0, nil, Shape{2}, false}, {"stupids: different dims", Shape{2, 2}, 0, []Shape{{2, 3, 2}}, nil, true}, {"stupids: negative axes", Shape{2, 2}, -5, []Shape{{2, 2}}, nil, true}, {"stupids: toobig axis", Shape{2, 2}, 5, []Shape{{2, 2}}, nil, true}, {"subtle stupids: dim mismatch", Shape{2, 2}, 0, []Shape{{2, 2}, {2, 3}}, nil, true}, } func TestShape_Concat(t *testing.T) { assert := assert.New(t) for _, scts := range shapeConcatTests { newShape, err := scts.s.Concat(scts.axis, scts.ss...) switch { case scts.err: if err == nil { t.Error("Expected an error") } continue case !scts.err && err != nil: t.Error(err) continue } assert.Equal(scts.expected, newShape) } } tensor-0.9.24/slice.go000066400000000000000000000027641426512615100145620ustar00rootroot00000000000000package tensor // A Slice represents a slicing operation for a Tensor. type Slice interface { Start() int End() int Step() int } type rs struct { start, end, step int } func (s rs) Start() int { return s.start } func (s rs) End() int { return s.end } func (s rs) Step() int { return s.step } // makeRS creates a ranged slice. It takes an optional step param. 
func makeRS(start, end int, opts ...int) rs { step := 1 if len(opts) > 0 { step = opts[0] } return rs{ start: start, end: end, step: step, } } // ss is a single slice, representing this: [start:start+1:0] type ss int func (s ss) Start() int { return int(s) } func (s ss) End() int { return int(s) + 1 } func (s ss) Step() int { return 0 } // sli is a slice. It's named sli to prevent confusion over naming type sli struct { start, end, step int } // S creates a Slice. // end is optional. It should be passed in as the first param of the optionals. // step is optional. It should be passed in as the second param of the optionals. // // Default end is start+1. Default step is 1, unless end == start+1, in which case it defaults to 0 func S(start int, opt ...int) Slice { var end, step int if len(opt) > 0 { end = opt[0] } else { end = start + 1 } step = 1 if len(opt) > 1 { step = opt[1] } else if end == start+1 { step = 0 } return &sli{ start: start, end: end, step: step, } } func (s *sli) Start() int { return s.start } func (s *sli) End() int { return s.end } func (s *sli) Step() int { return s.step } tensor-0.9.24/sparse.go000066400000000000000000000213441426512615100147530ustar00rootroot00000000000000package tensor import ( "reflect" "sort" "github.com/pkg/errors" ) var ( _ Sparse = &CS{} ) // Sparse is a sparse tensor. type Sparse interface { Tensor Densor NonZeroes() int // NonZeroes returns the number of nonzero values } // coo is an internal representation of the Coordinate type sparse matrix. // It's not exported because you probably shouldn't be using it. // Instead, constructors for the *CS type support using a coordinate as an input. type coo struct { o DataOrder xs, ys []int data array } func (c *coo) Len() int { return c.data.Len() } func (c *coo) Less(i, j int) bool { if c.o.IsColMajor() { return c.colMajorLess(i, j) } return c.rowMajorLess(i, j) } func (c *coo) Swap(i, j int) { c.xs[i], c.xs[j] = c.xs[j], c.xs[i] c.ys[i], c.ys[j] = c.ys[j], c.ys[i] c.data.swap(i, j) } func (c *coo) colMajorLess(i, j int) bool { if c.ys[i] < c.ys[j] { return true } if c.ys[i] == c.ys[j] { // check xs if c.xs[i] <= c.xs[j] { return true } } return false } func (c *coo) rowMajorLess(i, j int) bool { if c.xs[i] < c.xs[j] { return true } if c.xs[i] == c.xs[j] { // check ys if c.ys[i] <= c.ys[j] { return true } } return false } // CS is a compressed sparse data structure. It can be used to represent both CSC and CSR sparse matrices. // Refer to the individual creation functions for more information. type CS struct { s Shape o DataOrder e Engine f MemoryFlag z interface{} // z is the "zero" value. Typically it's not used. indices []int indptr []int array } // NewCSR creates a new Compressed Sparse Row matrix. The data has to be a slice or it panics. func NewCSR(indices, indptr []int, data interface{}, opts ...ConsOpt) *CS { t := new(CS) t.indices = indices t.indptr = indptr t.array = arrayFromSlice(data) t.o = NonContiguous t.e = StdEng{} for _, opt := range opts { opt(t) } return t } // NewCSC creates a new Compressed Sparse Column matrix. The data has to be a slice, or it panics. func NewCSC(indices, indptr []int, data interface{}, opts ...ConsOpt) *CS { t := new(CS) t.indices = indices t.indptr = indptr t.array = arrayFromSlice(data) t.o = MakeDataOrder(ColMajor, NonContiguous) t.e = StdEng{} for _, opt := range opts { opt(t) } return t } // CSRFromCoord creates a new Compressed Sparse Row matrix given the coordinates. The data has to be a slice or it panics.
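//
// A minimal usage sketch, with made-up values for illustration:
//	xs := []int{0, 1, 2}       // row coordinates of the nonzeroes
//	ys := []int{0, 1, 2}       // column coordinates of the nonzeroes
//	vals := []float64{3, 1, 4} // the nonzero values
//	T := CSRFromCoord(Shape{3, 3}, xs, ys, vals) // a 3x3 CSR matrix with 3 nonzeroes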
func CSRFromCoord(shape Shape, xs, ys []int, data interface{}) *CS { t := new(CS) t.s = shape t.o = NonContiguous t.array = arrayFromSlice(data) t.e = StdEng{} // coord matrix cm := &coo{t.o, xs, ys, t.array} sort.Sort(cm) r := shape[0] c := shape[1] if r <= cm.xs[len(cm.xs)-1] || c <= MaxInts(cm.ys...) { panic("Cannot create sparse matrix where provided shape is smaller than the implied shape of the data") } indptr := make([]int, r+1) var i, j, tmp int for i = 1; i < r+1; i++ { for j = tmp; j < len(xs) && xs[j] < i; j++ { } tmp = j indptr[i] = j } t.indices = ys t.indptr = indptr return t } // CSCFromCoord creates a new Compressed Sparse Column matrix given the coordinates. The data has to be a slice or it panics. func CSCFromCoord(shape Shape, xs, ys []int, data interface{}) *CS { t := new(CS) t.s = shape t.o = MakeDataOrder(NonContiguous, ColMajor) t.array = arrayFromSlice(data) t.e = StdEng{} // coord matrix cm := &coo{t.o, xs, ys, t.array} sort.Sort(cm) r := shape[0] c := shape[1] // check shape if r <= MaxInts(cm.xs...) || c <= cm.ys[len(cm.ys)-1] { panic("Cannot create sparse matrix where provided shape is smaller than the implied shape of the data") } indptr := make([]int, c+1) var i, j, tmp int for i = 1; i < c+1; i++ { for j = tmp; j < len(ys) && ys[j] < i; j++ { } tmp = j indptr[i] = j } t.indices = xs t.indptr = indptr return t } func (t *CS) Shape() Shape { return t.s } func (t *CS) Strides() []int { return nil } func (t *CS) Dtype() Dtype { return t.t } func (t *CS) Dims() int { return 2 } func (t *CS) Size() int { return t.s.TotalSize() } func (t *CS) DataSize() int { return t.Len() } func (t *CS) Engine() Engine { return t.e } func (t *CS) DataOrder() DataOrder { return t.o } func (t *CS) Slice(...Slice) (View, error) { return nil, errors.Errorf("Slice for sparse tensors not implemented yet") } func (t *CS) At(coord ...int) (interface{}, error) { if len(coord) != t.Dims() { return nil, errors.Errorf("Expected coordinates to be of %d-dimensions. Got %v instead", t.Dims(), coord) } if i, ok := t.at(coord...); ok { return t.Get(i), nil } if t.z == nil { return reflect.Zero(t.t.Type).Interface(), nil } return t.z, nil } func (t *CS) SetAt(v interface{}, coord ...int) error { if i, ok := t.at(coord...); ok { t.Set(i, v) return nil } return errors.Errorf("Cannot set value in a compressed sparse matrix: Coordinate %v not found", coord) } func (t *CS) Reshape(...int) error { return errors.New("compressed sparse matrix cannot be reshaped") } // T transposes the matrix. Concretely, it just changes a bit - the state goes from CSC to CSR, and vice versa. func (t *CS) T(axes ...int) error { dims := t.Dims() if len(axes) != dims && len(axes) != 0 { return errors.Errorf("Cannot transpose along axes %v", axes) } if len(axes) == 0 || axes == nil { axes = make([]int, dims) for i := 0; i < dims; i++ { axes[i] = dims - 1 - i } } UnsafePermute(axes, []int(t.s)) t.o = t.o.toggleColMajor() t.o = MakeDataOrder(t.o, Transposed) return nil } // UT untransposes the CS func (t *CS) UT() { t.T(); t.o = t.o.clearTransposed() } // Transpose is a no-op.
The data does not move func (t *CS) Transpose() error { return nil } func (t *CS) Apply(fn interface{}, opts ...FuncOpt) (Tensor, error) { return nil, errors.Errorf(methodNYI, "Apply", t) } func (t *CS) Eq(other interface{}) bool { if ot, ok := other.(*CS); ok { if t == ot { return true } if len(ot.indices) != len(t.indices) { return false } if len(ot.indptr) != len(t.indptr) { return false } if !t.s.Eq(ot.s) { return false } if ot.o != t.o { return false } for i, ind := range t.indices { if ot.indices[i] != ind { return false } } for i, ind := range t.indptr { if ot.indptr[i] != ind { return false } } return t.array.Eq(&ot.array) } return false } func (t *CS) Clone() interface{} { retVal := new(CS) retVal.s = t.s.Clone() retVal.o = t.o retVal.e = t.e retVal.indices = make([]int, len(t.indices)) retVal.indptr = make([]int, len(t.indptr)) copy(retVal.indices, t.indices) copy(retVal.indptr, t.indptr) retVal.array = makeArray(t.t, t.array.Len()) copyArray(&retVal.array, &t.array) retVal.e = t.e return retVal } func (t *CS) IsScalar() bool { return false } func (t *CS) ScalarValue() interface{} { panic("Sparse Matrices cannot represent Scalar Values") } func (t *CS) MemSize() uintptr { return uintptr(calcMemSize(t.t, t.array.Len())) } func (t *CS) Uintptr() uintptr { return t.array.Uintptr() } // NonZeroes returns the nonzeroes. In academic literature this is often written as NNZ. func (t *CS) NonZeroes() int { return t.Len() } func (t *CS) RequiresIterator() bool { return true } func (t *CS) Iterator() Iterator { return NewFlatSparseIterator(t) } func (t *CS) at(coord ...int) (int, bool) { var r, c int if t.o.IsColMajor() { r = coord[1] c = coord[0] } else { r = coord[0] c = coord[1] } for i := t.indptr[r]; i < t.indptr[r+1]; i++ { if t.indices[i] == c { return i, true } } return -1, false } // Dense creates a Dense tensor from the compressed one. 
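//
// A minimal sketch of densification, with made-up values for illustration:
//	s := CSRFromCoord(Shape{2, 2}, []int{0, 1}, []int{1, 0}, []float64{3, 4})
//	d := s.Dense() // a 2x2 *Dense with 3 at (0,1), 4 at (1,0), and zeroes elsewhere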
func (t *CS) Dense() *Dense { if t.e != nil && t.e != (StdEng{}) { // TODO: use the non-standard engine to allocate the returned *Dense } d := recycledDense(t.t, t.Shape().Clone(), WithEngine(t.e)) if t.o.IsColMajor() { for i := 0; i < len(t.indptr)-1; i++ { for j := t.indptr[i]; j < t.indptr[i+1]; j++ { d.SetAt(t.Get(j), t.indices[j], i) } } } else { for i := 0; i < len(t.indptr)-1; i++ { for j := t.indptr[i]; j < t.indptr[i+1]; j++ { d.SetAt(t.Get(j), i, t.indices[j]) } } } return d } // Other Accessors func (t *CS) Indptr() []int { retVal := BorrowInts(len(t.indptr)) copy(retVal, t.indptr) return retVal } func (t *CS) Indices() []int { retVal := BorrowInts(len(t.indices)) copy(retVal, t.indices) return retVal } func (t *CS) AsCSR() { if t.o.IsRowMajor() { return } t.o = t.o.toggleColMajor() } func (t *CS) AsCSC() { if t.o.IsColMajor() { return } t.o = t.o.toggleColMajor() } func (t *CS) IsNativelyAccessible() bool { return t.f.nativelyAccessible() } func (t *CS) IsManuallyManaged() bool { return t.f.manuallyManaged() } func (t *CS) arr() array { return t.array } func (t *CS) arrPtr() *array { return &t.array } func (t *CS) standardEngine() standardEngine { return nil } tensor-0.9.24/sparse_io.go000066400000000000000000000026121426512615100154370ustar00rootroot00000000000000package tensor import ( "bytes" "encoding/gob" "fmt" "io" "github.com/pkg/errors" ) func (t *CS) GobEncode() (p []byte, err error) { var buf bytes.Buffer encoder := gob.NewEncoder(&buf) if err = encoder.Encode(t.s); err != nil { return } if err = encoder.Encode(t.o); err != nil { return } if err = encoder.Encode(t.indices); err != nil { return } if err = encoder.Encode(t.indptr); err != nil { return } data := t.Data() if err = encoder.Encode(&data); err != nil { return } return buf.Bytes(), nil } func (t *CS) GobDecode(p []byte) (err error) { buf := bytes.NewBuffer(p) decoder := gob.NewDecoder(buf) var shape Shape if err = decoder.Decode(&shape); err != nil { return } t.s = shape var o DataOrder if err = decoder.Decode(&o); err != nil { return } var indices []int if err = decoder.Decode(&indices); err != nil { return } t.indices = indices var indptr []int if err = decoder.Decode(&indptr); err != nil { return } t.indptr = indptr var data interface{} if err = decoder.Decode(&data); err != nil { return } t.array = arrayFromSlice(data) return nil } func (t *CS) WriteNpy(w io.Writer) error { return errors.Errorf("Cannot write to npy") } func (t *CS) ReadNpy(r io.Reader) error { return errors.Errorf("Cannot read from npy") } func (t *CS) Format(s fmt.State, c rune) {} func (t *CS) String() string { return "CS" } tensor-0.9.24/sparse_test.go000066400000000000000000000043661426512615100160170ustar00rootroot00000000000000package tensor import ( "testing" "github.com/stretchr/testify/assert" ) func TestCS_Basics(t *testing.T) { assert := assert.New(t) xs0 := []int{1, 2, 6, 8} ys0 := []int{1, 2, 1, 6} xs1 := []int{1, 2, 6, 8} ys1 := []int{1, 2, 1, 6} vals0 := []float64{3, 1, 4, 1} vals1 := []float64{3, 1, 4, 1} var T0, T1 *CS var d0, d1 *Dense var dp0, dp1 *Dense var err error fails := func() { CSCFromCoord(Shape{7, 6}, xs0, ys0, vals0) } assert.Panics(fails) // Test CSC T0 = CSCFromCoord(Shape{9, 7}, xs0, ys0, vals0) d0 = T0.Dense() T0.T() dp0 = T0.Dense() T0.UT() // untranspose as Materialize() will be called below // Test CSR fails = func() { CSRFromCoord(Shape{7, 6}, xs1, ys1, vals1) } assert.Panics(fails) T1 = CSRFromCoord(Shape{9, 7}, xs1, ys1, vals1) d1 = T1.Dense() T1.T() dp1 = T1.Dense() T1.UT() t.Logf("%v %v", T0.indptr, T0.indices) t.Logf("%v %v", T1.indptr, T1.indices) assert.True(d0.Eq(d1), "%+#v\n %+#v\n",
d0, d1) assert.True(dp0.Eq(dp1)) assert.True(T1.Eq(T1)) assert.False(T0.Eq(T1)) // At var got interface{} correct := float64(3.0) if got, err = T0.At(1, 1); err != nil { t.Error(err) } if got.(float64) != correct { t.Errorf("Expected %v. Got %v - T0[1,1]", correct, got) } if got, err = T1.At(1, 1); err != nil { t.Error(err) } if got.(float64) != correct { t.Errorf("Expected %v. Got %v - T1[1,1]", correct, got) } correct = 0.0 if got, err = T0.At(3, 3); err != nil { t.Error(err) } if got.(float64) != correct { t.Errorf("Expected %v. Got %v - T0[3,3]", correct, got) } if got, err = T1.At(3, 3); err != nil { t.Error(err) } if got.(float64) != correct { t.Errorf("Expected %v. Got %v - T1[3,3]", correct, got) } // Test clone T2 := T0.Clone() assert.True(T0.Eq(T2)) // Scalar representation assert.False(T0.IsScalar()) fails = func() { T0.ScalarValue() } assert.Panics(fails) assert.Equal(len(vals0), T0.NonZeroes()) // Sparse Iterator it := T0.Iterator() var valids []int correctValids := []int{0, 2, 1, 3} for i, valid, err := it.NextValidity(); err == nil; i, valid, err = it.NextValidity() { if valid { valids = append(valids, i) } } assert.Equal(correctValids, valids) } tensor-0.9.24/tensor.go000066400000000000000000000076221426512615100147730ustar00rootroot00000000000000// Package tensor is a package that provides efficient, generic n-dimensional arrays in Go. // Also in this package are functions and methods that are used commonly in arithmetic, comparison and linear algebra operations. package tensor // import "gorgonia.org/tensor" import ( "encoding/gob" "fmt" "io" "github.com/pkg/errors" ) var ( _ Tensor = &Dense{} _ Tensor = &CS{} _ View = &Dense{} ) func init() { gob.Register(&Dense{}) gob.Register(&CS{}) } // Tensor represents a variety of n-dimensional arrays. The most commonly used tensor is the Dense tensor. // It can be used to represent a vector, matrix, 3D matrix and n-dimensional tensors. type Tensor interface { // info about the ndarray Shape() Shape Strides() []int Dtype() Dtype Dims() int Size() int DataSize() int // Data access related RequiresIterator() bool Iterator() Iterator DataOrder() DataOrder // ops Slicer At(...int) (interface{}, error) SetAt(v interface{}, coord ...int) error Reshape(...int) error T(axes ...int) error UT() Transpose() error // Transpose actually moves the data Apply(fn interface{}, opts ...FuncOpt) (Tensor, error) // data related interface Zeroer MemSetter Dataer Eq Cloner // type overloading methods IsScalar() bool ScalarValue() interface{} // engine/memory related stuff // all Tensors should be able to be expressed of as a slab of memory // Note: the size of each element can be acquired by T.Dtype().Size() Memory // Tensors all implement Memory Engine() Engine // Engine can be nil IsNativelyAccessible() bool // Can Go access the memory IsManuallyManaged() bool // Must Go manage the memory // formatters fmt.Formatter fmt.Stringer // all Tensors are serializable to these formats WriteNpy(io.Writer) error ReadNpy(io.Reader) error gob.GobEncoder gob.GobDecoder standardEngine() standardEngine headerer arrayer } // New creates a new Dense Tensor. 
For sparse arrays use their relevant construction function func New(opts ...ConsOpt) *Dense { d := borrowDense() for _, opt := range opts { opt(d) } d.fix() if err := d.sanity(); err != nil { panic(err) } return d } func assertDense(t Tensor) (*Dense, error) { if t == nil { return nil, errors.New("nil is not a *Dense") } if retVal, ok := t.(*Dense); ok { return retVal, nil } if retVal, ok := t.(Densor); ok { return retVal.Dense(), nil } return nil, errors.Errorf("%T is not *Dense", t) } func getDenseTensor(t Tensor) (DenseTensor, error) { switch tt := t.(type) { case DenseTensor: return tt, nil case Densor: return tt.Dense(), nil default: return nil, errors.Errorf("Tensor %T is not a DenseTensor", t) } } // getFloatDenseTensor extracts a DenseTensor from a Tensor and ensures that the .data is an Array that implements Float func getFloatDenseTensor(t Tensor) (retVal DenseTensor, err error) { if t == nil { return } if err = typeclassCheck(t.Dtype(), floatTypes); err != nil { err = errors.Wrapf(err, "getFloatDense only handles floats. Got %v instead", t.Dtype()) return } if retVal, err = getDenseTensor(t); err != nil { err = errors.Wrapf(err, opFail, "getFloatDense") return } if retVal == nil { return } return } // getFloatComplexDenseTensor extracts a DenseTensor from a Tensor and ensures that the .data is an Array that implements Float or Complex func getFloatComplexDenseTensor(t Tensor) (retVal DenseTensor, err error) { if t == nil { return } if err = typeclassCheck(t.Dtype(), floatcmplxTypes); err != nil { err = errors.Wrapf(err, "getFloatDense only handles floats and complex. Got %v instead", t.Dtype()) return } if retVal, err = getDenseTensor(t); err != nil { err = errors.Wrapf(err, opFail, "getFloatDense") return } if retVal == nil { return } return } func sliceDense(t *Dense, slices ...Slice) (retVal *Dense, err error) { var sliced Tensor if sliced, err = t.Slice(slices...); err != nil { return nil, err } return sliced.(*Dense), nil } tensor-0.9.24/test_test.go000066400000000000000000000235671426512615100155030ustar00rootroot00000000000000// Code generated by genlib2. DO NOT EDIT.
package tensor import ( "fmt" "math" "math/cmplx" "unsafe" "github.com/chewxy/math32" ) func anyToFloat64s(x interface{}) (retVal []float64) { switch xt := x.(type) { case []int: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []int8: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []int16: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []int32: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []int64: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []uint: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []uint8: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []uint16: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []uint32: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []uint64: retVal = make([]float64, len(xt)) for i, v := range xt { retVal[i] = float64(v) } return case []float32: retVal = make([]float64, len(xt)) for i, v := range xt { switch { case math32.IsNaN(v): retVal[i] = math.NaN() case math32.IsInf(v, 1): retVal[i] = math.Inf(1) case math32.IsInf(v, -1): retVal[i] = math.Inf(-1) default: retVal[i] = float64(v) } } return case []float64: return xt case []complex64: retVal = make([]float64, len(xt)) for i, v := range xt { switch { case cmplx.IsNaN(complex128(v)): retVal[i] = math.NaN() case cmplx.IsInf(complex128(v)): retVal[i] = math.Inf(1) default: retVal[i] = float64(real(v)) } } return case []complex128: retVal = make([]float64, len(xt)) for i, v := range xt { switch { case cmplx.IsNaN(v): retVal[i] = math.NaN() case cmplx.IsInf(v): retVal[i] = math.Inf(1) default: retVal[i] = real(v) } } return } panic("Unreachable") } func identityVal(x int, dt Dtype) interface{} { switch dt { case Int: return int(x) case Int8: return int8(x) case Int16: return int16(x) case Int32: return int32(x) case Int64: return int64(x) case Uint: return uint(x) case Uint8: return uint8(x) case Uint16: return uint16(x) case Uint32: return uint32(x) case Uint64: return uint64(x) case Float32: return float32(x) case Float64: return float64(x) case Complex64: var c complex64 if x == 0 { return c } c = 1 return c case Complex128: var c complex128 if x == 0 { return c } c = 1 return c case Bool: if x == 0 { return false } return true case String: if x == 0 { return "" } return fmt.Sprintf("%v", x) default: return x } } func threewayEq(a, b, c interface{}) bool { switch at := a.(type) { case []int: bt := b.([]int) ct := c.([]int) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []int8: bt := b.([]int8) ct := c.([]int8) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []int16: bt := b.([]int16) ct := c.([]int16) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []int32: bt := b.([]int32) ct := c.([]int32) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []int64: bt := b.([]int64) ct := c.([]int64) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []uint: bt := b.([]uint) ct := c.([]uint) for i, va := range at { if va == 1 && bt[i] == 1 { 
if ct[i] != 1 { return false } } } return true case []uint8: bt := b.([]uint8) ct := c.([]uint8) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []uint16: bt := b.([]uint16) ct := c.([]uint16) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []uint32: bt := b.([]uint32) ct := c.([]uint32) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []uint64: bt := b.([]uint64) ct := c.([]uint64) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []float32: bt := b.([]float32) ct := c.([]float32) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []float64: bt := b.([]float64) ct := c.([]float64) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []complex64: bt := b.([]complex64) ct := c.([]complex64) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case []complex128: bt := b.([]complex128) ct := c.([]complex128) for i, va := range at { if va == 1 && bt[i] == 1 { if ct[i] != 1 { return false } } } return true case int: bt := b.(int) ct := c.(int) if (at == 1 && bt == 1) && ct != 1 { return false } return true case int8: bt := b.(int8) ct := c.(int8) if (at == 1 && bt == 1) && ct != 1 { return false } return true case int16: bt := b.(int16) ct := c.(int16) if (at == 1 && bt == 1) && ct != 1 { return false } return true case int32: bt := b.(int32) ct := c.(int32) if (at == 1 && bt == 1) && ct != 1 { return false } return true case int64: bt := b.(int64) ct := c.(int64) if (at == 1 && bt == 1) && ct != 1 { return false } return true case uint: bt := b.(uint) ct := c.(uint) if (at == 1 && bt == 1) && ct != 1 { return false } return true case uint8: bt := b.(uint8) ct := c.(uint8) if (at == 1 && bt == 1) && ct != 1 { return false } return true case uint16: bt := b.(uint16) ct := c.(uint16) if (at == 1 && bt == 1) && ct != 1 { return false } return true case uint32: bt := b.(uint32) ct := c.(uint32) if (at == 1 && bt == 1) && ct != 1 { return false } return true case uint64: bt := b.(uint64) ct := c.(uint64) if (at == 1 && bt == 1) && ct != 1 { return false } return true case float32: bt := b.(float32) ct := c.(float32) if (at == 1 && bt == 1) && ct != 1 { return false } return true case float64: bt := b.(float64) ct := c.(float64) if (at == 1 && bt == 1) && ct != 1 { return false } return true case complex64: bt := b.(complex64) ct := c.(complex64) if (at == 1 && bt == 1) && ct != 1 { return false } return true case complex128: bt := b.(complex128) ct := c.(complex128) if (at == 1 && bt == 1) && ct != 1 { return false } return true } return false } func identityB(a bool) bool { return a } func identityI(a int) int { return a } func identityI8(a int8) int8 { return a } func identityI16(a int16) int16 { return a } func identityI32(a int32) int32 { return a } func identityI64(a int64) int64 { return a } func identityU(a uint) uint { return a } func identityU8(a uint8) uint8 { return a } func identityU16(a uint16) uint16 { return a } func identityU32(a uint32) uint32 { return a } func identityU64(a uint64) uint64 { return a } func identityUintptr(a uintptr) uintptr { return a } func identityF32(a float32) float32 { return a } func identityF64(a float64) float64 { return a } func identityC64(a complex64) complex64 { return a } func identityC128(a 
complex128) complex128 { return a } func identityStr(a string) string { return a } func identityUnsafePointer(a unsafe.Pointer) unsafe.Pointer { return a } func mutateB(a bool) bool { return true } func mutateI(a int) int { return 1 } func mutateI8(a int8) int8 { return 1 } func mutateI16(a int16) int16 { return 1 } func mutateI32(a int32) int32 { return 1 } func mutateI64(a int64) int64 { return 1 } func mutateU(a uint) uint { return 1 } func mutateU8(a uint8) uint8 { return 1 } func mutateU16(a uint16) uint16 { return 1 } func mutateU32(a uint32) uint32 { return 1 } func mutateU64(a uint64) uint64 { return 1 } func mutateUintptr(a uintptr) uintptr { return 0xdeadbeef } func mutateF32(a float32) float32 { return 1 } func mutateF64(a float64) float64 { return 1 } func mutateC64(a complex64) complex64 { return 1 } func mutateC128(a complex128) complex128 { return 1 } func mutateStr(a string) string { return "Hello World" } func mutateUnsafePointer(a unsafe.Pointer) unsafe.Pointer { return unsafe.Pointer(uintptr(0xdeadbeef)) } tensor-0.9.24/testutils_test.go000066400000000000000000000323651426512615100165600ustar00rootroot00000000000000package tensor import ( "bytes" "errors" "math" "math/cmplx" "math/rand" "reflect" "testing" "testing/quick" "time" "unsafe" "github.com/chewxy/math32" "gorgonia.org/tensor/internal/storage" ) func randomBool() bool { i := rand.Intn(11) return i > 5 } // from : https://stackoverflow.com/a/31832326/3426066 const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" const ( letterIdxBits = 6 // 6 bits to represent a letter index letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits ) var src = rand.NewSource(time.Now().UnixNano()) func randomString(n int) string { b := make([]byte, n) for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { if remain == 0 { cache, remain = src.Int63(), letterIdxMax } if idx := int(cache & letterIdxMask); idx < len(letterBytes) { b[i] = letterBytes[idx] i-- } cache >>= letterIdxBits remain-- } return string(b) } // taken from the Go Stdlib package math func tolerancef64(a, b, e float64) bool { d := a - b if d < 0 { d = -d } // note: b is correct (expected) value, a is actual value. // make error tolerance a fraction of b, not a. if b != 0 { e = e * b if e < 0 { e = -e } } return d < e } func closeenoughf64(a, b float64) bool { return tolerancef64(a, b, 1e-8) } func closef64(a, b float64) bool { return tolerancef64(a, b, 1e-14) } func veryclosef64(a, b float64) bool { return tolerancef64(a, b, 4e-16) } func soclosef64(a, b, e float64) bool { return tolerancef64(a, b, e) } func alikef64(a, b float64) bool { switch { case math.IsNaN(a) && math.IsNaN(b): return true case a == b: return math.Signbit(a) == math.Signbit(b) } return false } // taken from math32, which was taken from the Go std lib func tolerancef32(a, b, e float32) bool { d := a - b if d < 0 { d = -d } // note: b is correct (expected) value, a is actual value. // make error tolerance a fraction of b, not a. if b != 0 { e = e * b if e < 0 { e = -e } } return d < e } func closef32(a, b float32) bool { return tolerancef32(a, b, 1e-5) } // the number gotten from the cfloat standard.
Haskell's Linear package uses 1e-6 for floats func veryclosef32(a, b float32) bool { return tolerancef32(a, b, 1e-6) } // from wiki func soclosef32(a, b, e float32) bool { return tolerancef32(a, b, e) } func alikef32(a, b float32) bool { switch { case math32.IsNaN(a) && math32.IsNaN(b): return true case a == b: return math32.Signbit(a) == math32.Signbit(b) } return false } // taken from math/cmplx test func cTolerance(a, b complex128, e float64) bool { d := cmplx.Abs(a - b) if b != 0 { e = e * cmplx.Abs(b) if e < 0 { e = -e } } return d < e } func cClose(a, b complex128) bool { return cTolerance(a, b, 1e-14) } func cSoclose(a, b complex128, e float64) bool { return cTolerance(a, b, e) } func cVeryclose(a, b complex128) bool { return cTolerance(a, b, 4e-16) } func cAlike(a, b complex128) bool { switch { case cmplx.IsNaN(a) && cmplx.IsNaN(b): return true case a == b: return math.Signbit(real(a)) == math.Signbit(real(b)) && math.Signbit(imag(a)) == math.Signbit(imag(b)) } return false } func allClose(a, b interface{}, approxFn ...interface{}) bool { switch at := a.(type) { case []float64: closeness := closef64 var ok bool if len(approxFn) > 0 { if closeness, ok = approxFn[0].(func(a, b float64) bool); !ok { closeness = closef64 } } bt := b.([]float64) for i, v := range at { if math.IsNaN(v) { if !math.IsNaN(bt[i]) { return false } continue } if math.IsInf(v, 0) { if !math.IsInf(bt[i], 0) { return false } continue } if !closeness(v, bt[i]) { return false } } return true case []float32: closeness := closef32 var ok bool if len(approxFn) > 0 { if closeness, ok = approxFn[0].(func(a, b float32) bool); !ok { closeness = closef32 } } bt := b.([]float32) for i, v := range at { if math32.IsNaN(v) { if !math32.IsNaN(bt[i]) { return false } continue } if math32.IsInf(v, 0) { if !math32.IsInf(bt[i], 0) { return false } continue } if !closeness(v, bt[i]) { return false } } return true case []complex64: bt := b.([]complex64) for i, v := range at { if cmplx.IsNaN(complex128(v)) { if !cmplx.IsNaN(complex128(bt[i])) { return false } continue } if cmplx.IsInf(complex128(v)) { if !cmplx.IsInf(complex128(bt[i])) { return false } continue } if !cSoclose(complex128(v), complex128(bt[i]), 1e-5) { return false } } return true case []complex128: bt := b.([]complex128) for i, v := range at { if cmplx.IsNaN(v) { if !cmplx.IsNaN(bt[i]) { return false } continue } if cmplx.IsInf(v) { if !cmplx.IsInf(bt[i]) { return false } continue } if !cClose(v, bt[i]) { return false } } return true default: return reflect.DeepEqual(a, b) } } func checkErr(t *testing.T, expected bool, err error, name string, id interface{}) (cont bool) { switch { case expected: if err == nil { t.Errorf("Expected error in test %v (%v)", name, id) } return true case !expected && err != nil: t.Errorf("Test %v (%v) errored: %+v", name, id, err) return true } return false } func sliceApproxf64(a, b []float64, fn func(a, b float64) bool) bool { if len(a) != len(b) { return false } for i, v := range a { if math.IsNaN(v) { if !alikef64(v, b[i]) { return false } } if !fn(v, b[i]) { return false } } return true } func RandomFloat64(size int) []float64 { r := make([]float64, size) for i := range r { r[i] = rand.NormFloat64() } return r } func factorize(a int) []int { if a <= 0 { return nil } // all numbers are divisible by at least 1 retVal := make([]int, 1) retVal[0] = 1 fill := func(a int, e int) { n := len(retVal) for i, p := 0, a; i < e; i, p = i+1, p*a { for j := 0; j < n; j++ { retVal = append(retVal, retVal[j]*p) } } } // find factors of 2 // rightshift 
by 1 = division by 2 var e int for ; a&1 == 0; e++ { a >>= 1 } fill(2, e) // find factors of 3 and up for next := 3; a > 1; next += 2 { if next*next > a { next = a } for e = 0; a%next == 0; e++ { a /= next } if e > 0 { fill(next, e) } } return retVal } func shuffleInts(a []int, r *rand.Rand) { for i := range a { j := r.Intn(i + 1) a[i], a[j] = a[j], a[i] } } type TensorGenerator struct { ShapeConstraint Shape DtypeConstraint Dtype } func (g TensorGenerator) Generate(r *rand.Rand, size int) reflect.Value { var retVal Tensor // generate type of tensor return reflect.ValueOf(retVal) } func (t *Dense) Generate(r *rand.Rand, size int) reflect.Value { // generate type ri := r.Intn(len(specializedTypes.set)) of := specializedTypes.set[ri] datatyp := reflect.SliceOf(of.Type) gendat, _ := quick.Value(datatyp, r) // generate dims var scalar bool var s Shape dims := r.Intn(5) // dims: 4 is the max we'll generate even though we can handle much more l := gendat.Len() // generate shape based on inputs switch { case dims == 0 || l == 0: scalar = true gendat, _ = quick.Value(of.Type, r) case dims == 1: s = Shape{gendat.Len()} default: factors := factorize(l) s = Shape(BorrowInts(dims)) // fill with 1s so that we can get a non-zero TotalSize for i := 0; i < len(s); i++ { s[i] = 1 } for i := 0; i < dims; i++ { j := rand.Intn(len(factors)) s[i] = factors[j] size := s.TotalSize() if q, r := divmod(l, size); r != 0 { factors = factorize(r) } else if size != l { if i < dims-2 { factors = factorize(q) } else if i == dims-2 { s[i+1] = q break } } else { break } } shuffleInts(s, r) } // generate flags flag := MemoryFlag(r.Intn(4)) // generate order order := DataOrder(r.Intn(4)) var v *Dense if scalar { v = New(FromScalar(gendat.Interface())) } else { v = New(Of(of), WithShape(s...), WithBacking(gendat.Interface())) } v.flag = flag v.AP.o = order // generate engine oeint := r.Intn(2) eint := r.Intn(4) switch eint { case 0: v.e = StdEng{} if oeint == 0 { v.oe = StdEng{} } else { v.oe = nil } case 1: // check is to prevent panics which Float64Engine will do if asked to allocate memory for non float64s if of == Float64 { v.e = Float64Engine{} if oeint == 0 { v.oe = Float64Engine{} } else { v.oe = nil } } else { v.e = StdEng{} if oeint == 0 { v.oe = StdEng{} } else { v.oe = nil } } case 2: // check is to prevent panics which Float32Engine will do if asked to allocate memory for non float32s if of == Float32 { v.e = Float32Engine{} if oeint == 0 { v.oe = Float32Engine{} } else { v.oe = nil } } else { v.e = StdEng{} if oeint == 0 { v.oe = StdEng{} } else { v.oe = nil } } case 3: v.e = dummyEngine(true) v.oe = nil } return reflect.ValueOf(v) } // fakemem is a byte slice that is also a Memory type fakemem []byte func (m fakemem) Uintptr() uintptr { return uintptr(unsafe.Pointer(&m[0])) } func (m fakemem) MemSize() uintptr { return uintptr(len(m)) } func (m fakemem) Pointer() unsafe.Pointer { return unsafe.Pointer(&m[0]) } // dummyEngine implements Engine.
The bool indicates whether the data is native-accessible type dummyEngine bool func (e dummyEngine) AllocAccessible() bool { return bool(e) } func (e dummyEngine) Alloc(size int64) (Memory, error) { ps := make(fakemem, int(size)) return ps, nil } func (e dummyEngine) Free(mem Memory, size int64) error { return nil } func (e dummyEngine) Memset(mem Memory, val interface{}) error { return nil } func (e dummyEngine) Memclr(mem Memory) {} func (e dummyEngine) Memcpy(dst, src Memory) error { if e { var a, b storage.Header a.Raw = storage.FromMemory(src.Uintptr(), src.MemSize()) b.Raw = storage.FromMemory(dst.Uintptr(), dst.MemSize()) copy(b.Raw, a.Raw) return nil } return errors.New("Unable to copy ") } func (e dummyEngine) Accessible(mem Memory) (Memory, error) { return mem, nil } func (e dummyEngine) WorksWith(order DataOrder) bool { return true } // dummyEngine2 is used for testing additional methods that may not be provided in the stdeng type dummyEngine2 struct { e StdEng } func (e dummyEngine2) AllocAccessible() bool { return e.e.AllocAccessible() } func (e dummyEngine2) Alloc(size int64) (Memory, error) { return e.e.Alloc(size) } func (e dummyEngine2) Free(mem Memory, size int64) error { return e.e.Free(mem, size) } func (e dummyEngine2) Memset(mem Memory, val interface{}) error { return e.e.Memset(mem, val) } func (e dummyEngine2) Memclr(mem Memory) { e.e.Memclr(mem) } func (e dummyEngine2) Memcpy(dst, src Memory) error { return e.e.Memcpy(dst, src) } func (e dummyEngine2) Accessible(mem Memory) (Memory, error) { return e.e.Accessible(mem) } func (e dummyEngine2) WorksWith(order DataOrder) bool { return e.e.WorksWith(order) } func (e dummyEngine2) Argmax(t Tensor, axis int) (Tensor, error) { return e.e.Argmax(t, axis) } func (e dummyEngine2) Argmin(t Tensor, axis int) (Tensor, error) { return e.e.Argmin(t, axis) } func willerr(a *Dense, tc, eqtc *typeclass) (retVal, willFailEq bool) { if err := typeclassCheck(a.Dtype(), eqtc); err == nil { willFailEq = true } if err := typeclassCheck(a.Dtype(), tc); err != nil { return true, willFailEq } retVal = retVal || !a.IsNativelyAccessible() return } func qcErrCheck(t *testing.T, name string, a Dtyper, b interface{}, we bool, err error) (e error, retEarly bool) { switch { case !we && err != nil: t.Errorf("Tests for %v (%v) was unable to proceed: %v", name, a.Dtype(), err) return err, true case we && err == nil: if b == nil { t.Errorf("Expected error when performing %v on %T of %v ", name, a, a.Dtype()) return errors.New("Error"), true } if bd, ok := b.(Dtyper); ok { t.Errorf("Expected error when performing %v on %T of %v and %T of %v", name, a, a.Dtype(), b, bd.Dtype()) } else { t.Errorf("Expected error when performing %v on %T of %v and %v of %T", name, a, a.Dtype(), b, b) } return errors.New("Error"), true case we && err != nil: return nil, true } return nil, false } func qcIsFloat(dt Dtype) bool { if err := typeclassCheck(dt, floatcmplxTypes); err == nil { return true } return false } func qcEqCheck(t *testing.T, dt Dtype, willFailEq bool, correct, got interface{}) bool { isFloatTypes := qcIsFloat(dt) if !willFailEq && (isFloatTypes && !allClose(correct, got) || (!isFloatTypes && !reflect.DeepEqual(correct, got))) { t.Errorf("q.Dtype: %v", dt) t.Errorf("correct\n%v", correct) t.Errorf("got\n%v", got) return false } return true } // DummyState is a dummy fmt.State, used to debug things type DummyState struct { *bytes.Buffer } func (d *DummyState) Width() (int, bool) { return 0, false } func (d *DummyState) Precision() (int, bool) { return 0, 
false } func (d *DummyState) Flag(c int) bool { return false } tensor-0.9.24/type_test.go000066400000000000000000000032431426512615100154740ustar00rootroot00000000000000package tensor import ( "reflect" "testing" ) type Float16 uint16 func TestRegisterType(t *testing.T) { dt := Dtype{reflect.TypeOf(Float16(0))} RegisterFloat(dt) if err := typeclassCheck(dt, floatTypes); err != nil { t.Errorf("Expected %v to be in floatTypes: %v", dt, err) } if err := typeclassCheck(dt, numberTypes); err != nil { t.Errorf("Expected %v to be in numberTypes: %v", dt, err) } if err := typeclassCheck(dt, ordTypes); err != nil { t.Errorf("Expected %v to be in ordTypes: %v", dt, err) } if err := typeclassCheck(dt, eqTypes); err != nil { t.Errorf("Expected %v to be in eqTypes: %v", dt, err) } } func TestDtypeConversions(t *testing.T) { for k, v := range reverseNumpyDtypes { if npdt, err := v.numpyDtype(); npdt != k { t.Errorf("Expected %v to return numpy dtype of %q. Got %q instead", v, k, npdt) } else if err != nil { t.Errorf("Error: %v", err) } } dt := Dtype{reflect.TypeOf(Float16(0))} if _, err := dt.numpyDtype(); err == nil { t.Errorf("Expected an error when passing in type unknown to np") } for k, v := range numpyDtypes { if dt, err := fromNumpyDtype(v); dt != k { // special cases if Int.Size() == 4 && v == "i4" && dt == Int { continue } if Int.Size() == 8 && v == "i8" && dt == Int { continue } if Uint.Size() == 4 && v == "u4" && dt == Uint { continue } if Uint.Size() == 8 && v == "u8" && dt == Uint { continue } t.Errorf("Expected %q to return %v. Got %v instead", v, k, dt) } else if err != nil { t.Errorf("Error: %v", err) } } if _, err := fromNumpyDtype("EDIUH"); err == nil { t.Error("Expected error when nonsense is passed into fromNumpyDtype") } } tensor-0.9.24/types.go000066400000000000000000000267541426512615100146300ustar00rootroot00000000000000package tensor import ( "fmt" "math" "reflect" "unsafe" "github.com/chewxy/hm" "github.com/pkg/errors" ) // Dtype represents a data type of a Tensor. Concretely it's implemented as an embedded reflect.Type // which allows for easy reflection operations. It also implements hm.Type, for type inference in Gorgonia type Dtype struct { reflect.Type } // note: the Name() and String() methods are already defined in reflect.Type. Might as well use the composed methods func (dt Dtype) Apply(hm.Subs) hm.Substitutable { return dt } func (dt Dtype) FreeTypeVar() hm.TypeVarSet { return nil } func (dt Dtype) Normalize(k, v hm.TypeVarSet) (hm.Type, error) { return dt, nil } func (dt Dtype) Types() hm.Types { return nil } func (dt Dtype) Format(s fmt.State, c rune) { fmt.Fprintf(s, "%s", dt.Name()) } func (dt Dtype) Eq(other hm.Type) bool { return other == dt } var numpyDtypes map[Dtype]string var reverseNumpyDtypes map[string]Dtype func init() { numpyDtypes = map[Dtype]string{ Bool: "b1", Int: fmt.Sprintf("i%d", Int.Size()), Int8: "i1", Int16: "i2", Int32: "i4", Int64: "i8", Uint: fmt.Sprintf("u%d", Uint.Size()), Uint8: "u1", Uint16: "u2", Uint32: "u4", Uint64: "u8", Float32: "f4", Float64: "f8", Complex64: "c8", Complex128: "c16", } reverseNumpyDtypes = map[string]Dtype{ "b1": Bool, "i1": Int8, "i2": Int16, "i4": Int32, "i8": Int64, "u1": Uint8, "u2": Uint16, "u4": Uint32, "u8": Uint64, "f4": Float32, "f8": Float64, "c8": Complex64, "c16": Complex128, } } // numpyDtype returns the Numpy Dtype equivalent.
This is predominantly used in converting a Tensor to a Numpy ndarray, // however, not all Dtypes are supported func (dt Dtype) numpyDtype() (string, error) { retVal, ok := numpyDtypes[dt] if !ok { return "v", errors.Errorf("Unsupported Dtype conversion to Numpy Dtype: %v", dt) } return retVal, nil } func fromNumpyDtype(t string) (Dtype, error) { retVal, ok := reverseNumpyDtypes[t] if !ok { return Dtype{}, errors.Errorf("Unsupported Dtype conversion from %q to Dtype", t) } if t == "i4" && Int.Size() == 4 { return Int, nil } if t == "i8" && Int.Size() == 8 { return Int, nil } if t == "u4" && Uint.Size() == 4 { return Uint, nil } if t == "u8" && Uint.Size() == 8 { return Uint, nil } return retVal, nil } type typeclass struct { name string set []Dtype } var parameterizedKinds = [...]reflect.Kind{ reflect.Array, reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.Struct, } func isParameterizedKind(k reflect.Kind) bool { for _, v := range parameterizedKinds { if v == k { return true } } return false } // oh how nice it'd be if I could make them immutable var ( Bool = Dtype{reflect.TypeOf(true)} Int = Dtype{reflect.TypeOf(int(1))} Int8 = Dtype{reflect.TypeOf(int8(1))} Int16 = Dtype{reflect.TypeOf(int16(1))} Int32 = Dtype{reflect.TypeOf(int32(1))} Int64 = Dtype{reflect.TypeOf(int64(1))} Uint = Dtype{reflect.TypeOf(uint(1))} Uint8 = Dtype{reflect.TypeOf(uint8(1))} Uint16 = Dtype{reflect.TypeOf(uint16(1))} Uint32 = Dtype{reflect.TypeOf(uint32(1))} Uint64 = Dtype{reflect.TypeOf(uint64(1))} Float32 = Dtype{reflect.TypeOf(float32(1))} Float64 = Dtype{reflect.TypeOf(float64(1))} Complex64 = Dtype{reflect.TypeOf(complex64(1))} Complex128 = Dtype{reflect.TypeOf(complex128(1))} String = Dtype{reflect.TypeOf("")} // aliases Byte = Uint8 // extras Uintptr = Dtype{reflect.TypeOf(uintptr(0))} UnsafePointer = Dtype{reflect.TypeOf(unsafe.Pointer(&Uintptr))} ) // allTypes for indexing var allTypes = &typeclass{ name: "τ", set: []Dtype{ Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, Complex64, Complex128, String, Uintptr, UnsafePointer, }, } // specialized types indicate that there are specialized code generated for these types var specializedTypes = &typeclass{ name: "Specialized", set: []Dtype{ Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, Complex64, Complex128, String, }, } var addableTypes = &typeclass{ name: "Addable", set: []Dtype{ Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, Complex64, Complex128, String, }, } var numberTypes = &typeclass{ name: "Number", set: []Dtype{ Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, Complex64, Complex128, }, } var ordTypes = &typeclass{ name: "Ord", set: []Dtype{ Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, String, }, } var eqTypes = &typeclass{ name: "Eq", set: []Dtype{ Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, Complex64, Complex128, String, Uintptr, UnsafePointer, }, } var unsignedTypes = &typeclass{ name: "Unsigned", set: []Dtype{Uint, Uint8, Uint16, Uint32, Uint64}, } var signedTypes = &typeclass{ name: "Signed", set: []Dtype{ Int, Int8, Int16, Int32, Int64, Float32, Float64, Complex64, Complex128, }, } // this typeclass is ever only used by Sub tests var signedNonComplexTypes = &typeclass{ name: "Signed NonComplex", set: []Dtype{ Int, Int8, Int16, Int32, Int64, 
Float32, Float64, }, } var floatTypes = &typeclass{ name: "Float", set: []Dtype{ Float32, Float64, }, } var complexTypes = &typeclass{ name: "Complex Numbers", set: []Dtype{Complex64, Complex128}, } var floatcmplxTypes = &typeclass{ name: "Real", set: []Dtype{ Float32, Float64, Complex64, Complex128, }, } var nonComplexNumberTypes = &typeclass{ name: "Non complex numbers", set: []Dtype{ Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, }, } // this typeclass is ever only used by Pow tests var generatableTypes = &typeclass{ name: "Generatable types", set: []Dtype{ Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64, String, }, } func isFloat(dt Dtype) bool { return dt == Float64 || dt == Float32 } func typeclassCheck(a Dtype, tc *typeclass) error { if tc == nil { return nil } for _, s := range tc.set { if s == a { return nil } } return errors.Errorf("Type %v is not a member of %v", a, tc.name) } // RegisterNumber is a function required to register a new numerical Dtype. // This package provides the following Dtypes: // Int // Int8 // Int16 // Int32 // Int64 // Uint // Uint8 // Uint16 // Uint32 // Uint64 // Float32 // Float64 // Complex64 // Complex128 // // If a Dtype that is registered already exists on the list, it will not be added to the list. func RegisterNumber(a Dtype) { for _, dt := range numberTypes.set { if dt == a { return } } numberTypes.set = append(numberTypes.set, a) RegisterEq(a) } func RegisterFloat(a Dtype) { for _, dt := range floatTypes.set { if dt == a { return } } floatTypes.set = append(floatTypes.set, a) RegisterNumber(a) RegisterOrd(a) } // RegisterOrd registers a dtype as a type that can be ordered func RegisterOrd(a Dtype) { for _, dt := range ordTypes.set { if dt == a { return } } ordTypes.set = append(ordTypes.set, a) RegisterEq(a) } // RegisterEq registers a dtype as a type that can be compared for equality func RegisterEq(a Dtype) { for _, dt := range eqTypes.set { if dt == a { return } } eqTypes.set = append(eqTypes.set, a) Register(a) } // Register registers a new Dtype func Register(a Dtype) { for _, dt := range allTypes.set { if a == dt { return } } allTypes.set = append(allTypes.set, a) } func dtypeID(a Dtype) int { for i, v := range allTypes.set { if a == v { return i } } return -1 } // NormOrder represents the order of the norm. Ideally, we'd only represent norms with a uint/byte. // But there are norm types that are outside numerical types, such as nuclear norm and Frobenius norm. // So it is internally represented by a float. If Go could use NaN and Inf as consts, it would have been best. // Instead, we use constructors. Both Nuclear and Frobenius norm types are represented as NaNs // // The use of NaN and Inf as "special" Norm types leads to the need for the IsInf(), IsFrobenius() and IsNuclear() methods type NormOrder float64 func Norm(ord int) NormOrder { return NormOrder(float64(ord)) } func InfNorm() NormOrder { return NormOrder(math.Inf(1)) } func NegInfNorm() NormOrder { return NormOrder(math.Inf(-1)) } func UnorderedNorm() NormOrder { return NormOrder(math.Float64frombits(0x7ff8000000000001)) } func FrobeniusNorm() NormOrder { return NormOrder(math.Float64frombits(0x7ff8000000000002)) } func NuclearNorm() NormOrder { return NormOrder(math.Float64frombits(0x7ff8000000000003)) } // Valid() is a helper method that determines if the norm order is valid.
A valid norm order is // one where the fraction component is 0 func (n NormOrder) Valid() bool { switch { case math.IsNaN(float64(n)): nb := math.Float64bits(float64(n)) if math.Float64bits(float64(UnorderedNorm())) == nb || math.Float64bits(float64(FrobeniusNorm())) == nb || math.Float64bits(float64(NuclearNorm())) == nb { return true } case math.IsInf(float64(n), 0): return true default: if _, frac := math.Modf(float64(n)); frac == 0.0 { return true } } return false } // IsUnordered returns true if the NormOrder is not an ordered norm func (n NormOrder) IsUnordered() bool { return math.Float64bits(float64(n)) == math.Float64bits(float64(UnorderedNorm())) } // IsFrobenius returns true if the NormOrder is a Frobenius norm func (n NormOrder) IsFrobenius() bool { return math.Float64bits(float64(n)) == math.Float64bits(float64(FrobeniusNorm())) } // IsNuclear returns true if the NormOrder is a nuclear norm func (n NormOrder) IsNuclear() bool { return math.Float64bits(float64(n)) == math.Float64bits(float64(NuclearNorm())) } func (n NormOrder) IsInf(sign int) bool { return math.IsInf(float64(n), sign) } func (n NormOrder) String() string { switch { case n.IsUnordered(): return "Unordered" case n.IsFrobenius(): return "Frobenius" case n.IsNuclear(): return "Nuclear" case n.IsInf(1): return "+Inf" case n.IsInf(-1): return "-Inf" default: return fmt.Sprintf("Norm %v", float64(n)) } panic("unreachable") } // FuncOpt is an option used when calling Tensor functions. type FuncOpt func(*OpOpt) // WithIncr passes in a Tensor to be incremented. func WithIncr(incr Tensor) FuncOpt { f := func(opt *OpOpt) { opt.incr = incr } return f } // WithReuse passes in a Tensor to be reused. func WithReuse(reuse Tensor) FuncOpt { f := func(opt *OpOpt) { opt.reuse = reuse } return f } // UseSafe ensures that the operation is a safe operation (copies data, does not clobber). This is the default option for most methods and functions func UseSafe() FuncOpt { f := func(opt *OpOpt) { opt.unsafe = false } return f } // UseUnsafe ensures that the operation is an unsafe operation - data will be clobbered, and operations are performed in place func UseUnsafe() FuncOpt { f := func(opt *OpOpt) { opt.unsafe = true } return f } // AsSameType makes sure that the return Tensor is the same type as input Tensors. func AsSameType() FuncOpt { f := func(opt *OpOpt) { opt.same = true } return f } // As makes sure that the return Tensor is of the type specified. Currently only works for FromMat64 func As(t Dtype) FuncOpt { f := func(opt *OpOpt) { opt.t = t } return f } tensor-0.9.24/unsafe.go000066400000000000000000000000761426512615100147360ustar00rootroot00000000000000package tensor import _ "go4.org/unsafe/assume-no-moving-gc" tensor-0.9.24/utils.go000066400000000000000000000177601426512615100146230ustar00rootroot00000000000000package tensor import ( "github.com/pkg/errors" ) const AllAxes int = -1 // MinInt returns the lowest between two ints. If both are the same it returns the first func MinInt(a, b int) int { if a <= b { return a } return b } // MaxInt returns the highest between two ints. If both are the same, it returns the first func MaxInt(a, b int) int { if a >= b { return a } return b } // MaxInts returns the max of a slice of ints.
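// Note that the accumulator starts at the zero value, so an all-negative (or empty) input returns 0:
//	MaxInts(3, 1, 4) // 4
//	MaxInts(-3, -1)  // 0, not -1
//	MaxInts()        // 0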
func MaxInts(is ...int) (retVal int) { for _, i := range is { if i > retVal { retVal = i } } return } // SumInts sums a slice of ints func SumInts(a []int) (retVal int) { for _, v := range a { retVal += v } return } // ProdInts returns the product of the elements of an int slice func ProdInts(a []int) (retVal int) { retVal = 1 if len(a) == 0 { return } for _, v := range a { retVal *= v } return } // IsMonotonicInts returns true if the slice of ints is monotonically increasing. It also returns incr1 = true if every successive element increases by exactly 1 func IsMonotonicInts(a []int) (monotonic bool, incr1 bool) { var prev int incr1 = true for i, v := range a { if i == 0 { prev = v continue } if v < prev { return false, false } if v != prev+1 { incr1 = false } prev = v } monotonic = true return } // Ltoi is Location to Index. Provide a shape, strides, and a list of integers as coordinates, and it returns the index at which the element is. func Ltoi(shape Shape, strides []int, coords ...int) (at int, err error) { if shape.IsScalarEquiv() { for _, v := range coords { if v != 0 { return -1, errors.Errorf("Scalar shape only allows 0 as an index") } } return 0, nil } for i, coord := range coords { if i >= len(shape) { err = errors.Errorf(dimMismatch, len(shape), i) return } size := shape[i] if coord >= size { err = errors.Errorf(indexOOBAxis, i, coord, size) return } var stride int switch { case shape.IsVector() && len(strides) == 1: stride = strides[0] case i >= len(strides): err = errors.Errorf(dimMismatch, len(strides), i) return default: stride = strides[i] } at += stride * coord } return at, nil } // Itol is Index to Location. func Itol(i int, shape Shape, strides []int) (coords []int, err error) { dims := len(strides) for d := 0; d < dims; d++ { var coord int coord, i = divmod(i, strides[d]) if coord >= shape[d] { err = errors.Errorf(indexOOBAxis, d, coord, shape[d]) // return } coords = append(coords, coord) } return } // UnsafePermute permutes every slice in xs according to pattern, in place. func UnsafePermute(pattern []int, xs ...[]int) (err error) { if len(xs) == 0 { err = errors.New("Permute requires something to permute") return } dims := -1 patLen := len(pattern) for _, x := range xs { if dims == -1 { dims = len(x) if patLen != dims { err = errors.Errorf(dimMismatch, len(x), len(pattern)) return } } else { if len(x) != dims { err = errors.Errorf(dimMismatch, len(x), len(pattern)) return } } } // check that all the axes are < nDims // and that there are no axes repeated seen := make(map[int]struct{}) for _, a := range pattern { if a >= dims { err = errors.Errorf(invalidAxis, a, dims) return } if _, ok := seen[a]; ok { err = errors.Errorf(repeatedAxis, a) return } seen[a] = struct{}{} } // no op really... we did the checks for no reason too. Maybe move this up? if monotonic, incr1 := IsMonotonicInts(pattern); monotonic && incr1 { err = noopError{} return } switch dims { case 0, 1: case 2: for _, x := range xs { x[0], x[1] = x[1], x[0] } default: for i := 0; i < dims; i++ { to := pattern[i] for to < i { to = pattern[to] } for _, x := range xs { x[i], x[to] = x[to], x[i] } } } return nil } // CheckSlice checks a slice to see if it's sane func CheckSlice(s Slice, size int) error { start := s.Start() end := s.End() step := s.Step() if start > end { return errors.Errorf(invalidSliceIndex, start, end) } if start < 0 { return errors.Errorf(invalidSliceIndex, start, 0) } if step == 0 && end-start > 1 { return errors.Errorf("Slice has 0 steps.
// UnsafePermute permutes, in place, each of the slices in xs according to pattern.
func UnsafePermute(pattern []int, xs ...[]int) (err error) {
	if len(xs) == 0 {
		err = errors.New("Permute requires something to permute")
		return
	}

	dims := -1
	patLen := len(pattern)
	for _, x := range xs {
		if dims == -1 {
			dims = len(x)
			if patLen != dims {
				err = errors.Errorf(dimMismatch, len(x), len(pattern))
				return
			}
		} else {
			if len(x) != dims {
				err = errors.Errorf(dimMismatch, len(x), len(pattern))
				return
			}
		}
	}

	// check that all the axes are < nDims
	// and that no axis is repeated
	seen := make(map[int]struct{})
	for _, a := range pattern {
		if a >= dims {
			err = errors.Errorf(invalidAxis, a, dims)
			return
		}
		if _, ok := seen[a]; ok {
			err = errors.Errorf(repeatedAxis, a)
			return
		}
		seen[a] = struct{}{}
	}

	// no op really... we did the checks for no reason too. Maybe move this up?
	if monotonic, incr1 := IsMonotonicInts(pattern); monotonic && incr1 {
		err = noopError{}
		return
	}

	switch dims {
	case 0, 1:
	case 2:
		for _, x := range xs {
			x[0], x[1] = x[1], x[0]
		}
	default:
		for i := 0; i < dims; i++ {
			to := pattern[i]
			for to < i {
				to = pattern[to]
			}
			for _, x := range xs {
				x[i], x[to] = x[to], x[i]
			}
		}
	}
	return nil
}

// CheckSlice checks a slice to see if it's sane.
func CheckSlice(s Slice, size int) error {
	start := s.Start()
	end := s.End()
	step := s.Step()

	if start > end {
		return errors.Errorf(invalidSliceIndex, start, end)
	}
	if start < 0 {
		return errors.Errorf(invalidSliceIndex, start, 0)
	}
	if step == 0 && end-start > 1 {
		return errors.Errorf("Slice has 0 steps. Start is %d and end is %d", start, end)
	}
	if start >= size {
		return errors.Errorf("Start %d is greater than size %d", start, size)
	}
	return nil
}

// SliceDetails takes a slice and returns its details. The whole reason for this function is to handle the nil Slice, which stands for the full slice a[:].
func SliceDetails(s Slice, size int) (start, end, step int, err error) {
	if s == nil {
		start = 0
		end = size
		step = 1
	} else {
		if err = CheckSlice(s, size); err != nil {
			return
		}
		start = s.Start()
		end = s.End()
		step = s.Step()
		if end > size {
			end = size
		}
	}
	return
}

// reuseDenseCheck checks a reuse tensor, and reshapes it to be the correct one.
func reuseDenseCheck(reuse DenseTensor, as DenseTensor) (err error) {
	if reuse.DataSize() != as.Size() {
		err = errors.Errorf("Reused Tensor %p does not have expected shape %v. Got %v instead. Reuse Size: %v, as Size %v (real: %d)", reuse, as.Shape(), reuse.Shape(), reuse.DataSize(), as.Size(), as.DataSize())
		return
	}
	return reuseCheckShape(reuse, as.Shape())
}

// reuseCheckShape checks the shape, and reshapes the reuse tensor to be correct if the size fits but the shape doesn't.
func reuseCheckShape(reuse DenseTensor, s Shape) (err error) {
	throw := BorrowInts(len(s))
	copy(throw, s)

	if err = reuse.reshape(throw...); err != nil {
		err = errors.Wrapf(err, reuseReshapeErr, s, reuse.DataSize())
		return
	}

	// clean up any funny things that may be in the reuse
	if oldAP := reuse.oldAP(); !oldAP.IsZero() {
		oldAP.zero()
	}
	if axes := reuse.transposeAxes(); axes != nil {
		ReturnInts(axes)
	}
	if viewOf := reuse.parentTensor(); viewOf != nil {
		reuse.setParentTensor(nil)
	}
	return nil
}

// memsetBools sets a boolean slice to a value.
// Reference: http://stackoverflow.com/questions/30614165/is-there-analog-of-memset-in-go
func memsetBools(a []bool, v bool) {
	if len(a) == 0 {
		return
	}
	a[0] = v
	for bp := 1; bp < len(a); bp *= 2 {
		copy(a[bp:], a[:bp])
	}
}
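// To see why the copy-doubling loop in memsetBools needs only about log2(n)
// passes, trace a slice of length 8: each copy reads the already-initialised
// prefix, so the filled region doubles on every iteration.
/*
	a := make([]bool, 8)
	memsetBools(a, true)
	// bp=1: copy(a[1:], a[:1]) fills a[1]
	// bp=2: copy(a[2:], a[:2]) fills a[2], a[3]
	// bp=4: copy(a[4:], a[:4]) fills a[4]..a[7]
*/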
// allones returns true if every element of a is 1.
func allones(a []int) bool {
	for i := range a {
		if a[i] != 1 {
			return false
		}
	}
	return true
}

// getFloat64s returns the underlying []float64 of a Tensor, taking the unsafeMem fast path when available.
func getFloat64s(a Tensor) []float64 {
	if um, ok := a.(unsafeMem); ok {
		return um.Float64s()
	}
	return a.Data().([]float64)
}

// getFloat32s returns the underlying []float32 of a Tensor, taking the unsafeMem fast path when available.
func getFloat32s(a Tensor) []float32 {
	if um, ok := a.(unsafeMem); ok {
		return um.Float32s()
	}
	return a.Data().([]float32)
}

// getInts returns the underlying []int of a Tensor, taking the unsafeMem fast path when available.
func getInts(a Tensor) []int {
	if um, ok := a.(unsafeMem); ok {
		return um.Ints()
	}
	return a.Data().([]int)
}

/* FOR ILLUSTRATIVE PURPOSES */

// Permute permutes each of the slices in xs according to pattern. This function exists for illustrative purposes (i.e. it is the dumb, unoptimized version).
//
// In reality, the UnsafePermute function is used.
/*
func Permute(pattern []int, xs ...[]int) (retVal [][]int, err error) {
	if len(xs) == 0 {
		err = errors.New("Permute requires something to permute")
		return
	}

	dims := -1
	patLen := len(pattern)
	for _, x := range xs {
		if dims == -1 {
			dims = len(x)
			if patLen != dims {
				err = errors.Errorf(dimMismatch, len(x), len(pattern))
				return
			}
		} else {
			if len(x) != dims {
				err = errors.Errorf(dimMismatch, len(x), len(pattern))
				return
			}
		}
	}

	// check that all the axes are < nDims
	// and that no axis is repeated
	seen := make(map[int]struct{})
	for _, a := range pattern {
		if a >= dims {
			err = errors.Errorf(invalidAxis, a, dims)
			return
		}
		if _, ok := seen[a]; ok {
			err = errors.Errorf(repeatedAxis, a)
			return
		}
		seen[a] = struct{}{}
	}

	// no op really... we did the checks for no reason too. Maybe move this up?
	if monotonic, incr1 := IsMonotonicInts(pattern); monotonic && incr1 {
		retVal = xs
		err = noopError{}
		return
	}

	switch dims {
	case 0, 1:
		retVal = xs
	case 2:
		for _, x := range xs {
			rv := []int{x[1], x[0]}
			retVal = append(retVal, rv)
		}
	default:
		retVal = make([][]int, len(xs))
		for i := range retVal {
			retVal[i] = make([]int, dims)
		}
		for i, v := range pattern {
			for j, x := range xs {
				retVal[j][i] = x[v]
			}
		}
	}
	return
}
*/
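// Below is a minimal usage sketch for UnsafePermute (an assumption about
// typical use, not code from this package): transposing a 2-D access pattern
// by permuting its shape and strides together, in place.
/*
	shape := []int{2, 3}
	strides := []int{3, 1}
	if err := UnsafePermute([]int{1, 0}, shape, strides); err != nil {
		// note: a monotonic pattern that increases by 1 (e.g. []int{0, 1})
		// makes UnsafePermute return a noopError rather than doing any work
	}
	// shape is now [3 2]; strides is now [1 3]
*/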