tensor-0.9.24/.gitgnore
*.vscode/*
.gitignore
*.dot
tensor-0.9.24/.github/FUNDING.yml
# These are supported funding model platforms
github: [chewxy, owulveryck, dcu] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
tensor-0.9.24/.github/workflows/.go.yml
on:
push:
branches: [ master ]
pull_request:
name: test and build
env:
GOPROXY: "https://proxy.golang.org"
CI_NO_PYTHON: "true"
jobs:
test:
strategy:
matrix:
go: [1.18.x, 1.17.x, 1.16.x, 1.15.x]
os: [ubuntu-latest, macos-latest, windows-latest]
tags: [avx, sse]
allowfail: [false]
include:
- go: tip
os: ubuntu-latest
allowfail: true
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.allowfail }}
timeout-minutes: 5
steps:
- name: Install Go ${{ matrix.go }} on ${{ matrix.os }}
if: matrix.go != 'tip'
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
# temporary hack:
# https://github.com/actions/setup-go/issues/21#issuecomment-565704236
- name: Install Go ${{ matrix.go }} on ${{ matrix.os }}
if: matrix.go == 'tip'
run: |
git clone --depth=1 https://go.googlesource.com/go $HOME/gotip
cd $HOME/gotip/src
./make.bash
echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV
echo "$HOME/gotip/bin" >> $GITHUB_PATH
- name: Checkout code
uses: actions/checkout@v2
- name: Run tests
run: |
go test ./... -v -race
go test ./... -race -tags=${{ matrix.tags }}
coverage:
env:
CI_NO_PYTHON: "false"
PYTHON_COMMAND: python
strategy:
matrix:
tags: [avx, sse]
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.14.x
- name: Install Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
architecture: 'x64'
- name: Install Pip
uses: BSFishy/pip-action@v1
with:
packages: numpy
- name: Checkout code
uses: actions/checkout@v2
- name: Calc coverage
run: |
export PATH=$PATH:$(go env GOPATH)/bin
go test ./... -v -covermode=atomic -coverprofile=coverage.out
- name: Convert coverage to lcov
uses: jandelgado/gcov2lcov-action@v1.0.0
with:
infile: coverage.out
outfile: coverage.lcov
- name: Coveralls
uses: coverallsapp/github-action@v1.0.1
with:
github-token: ${{ secrets.github_token }}
path-to-lcov: coverage.lcov
build:
strategy:
matrix:
go: [1.13, 1.14]
goos: [linux, darwin]
goarch: [amd64, arm]
exclude:
# darwin/arm seems useless
- goarch: "arm"
goos: darwin
runs-on: ubuntu-latest
needs: [test]
steps:
- name: Install Go ${{ matrix.go }}
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Checkout code
uses: actions/checkout@v2
- name: build
run: go build .
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
tensor-0.9.24/.gitignore
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# vendor
/vendor
tensor-0.9.24/ALTERNATIVEDESIGNS.md
# Alternative Designs #
This document holds the alternative designs for the various tensor data structures that had been tried in the past and why they didn't make it to the final designs. That doesn't mean that the current design is the best. It just means that the authors may not have gone far enough with these other designs.
## Single interface, multiple packages ##
In this design, there is a single interface for dense tensors, which is rather similar to the one that currently exists:
```
type Tensor interface {
Shape() Shape
Strides() []int
Dtype() Dtype
Dims() int
Size() int
DataSize() int
// Basic operations all tensors must support
Slice(...Slice) (Tensor, error)
At(...int) (interface{}, error)
SetAt(v interface{}, coord ...int) error
Reshape(...int) error
T(axes ...int) error
UT()
Transpose() error // Transpose actually moves the data
Apply(fn interface{}, opts ...FuncOpt) (Tensor, error)
}
```
The idea is then to have subpackages for each type that would implement `Tensor`, like so:
```
// in tensor/f32
type Tensor struct {
}
// implements tensor.Tensor
// in tensor/f64
type Tensor struct {
}
// implements tensor.Tensor
```
Additionally there are interfaces which define operational types:
```
type Adder interface {
Add(other Tensor) (Tensor, error)
}
type Number interface {
Adder
Suber
Muler
Diver
}
type Real interface {
Number
Tanher
Exper
}
type Complex interface {
Real
}
```
And there are functions which operate on the `Tensor`s:
```
func Add(a, b Tensor) (Tensor, error) {
	if adder, ok := a.(Adder); ok {
		return adder.Add(b)
	}
	return nil, errors.New("Cannot Add: Not an Adder")
}
```
### Pros ###
It is very idiomatic Go, and no reflection was used. It is an ideal model of an abstract data type.
### Cons ###
1. Having all packages import a common "tensor/types" (which holds `*AP`, `Shape` and `Slice` definitions).
2. It'd be ideal to keep all the packages in sync in terms of the methods and functions that the subpackages export. In reality that turns out to be more difficult than expected.
3. Performance issues in hot loops: In a number of hot loops, the amount of `runtime.assertI2I2` ended up taking up a large portion of the cycles.
4. Performance issues wrt allocation of objects. Instead of a single pool, every subpackage would have to implement its own object pool and manage it.
5. There was a central registry of `Dtype`s, and a variant of the SQL driver pattern was used (you had to `import _ "github.com/chewxy/gorgonia/tensor/f32"` to register the `Float32` Dtype). This is ugly.
6. Cross package requirements: for `Argmax` and `Argmin` related functions, it'd be nice to be able to return a `Tensor` of `int`. That meant having `tensor/i` as a core dependency in the rest of the packages.
#### Workarounds ####
* `Slice` is an interface. All packages that implement `tensor.Tensor` *could* implement their own `Slice`. But that'd be a lot of repeated work.
* `AP` and `Shape` could be made interfaces, but for the latter it means dropping the ability to loop through the shape dimensions.
* Keeping the packages in sync could be solved with code generation programs, but if we were to do that, we might as well merge everything into one package
### Notes for revisits ###
This idea is nice. I'd personally love to revisit (and do from time to time). If we were to revisit this idea, there would have to be some changes, which I will suggest here:
1. Make `Transpose` and `T` functions that work on `Tensor` instead of making it a `Tensor`-defining method. This would be done the same way as `Stack` and `RollAxis` and `Concat`.
2. Perhaps re-weight the importance of having an in-place transpose. The in-place transpose was the result of dealing with a very large matrix when my machine didn't have enough memory. It's generally slower than reallocating a new backing array anyway.
# One struct, multiple backing interfaces #
In this design, we abstract away the backing array into an interface. So we'd have this:
```
type Tensor struct {
*AP
t Dtype
data Array
}
type Array interface {
Len() int
Cap() int
Get(int) interface{}
Set(int, interface{}) error
Map(fn interface{}) error
}
```
And we'd have these types which implemented the `Array` interface:
```
type Ints []int
type F32s []float32
type F64s []float64
// and so on and so forth, and each would implement Array
```
### Pros ###
* Multiple subpackages only when necessary (external, "unhandled" dtypes)
* Shared definition of `*AP`, `Shape`, `Dtype` (no more use of a common package)
* Clean package structure - easier to generate code for
### Cons ###
* Difficult to implement other tensor types (sparse for example)
* VERY VERY slow
The slowness was caused by excessive calls to `runtime.convT2E` when using the `Get` and `Set` methods, which for primitive types cause plenty of allocations on the heap. It was unacceptably slow for any deep learning work.
#### Workarounds ####
Type switch on known data types, and use slower methods for out-of-bounds data types that do not have specializations. This led to ugly, unwieldy code, and also shifted the pressure from `runtime.convT2E` to `runtime.assertI2I2`, which, while performing better than having to allocate primitive values on the heap, still led to a lot of unnecessary cycles being spent.
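To make the workaround concrete, here is a minimal, self-contained sketch of the type-switch pattern. The `Array` interface is trimmed down from the one above, and all names are illustrative rather than the code that actually shipped:
```
package main

import "fmt"

// Array is the abstracted backing array from the design above, trimmed down to
// the methods needed for this illustration.
type Array interface {
	Len() int
	Get(i int) interface{}
	Set(i int, v interface{}) error
}

// F64s implements Array. Get boxes the float64 into an interface{}, which is
// where the runtime.convT2E cost shows up in hot loops.
type F64s []float64

func (a F64s) Len() int                       { return len(a) }
func (a F64s) Get(i int) interface{}          { return a[i] }
func (a F64s) Set(i int, v interface{}) error { a[i] = v.(float64); return nil }

// addScalar type-switches on the known concrete types so the common case stays
// on an allocation-free path, and only falls back to the boxed Get/Set for
// "unhandled" backing arrays.
func addScalar(a Array, s float64) {
	switch t := a.(type) {
	case F64s: // specialised fast path: plain slice arithmetic
		for i := range t {
			t[i] += s
		}
	default: // slow path: every element round-trips through interface{}
		for i := 0; i < a.Len(); i++ {
			a.Set(i, a.Get(i).(float64)+s)
		}
	}
}

func main() {
	xs := F64s{1, 2, 3}
	addScalar(xs, 10)
	fmt.Println(xs) // [11 12 13]
}
```
The specialised branch keeps the per-element work free of interface boxing, which is why the residual cost then shows up in the type assertions (`runtime.assertI2I2`) instead.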
# Reflection + Pointers + Interfaces #
This was the design that was reigning before the refactor at #127.
The idea is to combine parts of the first and second attempts and fill in the remaining gaps with the use of reflection.
tensor-0.9.24/CONTRIBUTING.md
# Contributing #
We want to make contributing as easy as possible. There are a number of [issues](https://github.com/chewxy/gorgonia/issues) that can be solved. Most of the issues are labelled.
## Labels ##
Here's the current explanation of the labels:
| Label | Expertise Required | Notes |
|-------|--------------------|-------|
| easy | Familiar with Go | Usually there would be a link to an existing implementation that is similar |
| requires Linear Algebra knowledge | Linear algebra knowledge required on top of being familiar with Go | Linear Algebra knowledge will go a long way in helping identify subtler bugs |
| no label | Knowledge about Gorgonia | |
| help wanted | Various expertise | Typically it means the task requires extranormal speciality that the author doesn't possess |
| complicated | - | The code change will affect, and involve many files |
## Steps ##
1. Fork this project on Github
2. Clone to your local drive
3. Check if there are any pending issues in the issues tracker
4. Pick an unassigned issue that you can accomplish. Comment on the issue to pick it up.
5. Work on it, using topic branches is highly recommended.
## Testing ##
Testing is important. Please make sure new code comes with tests, and that the existing tests still pass.
## How To Get Your Pull Request Accepted ##
1. Test, test, test. Make sure your new code doesn't break the existing tests
2. If you add new code, you must add tests.
3. `gofmt` your code
4. Atomic pull requests - solve one issue per pull request. Some issues may break down to atomic tasks. In those cases, it's OK to solve them partially.
## Git Workflow ##
The master branch is considered to be the "canonical" branch. There is no develop branch. The author prefers use of topic branches. The workflow can best be described by the [Github Flow](https://guides.github.com/introduction/flow/). Please try to keep to this flow.
# Development #
## How the `Tensor` Libs Are Developed ##
## Debugging ##
Whilst the author encourages the use of [Delve](https://github.com/derekparker/delve), it may often be easier to log the trace using the debug loggers. Gorgonia comes with a debug build tag precisely to help with that. To build debug builds, simply do this:
```go
go build -tags='debug' .
```
The debug tag enables various tracing options, available in `debug.go`. There are several debug constants that are used:
* `compileDev`
* `shapeInferenceDev`
* `typeSystemDev`
* `symdiffDev`
* `autodiffDev`
* `machineDev`
* `stabilizationDev`
* `solverDev`
* `cudaDev`
These are the bools that you need to set in order to get a trace. If for example, you think there is something wrong with the type system, simply set `typeSystemDev` to `true` and then insert `typeSysLogf` into wherever you want to trace.
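If you are unfamiliar with this pattern, here is a minimal, self-contained sketch of how a build-tag-gated trace logger works. The names mirror the constants listed above, but the actual `debug.go` in Gorgonia differs:
```go
//go:build debug

package main

import "log"

// typeSystemDev gates the type-system trace; set it to true to see the logs.
const typeSystemDev = true

// typeSysLogf only emits output when the type-system trace is enabled, so
// calls to it can be sprinkled liberally through the code being debugged.
func typeSysLogf(format string, args ...interface{}) {
	if typeSystemDev {
		log.Printf("typesys: "+format, args...)
	}
}

func main() {
	typeSysLogf("inferring type of %v", "someNode")
}
```
Because the file carries the `debug` build constraint, the tracing code (and its overhead) disappears entirely from normal builds.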
tensor-0.9.24/CONTRIBUTORS.md
# Significant Contributors #
* Xuanyi Chew (@chewxy) - initial package
* Naseer Dari (@ndari) - errors and error handling
* Joe Kabaka (@kabaka0) - masked array functionality
* Stuart Carnie (@stuartcarnie) - performance optimization for iterators
* Jorge Landivar (@docmerlin) - performance optimization for `*Dense`
# Contributors
* Andrew Murray | @radarhere
* Ankit Raj | @aj0t
* David Soller | @3ygun
* Davor Kapsa | @dvrkps
* James Michael DuPont | @h4ck3rm1k3
* Yuanlin Lian | @alienchow
* Andrew SnodGrass | @pointlander
For more contributors, check out the [github contributors page](https://github.com/gorgonia/tensor/graphs/contributors). A large number of contributors were from before the repository split off (the `tensor` repo was originally a subpackage of the `gorgonia` repository), so some bits may be missing.
tensor-0.9.24/LICENCE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 Gorgonia Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
tensor-0.9.24/README.md
# Package `tensor` [](https://godoc.org/gorgonia.org/tensor) [](https://badge.fury.io/gh/gorgonia%2Ftensor) [](https://travis-ci.org/gorgonia/tensor) [](https://coveralls.io/github/gorgonia/tensor?branch=master) [](https://goreportcard.com/report/gorgonia.org/tensor) [](http://github.com/badges/stability-badges) #
Package `tensor` is a package that provides efficient, generic (by some definitions of generic) n-dimensional arrays in Go. Also in this package are functions and methods that are used commonly in arithmetic, comparison and linear algebra operations.
The main purpose of this package is to support the operations required by [Gorgonia](https://gorgonia.org/gorgonia).
## Introduction ##
In the data analysis world, [Numpy](http://www.numpy.org/) and [Matlab](https://www.mathworks.com/products/matlab.html) currently reign supreme. Both tools rely heavily on having performant n-dimensional arrays, or tensors. **There is an obvious need for multidimensional arrays in Go**.
While slices are cool, a large majority of scientific and numeric computing work relies heavily on matrices (two-dimensional arrays), three dimensional arrays and so on. In Go, the typical way of getting multidimensional arrays is to use something like `[][]T`. Applications that are more math heavy may opt to use the very excellent Gonum [`matrix` package](https://github.com/gonum/matrix). What then if we want to go beyond having a `float64` matrix? What if we wanted a 3-dimensional `float32` array?
It stands to reason, then, that there should be a data structure that handles these things. The `tensor` package fits in that niche.
### Basic Idea: Tensor ###
A tensor is a multidimensional array. It's like a slice, but works in multiple dimensions.
With slices, there are usage patterns that are repeated enough that warrant abstraction - `append`, `len`, `cap`, `range` are abstractions used to manipulate and query slices. Additionally slicing operations (`a[:1]` for example) are also abstractions provided by the language. Andrew Gerrand wrote a very good write up on [Go's slice usage and internals](https://blog.golang.org/go-slices-usage-and-internals).
Tensors come with their own set of usage patterns and abstractions. Most of these have analogues in slices, enumerated below (do note that certain slice operations will have more than one tensor analogue - this is due to the number of options available):
| Slice Operation | Tensor Operation |
|:---------------:|:----------------:|
| `len(a)` | `T.Shape()` |
| `cap(a)` | `T.DataSize()` |
| `a[:]` | `T.Slice(...)` |
| `a[0]` | `T.At(x,y)` |
| `append(a, ...)`| `T.Stack(...)`, `T.Concat(...)` |
| `copy(dest, src)`| `T.CopyTo(dest)`, `tensor.Copy(dest, src)` |
| `for _, v := range a` | `for i, err := iterator.Next(); err == nil; i, err = iterator.Next()` |
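For the last row above, a minimal sketch of the iterator pattern might look like the following. It assumes the `IteratorFromDense` constructor and that `Data()` returns the backing slice; consult the [godoc](https://godoc.org/gorgonia.org/tensor) for the authoritative API:

```go
package main

import (
	"fmt"

	"gorgonia.org/tensor"
)

func main() {
	// a (2, 3) matrix of float64
	a := tensor.New(tensor.WithShape(2, 3), tensor.WithBacking([]float64{1, 2, 3, 4, 5, 6}))

	// the iterator yields flat indices into the backing array in the
	// tensor's logical order - the analogue of ranging over a slice
	it := tensor.IteratorFromDense(a)
	data := a.Data().([]float64)
	for i, err := it.Next(); err == nil; i, err = it.Next() {
		fmt.Println(i, data[i])
	}
}
```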
Some operations for a tensor do not have direct analogues to slice operations. However, they stem from the same idea, and can be considered a superset of all operations common to slices. They're enumerated below:
| Tensor Operation | Basic idea in slices |
|:----------------:|:--------------------:|
|`T.Strides()` | The stride of a slice will always be one element |
|`T.Dims()` | The dimensions of a slice will always be one |
|`T.Size()` | The size of a slice will always be its length |
|`T.Dtype()` | The type of a slice is always known at compile time |
|`T.Reshape()` | Given the shape of a slice is static, you can't really reshape a slice |
|`T.T(...)` / `T.Transpose()` / `T.UT()` | No equivalent with slices |
## The Types of Tensors ##
As of the current revision of this package, only dense tensors are supported. Support for sparse matrices (in the form of a sparse column matrix and a dictionary-of-keys matrix) will be coming shortly.
### Dense Tensors ###
The `*Dense` tensor is the primary tensor and is represented by a singular flat array, regardless of dimensions. See the [Design of `*Dense`](#design-of-dense) section for more information. It can hold any data type.
### Compressed Sparse Column Matrix ###
Documentation Coming soon
### Compressed Sparse Row Matrix ###
Documentation Coming soon
## Usage ##
To install: `go get -u "gorgonia.org/tensor"`
To create a matrix with package `tensor` is easy:
```go
// Creating a (2,2) matrix of int:
a := New(WithShape(2, 2), WithBacking([]int{1, 2, 3, 4}))
fmt.Printf("a:\n%v\n", a)
// Output:
// a:
// ⎡1 2⎤
// ⎣3 4⎦
//
```
To create a 3-Tensor is just as easy - just put the correct shape and you're good to go:
```go
// Creating a (2,3,4) 3-Tensor of float32
b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4))
fmt.Printf("b:\n%1.1f\n", b)
// Output:
// b:
// ⎡ 0.0 1.0 2.0 3.0⎤
// ⎢ 4.0 5.0 6.0 7.0⎥
// ⎣ 8.0 9.0 10.0 11.0⎦
//
// ⎡12.0 13.0 14.0 15.0⎤
// ⎢16.0 17.0 18.0 19.0⎥
// ⎣20.0 21.0 22.0 23.0⎦
```
Accessing and Setting data is fairly easy. Dimensions are 0-indexed, so if you come from an R background, suck it up like I did. Be warned, this is the inefficient way if you want to do a batch access/setting:
```go
// Accessing data:
b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4))
x, _ := b.At(0, 1, 2)
fmt.Printf("x: %v\n", x)
// Setting data
b.SetAt(float32(1000), 0, 1, 2)
fmt.Printf("b:\n%v", b)
// Output:
// x: 6
// b:
// ⎡ 0 1 2 3⎤
// ⎢ 4 5 1000 7⎥
// ⎣ 8 9 10 11⎦
// ⎡ 12 13 14 15⎤
// ⎢ 16 17 18 19⎥
// ⎣ 20 21 22 23⎦
```
Bear in mind to pass in data of the correct type. This example will cause a panic:
```go
// Accessing data:
b := New(WithBacking(Range(Float32, 0, 24)), WithShape(2, 3, 4))
x, _ := b.At(0, 1, 2)
fmt.Printf("x: %v\n", x)
// Setting data
b.SetAt(1000, 0, 1, 2)
fmt.Printf("b:\n%v", b)
```
There is a whole laundry list of methods and functions available at the [godoc](https://godoc.org/gorgonia.org/tensor) page
## Design of `*Dense` ##
The design of the `*Dense` tensor is quite simple in concept. However, let's start with something more familiar. This is a visual representation of a slice in Go (taken from rsc's excellent blog post on [Go data structures](https://research.swtch.com/godata)):

The data structure for `*Dense` is similar, but a lot more complex. Much of the complexity comes from the need to do accounting work on the data structure as well as preserving references to memory locations. This is how the `*Dense` is defined:
```go
type Dense struct {
*AP
array
e Engine
// other fields elided for simplicity's sake
}
```
And here's a visual representation of the `*Dense`.

`*Dense` draws its inspiration from Go's slice. Underlying it all is a flat array, and access to elements is controlled by `*AP`. Where a Go slice is able to store its metadata in a 3-word structure (obviating the need to allocate memory), a `*Dense` unfortunately needs to allocate some memory. The majority of the data is stored in the `*AP` structure, which contains metadata such as shape, stride, and methods for accessing the array.
`*Dense` embeds an `array` (not to be confused with Go's array), which is an abstracted data structure that looks like this:
```
type array struct {
storage.Header
t Dtype
v interface{}
}
```
`*storage.Header` is the same structure as `reflect.SliceHeader`, except it stores an `unsafe.Pointer` instead of a `uintptr`. This is done so that eventually, when more tests are done to determine how the garbage collector marks data, the `v` field may be removed.
The `storage.Header` field of the `array` (and hence `*Dense`) is there to provide a quick and easy way to translate back into a slice for operations that use familiar slice semantics, on which many of the operations depend.
By default, `*Dense` operations try to use the language builtin slice operations by casting the `*storage.Header` field into a slice. However, to accommodate a larger subset of types, the `*Dense` operations fall back to using pointer arithmetic to iterate through the slices for other types with non-primitive kinds (yes, you CAN do pointer arithmetic in Go. It's slow and unsafe). The result is slower operations for types with non-primitive kinds.
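To make the "cast the header back into a slice" idea concrete, here is a minimal, self-contained sketch. It assumes the 3-word layout described above (pointer, length, capacity); the `header` type and `asFloat64s` function are illustrative stand-ins, not the package's internals:

```go
package main

import (
	"fmt"
	"unsafe"
)

// header stands in for storage.Header: a slice header that stores an
// unsafe.Pointer instead of a uintptr.
type header struct {
	Ptr unsafe.Pointer
	L   int
	C   int
}

// asFloat64s reinterprets the header as a []float64. This only works because
// header has the same memory layout as Go's slice header.
func asFloat64s(h *header) []float64 {
	return *(*[]float64)(unsafe.Pointer(h))
}

func main() {
	backing := []float64{1, 2, 3, 4}
	h := header{Ptr: unsafe.Pointer(&backing[0]), L: len(backing), C: cap(backing)}
	fmt.Println(asFloat64s(&h)) // [1 2 3 4]
}
```

Because the cast is essentially free for primitive dtypes, operations can reuse ordinary slice code; only non-primitive kinds pay the pointer-arithmetic toll described above.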
### Memory Allocation ###
`New()` functions as expected - it returns a `*Dense` backed by an array of zeroed memory. The underlying array is allocated, depending on what `ConsOpt` is passed in. With `New()`, `ConsOpt`s are used to determine the exact nature of the `*Dense`. It's a bit icky (I'd have preferred everything to have been known statically at compile time), but it works. Let's look at some examples:
``` go
x := New(Of(Float64), WithShape(2,2)) // works
y := New(WithShape(2,2)) // panics
z := New(WithBacking([]int{1,2,3,4})) // works
```
The following will happen:
* Line 1 works: This will allocate a `float64` array of size 4.
* Line 2 will cause a panic. This is because the function doesn't know what to allocate - it only knows to allocate an array of *something* for the size of 4.
* Line 3 will NOT fail, because the array has already been allocated (the `*Dense` reuses the same backing array as the slice passed in). Its shape will be set to `(4)`.
Alternatively you may also pass in an `Engine`. If that's the case then the allocation will use the `Alloc` method of the `Engine` instead:
```go
x := New(Of(Float64), WithEngine(myEngine), WithShape(2,2))
```
The above call will use `myEngine` to allocate memory instead. This is useful in cases where you may want to manually manage your memory.
### Other failed designs ###
The alternative designs can be seen in the [ALTERNATIVE DESIGNS document](https://github.com/gorgonia/tensor/blob/master/ALTERNATIVEDESIGNS.md)
## Generic Features ##
Example:
```go
x := New(WithBacking([]string{"hello", "world", "hello", "world"}), WithShape(2,2))
x = New(WithBacking([]int{1,2,3,4}), WithShape(2,2))
```
The above code will not cause a compile error, because the structure holding the underlying array (of `string`s and then of `int`s) is a `*Dense`.
One could argue that this sidesteps the compiler's type checking system, deferring it to runtime (which a number of people consider dangerous). However, tools are being developed to type check these things, and until Go does support typechecked generics, unfortunately this will be the way it has to be.
Currently, the tensor package supports a limited form of genericity - limited to a tensor of any primitive type.
# How This Package is Developed #
Much of the code in this package is generated. The code to generate them is in the directory `genlib2`. `genlib2` requires [`goimports`](https://godoc.org/golang.org/x/tools/cmd/goimports) binary to be available in the $PATH.
## Tests ##
Tests require python with numpy installed. You can select which python interpreter is being used by setting the environment variable `PYTHON_COMMAND` accordingly. The default value is `python`.
## Things Knowingly Untested For ##
- `complex64` and `complex128` are excluded from quick check generation process [Issue #11](https://github.com/gorgonia/tensor/issues/11)
### TODO ###
* [ ] Identity optimizations for op
* [ ] Zero value optimizations
* [ ] fix Random() - super dodgy
# How To Get Support #
The best way of support right now is to open a ticket on Github.
# Contributing #
Obviously since you are most probably reading this on Github, Github will form the major part of the workflow for contributing to this package.
See also: CONTRIBUTING.md
## Contributors and Significant Contributors ##
All contributions are welcome. However, there is a new class of contributor, called Significant Contributors.
A Significant Contributor is one who has shown *deep understanding* of how the library works and/or its environs. Here are examples of what constitutes a Significant Contribution:
* Wrote significant amounts of documentation pertaining to **why**/the mechanics of particular functions/methods and how the different parts affect one another
* Wrote code, and tests around the more intricately connected parts of Gorgonia
* Wrote code and tests, and have at least 5 pull requests accepted
* Provided expert analysis on parts of the package (for example, you may be a floating point operations expert who optimized one function)
* Answered at least 10 support questions.
Significant Contributors list will be updated once a month (if anyone even uses Gorgonia that is).
# Licence #
Gorgonia and the `tensor` package are licenced under a variant of Apache 2.0. It's for all intents and purposes the same as the Apache 2.0 Licence, with the exception of not being able to commercially profit directly from the package unless you're a Significant Contributor (for example, providing commercial support for the package). It's perfectly fine to profit directly from a derivative of Gorgonia (for example, if you use Gorgonia as a library in your product).
Everyone is still allowed to use Gorgonia for commercial purposes (example: using it in a software for your business).
## Various Other Copyright Notices ##
These are the packages and libraries which inspired and were adapted from in the process of writing Gorgonia (the Go packages that were used were already declared above):
| Source | How it's Used | Licence |
|------|---|-------|
| Numpy | Inspired large portions. Directly adapted algorithms for a few methods (explicitly labelled in the docs) | MIT/BSD-like. [Numpy Licence](https://github.com/numpy/numpy/blob/master/LICENSE.txt) |
tensor-0.9.24/ap.go
package tensor
import (
"fmt"
"github.com/pkg/errors"
)
// An AP is an access pattern. It tells the various ndarrays how to access their data through the use of strides
// Through the AP, there are several definitions of things, most notably there are two very specific "special cases":
// Scalar has Dims() of 0.
// - (1)
// Scalarlikes are higher order tensors, but each with a size of 1. The Dims() are not 0.
// - (1, 1)
// - (1, 1, 1)
// - (1, 1, 1, 1), etc
// Vector has Dims() of 1, but its shape can take several forms:
// - (x, 1)
// - (1, x)
// - (x)
// Matrix has Dims() of 2. This is the most basic form. The len(shape) has to be equal to 2 as well
// ndarray has Dims() of n.
type AP struct {
shape Shape // len(shape) is the operational definition of the dimensions
strides []int // strides is usually calculated from shape
fin bool // is this struct change-proof?
o DataOrder
Δ Triangle
}
func makeAP(size int) AP {
return AP{
shape: Shape(BorrowInts(size)),
strides: BorrowInts(size),
}
}
// MakeAP creates an AP, given the shape and strides.
func MakeAP(shape Shape, strides []int, o DataOrder, Δ Triangle) AP {
return AP{
shape: shape,
strides: strides,
o: o,
Δ: Δ,
fin: true,
}
}
// Init initializes an already created AP with a shape and strides.
// It will panic if AP is nil.
func (ap *AP) Init(shape Shape, strides []int) {
ap.shape = shape
ap.strides = strides
ap.fin = true
}
// SetShape is for very specific times when modifying the AP is necessary, such as reshaping and doing I/O related stuff
//
// Caveats:
//
// - SetShape will recalculate the strides.
//
// - If the AP is locked, nothing will happen
func (ap *AP) SetShape(s ...int) {
if !ap.fin {
// scalars are a special case, we don't want to remove it completely
if len(s) == 0 {
if ap.shape == nil || ap.strides == nil {
ap.shape = Shape{}
}
ap.shape = ap.shape[:0]
ap.strides = ap.strides[:0]
return
}
if ap.shape != nil {
ReturnInts(ap.shape)
ap.shape = nil
}
if ap.strides != nil {
ReturnInts(ap.strides)
ap.strides = nil
}
ap.shape = Shape(s).Clone()
ap.strides = ap.calcStrides()
}
}
// Shape returns the shape of the AP
func (ap *AP) Shape() Shape { return ap.shape }
// Strides returns the strides of the AP
func (ap *AP) Strides() []int { return ap.strides }
// Dims returns the dimensions of the shape in the AP
func (ap *AP) Dims() int { return ap.shape.Dims() }
// Size returns the expected array size of the shape
func (ap *AP) Size() int { return ap.shape.TotalSize() }
// String implements fmt.Stringer and runtime.Stringer
func (ap *AP) String() string { return fmt.Sprintf("%v", ap) }
// Format implements fmt.Formatter
func (ap *AP) Format(state fmt.State, c rune) {
fmt.Fprintf(state, "Shape: %v, Stride: %v, Lock: %t", ap.shape, ap.strides, ap.fin)
}
// IsVector returns whether the access pattern falls into one of three possible definitions of vectors:
// vanilla vector (not a row or a col)
// column vector
// row vector
func (ap *AP) IsVector() bool { return ap.shape.IsVector() }
// IsVectorLike returns true if the shape is vector-like (i.e. the shape only has one dim that is a non-1).
func (ap *AP) IsVectorLike() bool {
return ap.shape.IsVectorLike() && allones(ap.strides)
}
// IsColVec returns true when the access pattern has the shape (x, 1)
func (ap *AP) IsColVec() bool { return ap.shape.IsColVec() }
// IsRowVec returns true when the access pattern has the shape (1, x)
func (ap *AP) IsRowVec() bool { return ap.shape.IsRowVec() }
// IsScalar returns true if the access pattern indicates it's a scalar value.
func (ap *AP) IsScalar() bool { return ap.shape.IsScalar() }
// IsScalarEquiv returns true if the access pattern is equivalent to a scalar shape.
func (ap *AP) IsScalarEquiv() bool { return ap.shape.IsScalarEquiv() }
// IsMatrix returns true if it's a matrix. This is mostly a convenience method. RowVec and ColVecs are also considered matrices
func (ap *AP) IsMatrix() bool { return len(ap.shape) == 2 }
// IsZero tell us if the ap has zero size
func (ap *AP) IsZero() bool {
return len(ap.shape) == 0 && len(ap.strides) == 0 && !ap.fin && ap.o == 0 && ap.Δ == 0
}
// Zero zeros out an AP.
func (ap *AP) zero() {
// log.Printf("ZEROING. Called by %v", string(debug.Stack()))
// Jorge's original implementation for zeroing a AP is as below
// but to cater for the (*Dense).fix() method of the *Dense
// a nil shape is used to signal unsetness
// so we cannot just truncate the shape even though it would be a lot more efficient
// ap.shape = ap.shape[:0]
// ap.strides = ap.strides[:0]
ReturnInts([]int(ap.shape))
ReturnInts(ap.strides)
ap.zeroOnly()
}
// side effect free zeroing
func (ap *AP) zeroOnly() {
ap.shape = nil
ap.strides = nil
ap.fin = false
ap.o = 0
ap.Δ = 0
}
func (ap *AP) zeroWithDims(dims int) {
//ap.shape = BorrowInts(dims)
//ap.strides = BorrowInts(dims)
if cap(ap.shape) >= dims {
ap.shape = ap.shape[:dims]
}
ap.shape = BorrowInts(dims)
if cap(ap.strides) >= dims {
ap.strides = ap.strides[:dims]
}
ap.strides = BorrowInts(dims)
}
// Clone clones the *AP. Clearly. It returns AP
func (ap *AP) Clone() (retVal AP) {
retVal = makeAP(cap(ap.shape))
copy(retVal.shape, ap.shape)
copy(retVal.strides, ap.strides)
// handle vectors
retVal.shape = retVal.shape[:len(ap.shape)]
retVal.strides = retVal.strides[:len(ap.strides)]
retVal.fin = ap.fin
retVal.o = ap.o
retVal.Δ = ap.Δ
return
}
func (ap *AP) CloneTo(dest *AP) {
dest.shape = append(dest.shape[:0], ap.shape...)
dest.strides = append(dest.strides[:0], ap.strides...)
dest.fin = ap.fin
dest.o = ap.o
dest.Δ = ap.Δ
}
// DataOrder returns the data order of the AP.
func (ap *AP) DataOrder() DataOrder { return ap.o }
// C returns true if the access pattern is C-contiguous array
func (ap *AP) C() bool { return ap.o.IsRowMajor() && ap.o.IsContiguous() }
// F returns true if the access pattern is Fortran contiguous array
func (ap *AP) F() bool { return ap.o.IsColMajor() && ap.o.IsContiguous() }
// S returns the metadata of the sliced tensor.
func (ap *AP) S(size int, slices ...Slice) (newAP AP, ndStart, ndEnd int, err error) {
if len(slices) > len(ap.shape) {
// error
err = errors.Errorf(dimMismatch, len(ap.shape), len(slices))
return
}
ndEnd = size
newShape := ap.shape.Clone() // the new shape
dims := ap.Dims() // reported dimensions
newStrides := BorrowInts(dims) // the new strides
var outerDim int
order := ap.o
if ap.o.IsRowMajor() || ap.IsVector() {
outerDim = 0
} else {
outerDim = len(ap.shape) - 1
}
for i := 0; i < dims; i++ {
var sl Slice
if i <= len(slices)-1 {
sl = slices[i]
}
size := ap.shape[i]
var stride int
stride = ap.strides[i]
// if ap.IsVector() {
// // handles non-vanilla vectors
// stride = ap.strides[0]
// } else {
// stride = ap.strides[i]
// }
var start, end, step int
if start, end, step, err = SliceDetails(sl, size); err != nil {
err = errors.Wrapf(err, "Unable to get slice details on slice %d with size %d: %v", i, sl, size)
return
}
// a slice where start == end is []
ndStart = ndStart + start*stride
ndEnd = ndEnd - (size-end)*stride
if step > 0 {
if newShape[i] = (end - start) / step; (end-start)%step > 0 && i > 0 {
newShape[i]++
}
newStrides[i] = stride * step
//fix
if newShape[i] <= 0 {
newShape[i] = 1
}
} else {
newShape[i] = (end - start)
newStrides[i] = stride
}
if (sl != nil && (!ap.IsVector() && i != outerDim)) || step > 1 {
order = MakeDataOrder(order, NonContiguous)
}
}
if ndEnd-ndStart == 1 {
// scalars are a special case
newAP = AP{}
newAP.SetShape() // make it a Scalar
newAP.lock()
} else {
// drop any dimension with size 1, except the last dimension
offset := 0
for d := 0; d < dims; d++ {
if newShape[d] == 1 && offset+d <= len(slices)-1 && slices[offset+d] != nil /*&& d != t.dims-1 && dims > 2*/ {
newShape = append(newShape[:d], newShape[d+1:]...)
newStrides = append(newStrides[:d], newStrides[d+1:]...)
d--
dims--
offset++
}
}
newAP = MakeAP(newShape, newStrides, order, ap.Δ)
}
return
}
// T returns the transposed metadata based on the given input
func (ap *AP) T(axes ...int) (retVal AP, a []int, err error) {
// prep axes
if len(axes) > 0 && len(axes) != ap.Dims() {
err = errors.Errorf(dimMismatch, ap.Dims(), len(axes))
return
}
dims := len(ap.shape)
if len(axes) == 0 || axes == nil {
axes = make([]int, dims)
for i := 0; i < dims; i++ {
axes[i] = dims - 1 - i
}
}
a = axes
if ap.shape.IsScalarEquiv() {
return ap.Clone(), a, noopError{}
}
// if axes is 0, 1, 2, 3... then no op
if monotonic, incr1 := IsMonotonicInts(axes); monotonic && incr1 && axes[0] == 0 {
return ap.Clone(), a, noopError{}
}
currentShape := ap.shape
currentStride := ap.strides
shape := make(Shape, len(currentShape))
strides := make([]int, len(currentStride))
switch {
case ap.IsScalar():
return
case ap.IsVector():
if axes[0] == 0 {
return
}
strides[0], strides[1] = 1, 1
shape[0], shape[1] = currentShape[1], currentShape[0]
default:
copy(shape, currentShape)
copy(strides, currentStride)
err = UnsafePermute(axes, shape, strides)
if err != nil {
err = handleNoOp(err)
}
}
o := MakeDataOrder(ap.o, Transposed)
retVal = MakeAP(shape, strides, o, ap.Δ)
retVal.fin = true
return
}
// locking and unlocking is used to ensure that the shape and stride doesn't change (it's not really safe though, as a direct mutation of the strides/shape would still mutate it, but at least the dimensions cannot change)
func (ap *AP) lock() { ap.fin = true }
func (ap *AP) unlock() { ap.fin = false }
func (ap *AP) calcStrides() []int {
switch {
case ap.o.IsRowMajor():
return ap.shape.CalcStrides()
case ap.o.IsColMajor():
return ap.shape.CalcStridesColMajor()
}
panic("unreachable")
}
// setDataOrder is a method such that any tensor that embeds *AP will have the same method
func (ap *AP) setDataOrder(o DataOrder) {
if !o.HasSameOrder(ap.o) {
ap.o = ap.o.toggleColMajor()
}
}
// TransposeIndex returns the new index given the old index
func TransposeIndex(i int, oldShape, pattern, oldStrides, newStrides []int) int {
oldCoord, err := Itol(i, oldShape, oldStrides)
if err != nil {
panic(err) // or return error?
}
/*
coordss, _ := Permute(pattern, oldCoord)
coords := coordss[0]
index, _ := Ltoi(newShape, strides, coords...)
*/
// The above is the "conceptual" algorithm.
// Too many checks above slows things down, so the below is the "optimized" edition
var index int
for i, axis := range pattern {
index += oldCoord[axis] * newStrides[i]
}
return index
}
// UntransposeIndex returns the old index given the new index
func UntransposeIndex(i int, oldShape, pattern, oldStrides, newStrides []int) int {
newPattern := make([]int, len(pattern))
for i, p := range pattern {
newPattern[p] = i
}
return TransposeIndex(i, oldShape, newPattern, oldStrides, newStrides)
}
// BroadcastStrides handles broadcasting from different shapes.
//
// Deprecated: this function will be unexported
func BroadcastStrides(destShape, srcShape Shape, destStrides, srcStrides []int) (retVal []int, err error) {
dims := len(destShape)
start := dims - len(srcShape)
if destShape.IsVector() && srcShape.IsVector() {
return []int{srcStrides[0]}, nil
}
if start < 0 {
//error
err = errors.Errorf(dimMismatch, dims, len(srcShape))
return
}
retVal = BorrowInts(len(destStrides))
for i := dims - 1; i >= start; i-- {
s := srcShape[i-start]
switch {
case s == 1:
retVal[i] = 0
case s != destShape[i]:
// error
err = errors.Errorf("Cannot broadcast from %v to %v", srcShape, destShape)
return
default:
retVal[i] = srcStrides[i-start]
}
}
for i := 0; i < start; i++ {
retVal[i] = 0
}
return
}
tensor-0.9.24/ap_test.go
package tensor
import (
//"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func dummyScalar1() AP { return AP{} }
func dummyScalar2() AP { return AP{shape: Shape{1}} }
func dummyColVec() AP {
return AP{
shape: Shape{5, 1},
strides: []int{1},
}
}
func dummyRowVec() AP {
return AP{
shape: Shape{1, 5},
strides: []int{1},
}
}
func dummyVec() AP {
return AP{
shape: Shape{5},
strides: []int{1},
}
}
func twothree() AP {
return AP{
shape: Shape{2, 3},
strides: []int{3, 1},
}
}
func twothreefour() AP {
return AP{
shape: Shape{2, 3, 4},
strides: []int{12, 4, 1},
}
}
func TestAccessPatternBasics(t *testing.T) {
assert := assert.New(t)
ap := new(AP)
ap.SetShape(1, 2)
assert.Equal(Shape{1, 2}, ap.Shape())
assert.Equal([]int{2, 1}, ap.Strides())
assert.Equal(2, ap.Dims())
assert.Equal(2, ap.Size())
ap.SetShape(2, 3, 2)
assert.Equal(Shape{2, 3, 2}, ap.Shape())
assert.Equal([]int{6, 2, 1}, ap.Strides())
assert.Equal(12, ap.Size())
ap.lock()
ap.SetShape(1, 2, 3)
assert.Equal(Shape{2, 3, 2}, ap.shape)
assert.Equal([]int{6, 2, 1}, ap.strides)
ap.unlock()
ap.SetShape(1, 2)
assert.Equal(Shape{1, 2}, ap.Shape())
assert.Equal([]int{2, 1}, ap.Strides())
assert.Equal(2, ap.Dims())
assert.Equal(2, ap.Size())
if ap.String() != "Shape: (1, 2), Stride: [2 1], Lock: false" {
t.Errorf("AP formatting error. Got %q", ap.String())
}
ap2 := ap.Clone()
assert.Equal(*ap, ap2)
}
func TestAccessPatternIsX(t *testing.T) {
assert := assert.New(t)
var ap AP
ap = dummyScalar1()
assert.True(ap.IsScalar())
assert.True(ap.IsScalarEquiv())
assert.False(ap.IsVector())
assert.False(ap.IsColVec())
assert.False(ap.IsRowVec())
ap = dummyScalar2()
assert.False(ap.IsScalar())
assert.True(ap.IsScalarEquiv())
assert.True(ap.IsVectorLike())
assert.True(ap.IsVector())
assert.False(ap.IsColVec())
assert.False(ap.IsRowVec())
ap = dummyColVec()
assert.True(ap.IsColVec())
assert.True(ap.IsVector())
assert.False(ap.IsRowVec())
assert.False(ap.IsScalar())
ap = dummyRowVec()
assert.True(ap.IsRowVec())
assert.True(ap.IsVector())
assert.False(ap.IsColVec())
assert.False(ap.IsScalar())
ap = twothree()
assert.True(ap.IsMatrix())
assert.False(ap.IsScalar())
assert.False(ap.IsVector())
assert.False(ap.IsRowVec())
assert.False(ap.IsColVec())
}
func TestAccessPatternT(t *testing.T) {
assert := assert.New(t)
var ap, apT AP
var axes []int
var err error
ap = twothree()
// test no axes
apT, axes, err = ap.T()
if err != nil {
t.Error(err)
}
assert.Equal(Shape{3, 2}, apT.shape)
assert.Equal([]int{1, 3}, apT.strides)
assert.Equal([]int{1, 0}, axes)
assert.Equal(2, apT.Dims())
// test no op
apT, _, err = ap.T(0, 1)
if err != nil {
if _, ok := err.(NoOpError); !ok {
t.Error(err)
}
}
// test 3D
ap = twothreefour()
apT, axes, err = ap.T(2, 0, 1)
if err != nil {
t.Error(err)
}
assert.Equal(Shape{4, 2, 3}, apT.shape)
assert.Equal([]int{1, 12, 4}, apT.strides)
assert.Equal([]int{2, 0, 1}, axes)
assert.Equal(3, apT.Dims())
// test stupid axes
_, _, err = ap.T(1, 2, 3)
if err == nil {
t.Error("Expected an error")
}
}
var sliceTests = []struct {
name string
shape Shape
slices []Slice
correctStart int
correctEnd int
correctShape Shape
correctStride []int
contiguous bool
}{
// vectors
{"a[0]", Shape{5}, []Slice{S(0)}, 0, 1, ScalarShape(), nil, true},
{"a[0:2]", Shape{5}, []Slice{S(0, 2)}, 0, 2, Shape{2}, []int{1}, true},
{"a[1:3]", Shape{5}, []Slice{S(1, 3)}, 1, 3, Shape{2}, []int{1}, true},
{"a[1:5:2]", Shape{5}, []Slice{S(1, 5, 2)}, 1, 5, Shape{2}, []int{2}, false},
// matrix
{"A[0]", Shape{2, 3}, []Slice{S(0)}, 0, 3, Shape{1, 3}, []int{1}, true},
{"A[1:3]", Shape{4, 5}, []Slice{S(1, 3)}, 5, 15, Shape{2, 5}, []int{5, 1}, true},
{"A[0:10] (intentionally over)", Shape{4, 5}, []Slice{S(0, 10)}, 0, 20, Shape{4, 5}, []int{5, 1}, true}, // as if nothing happened
{"A[:, 1:3]", Shape{4, 5}, []Slice{nil, S(1, 3)}, 1, 18, Shape{4, 2}, []int{5, 1}, false},
// tensor
{"tensor[0, :, :]", Shape{1, 2, 2}, []Slice{rs{0, 1, 1}, nil, nil}, 0, 4, Shape{2, 2}, []int{2, 1}, true},
{"tensor[:, 0, :]", Shape{1, 2, 2}, []Slice{nil, rs{0, 1, 1}, nil}, 0, 2, Shape{1, 2}, []int{4, 1}, false},
{"tensor[0, :, :, :]", Shape{1, 1, 2, 2}, []Slice{rs{0, 1, 1}, nil, nil, nil}, 0, 4, Shape{1, 2, 2}, []int{4, 2, 1}, true},
{"tensor[0,]", Shape{1, 1, 2, 2}, []Slice{rs{0, 1, 1}}, 0, 4, Shape{1, 2, 2}, []int{4, 2, 1}, true},
}
func TestAccessPatternS(t *testing.T) {
assert := assert.New(t)
var ap, apS AP
var ndStart, ndEnd int
var err error
for _, sts := range sliceTests {
ap = MakeAP(sts.shape, sts.shape.CalcStrides(), 0, 0)
if apS, ndStart, ndEnd, err = ap.S(sts.shape.TotalSize(), sts.slices...); err != nil {
t.Errorf("%v errored: %v", sts.name, err)
continue
}
assert.Equal(sts.correctStart, ndStart, "Wrong start: %v. Want %d Got %d", sts.name, sts.correctStart, ndStart)
assert.Equal(sts.correctEnd, ndEnd, "Wrong end: %v. Want %d Got %d", sts.name, sts.correctEnd, ndEnd)
assert.True(sts.correctShape.Eq(apS.shape), "Wrong shape: %v. Want %v. Got %v", sts.name, sts.correctShape, apS.shape)
assert.Equal(sts.correctStride, apS.strides, "Wrong strides: %v. Want %v. Got %v", sts.name, sts.correctStride, apS.strides)
assert.Equal(sts.contiguous, apS.DataOrder().IsContiguous(), "Wrong contiguity for %v Want %t.", sts.name, sts.contiguous)
}
}
func TestTransposeIndex(t *testing.T) {
var newInd int
var oldShape Shape
var pattern, oldStrides, newStrides, corrects []int
/*
(2,3)->(3,2)
0, 1, 2
3, 4, 5
becomes
0, 3
1, 4
2, 5
1 -> 2
2 -> 4
3 -> 1
4 -> 3
0 and 5 stay the same
*/
oldShape = Shape{2, 3}
pattern = []int{1, 0}
oldStrides = []int{3, 1}
newStrides = []int{2, 1}
corrects = []int{0, 2, 4, 1, 3, 5}
for i := 0; i < 6; i++ {
newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides)
if newInd != corrects[i] {
t.Errorf("Want %d, got %d instead", corrects[i], newInd)
}
}
/*
(2,3,4) -(1,0,2)-> (3,2,4)
0, 1, 2, 3
4, 5, 6, 7
8, 9, 10, 11
12, 13, 14, 15
16, 17, 18, 19
20, 21, 22, 23
becomes
0, 1, 2, 3
12, 13, 14, 15,
4, 5, 6, 7
16, 17, 18, 19
8, 9, 10, 11
20, 21, 22, 23
*/
oldShape = Shape{2, 3, 4}
pattern = []int{1, 0, 2}
oldStrides = []int{12, 4, 1}
newStrides = []int{8, 4, 1}
corrects = []int{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23}
for i := 0; i < len(corrects); i++ {
newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides)
if newInd != corrects[i] {
t.Errorf("Want %d, got %d instead", corrects[i], newInd)
}
}
/*
(2,3,4) -(2,0,1)-> (4,2,3)
0, 1, 2, 3
4, 5, 6, 7
8, 9, 10, 11
12, 13, 14, 15
16, 17, 18, 19
20, 21, 22, 23
becomes
0, 4, 8
12, 16, 20
1, 5, 9
13, 17, 21
2, 6, 10
14, 18, 22
3, 7, 11
15, 19, 23
*/
oldShape = Shape{2, 3, 4}
pattern = []int{2, 0, 1}
oldStrides = []int{12, 4, 1}
newStrides = []int{6, 3, 1}
corrects = []int{0, 6, 12, 18, 1, 7, 13, 19, 2, 8, 14, 20, 3, 9, 15, 21, 4, 10, 16, 22, 5, 11, 17, 23}
for i := 0; i < len(corrects); i++ {
newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides)
if newInd != corrects[i] {
t.Errorf("Want %d, got %d instead", corrects[i], newInd)
}
}
}
func TestUntransposeIndex(t *testing.T) {
var newInd int
var oldShape Shape
var pattern, oldStrides, newStrides, corrects []int
// vice versa
oldShape = Shape{3, 2}
oldStrides = []int{2, 1}
newStrides = []int{3, 1}
corrects = []int{0, 3, 1, 4, 2, 5}
pattern = []int{1, 0}
for i := 0; i < 6; i++ {
newInd = UntransposeIndex(i, oldShape, pattern, oldStrides, newStrides)
if newInd != corrects[i] {
t.Errorf("Want %d, got %d instead", corrects[i], newInd)
}
}
oldShape = Shape{3, 2, 4}
oldStrides = []int{8, 4, 1}
newStrides = []int{12, 4, 1}
pattern = []int{1, 0, 2}
corrects = []int{0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}
for i := 0; i < len(corrects); i++ {
newInd = TransposeIndex(i, oldShape, pattern, oldStrides, newStrides)
if newInd != corrects[i] {
t.Errorf("Want %d, got %d instead", corrects[i], newInd)
}
}
oldShape = Shape{4, 2, 3}
pattern = []int{2, 0, 1}
newStrides = []int{12, 4, 1}
oldStrides = []int{6, 3, 1}
corrects = []int{0, 4, 8, 12, 16, 20}
for i := 0; i < len(corrects); i++ {
newInd = UntransposeIndex(i, oldShape, pattern, oldStrides, newStrides)
if newInd != corrects[i] {
t.Errorf("Want %d, got %d instead", corrects[i], newInd)
}
}
}
func TestBroadcastStrides(t *testing.T) {
ds := Shape{4, 4}
ss := Shape{4}
dst := []int{4, 1}
sst := []int{1}
st, err := BroadcastStrides(ds, ss, dst, sst)
if err != nil {
t.Error(err)
}
t.Log(st)
}
tensor-0.9.24/api_arith.go 0000664 0000000 0000000 00000046366 14265126151 0015431 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
)
// exported API for arithmetic and the stupidly crazy amount of overloaded semantics
// Add performs a pointwise a+b. a and b can each be a scalar value or a Tensor
//
// If both operands are Tensor, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
//
// Add performs elementwise addition on the Tensor(s). These operations are supported:
// Add(*Dense, scalar)
// Add(scalar, *Dense)
// Add(*Dense, *Dense)
// If the Unsafe flag is passed in, the data of the first tensor will be overwritten
func Add(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var adder Adder
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor addition
if oe != nil {
return oe.Add(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.Add(at, bt, opts...)
}
if adder, ok = at.Engine().(Adder); ok {
return adder.Add(at, bt, opts...)
}
if adder, ok = bt.Engine().(Adder); ok {
return adder.Add(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support Add")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.AddScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.AddScalar(at, bt, leftTensor, opts...)
}
if adder, ok = at.Engine().(Adder); ok {
return adder.AddScalar(at, bt, leftTensor, opts...)
}
if adder, ok = bt.Engine().(Adder); ok {
return adder.AddScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support Add")
}
default:
if oe != nil {
return oe.AddScalar(at, bt, true, opts...)
}
if adder, ok = at.Engine().(Adder); ok {
return adder.AddScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support Add")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.AddScalar(bt, at, false, opts...)
}
if adder, ok = bt.Engine().(Adder); ok {
return adder.AddScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support Add")
default:
return nil, errors.Errorf("Cannot perform Add of %T and %T", a, b)
}
}
panic("Unreachable")
}
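// addUsageSketch is an editorial sketch (not part of the original API) of the three
// call patterns listed in the Add documentation above, plus the Unsafe variant. It
// only uses constructors and options defined elsewhere in this package (New,
// WithShape, WithBacking, UseUnsafe); the shapes and values are arbitrary.
func addUsageSketch() error {
	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	b := New(WithShape(2, 2), WithBacking([]float64{10, 20, 30, 40}))

	// Add(*Dense, *Dense): elementwise a + b; the shapes must match.
	if _, err := Add(a, b); err != nil {
		return err
	}
	// Add(*Dense, scalar) and Add(scalar, *Dense): the scalar is added to every element.
	if _, err := Add(a, float64(2)); err != nil {
		return err
	}
	if _, err := Add(float64(2), a); err != nil {
		return err
	}
	// With UseUnsafe, the data of the first tensor (a) is overwritten with the result.
	if _, err := Add(a, b, UseUnsafe()); err != nil {
		return err
	}
	return nil
}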
// Sub performs elementwise subtraction on the Tensor(s). These operations are supported:
// Sub(*Dense, scalar)
// Sub(scalar, *Dense)
// Sub(*Dense, *Dense)
// If the Unsafe flag is passed in, the data of the first tensor will be overwritten
func Sub(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var suber Suber
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor subtraction
if oe != nil {
return oe.Sub(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.Sub(at, bt, opts...)
}
if suber, ok = at.Engine().(Suber); ok {
return suber.Sub(at, bt, opts...)
}
if suber, ok = bt.Engine().(Suber); ok {
return suber.Sub(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support Sub")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.SubScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.SubScalar(at, bt, leftTensor, opts...)
}
if suber, ok = at.Engine().(Suber); ok {
return suber.SubScalar(at, bt, leftTensor, opts...)
}
if suber, ok = bt.Engine().(Suber); ok {
return suber.SubScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support Sub")
}
default:
if oe != nil {
return oe.SubScalar(at, bt, true, opts...)
}
if suber, ok = at.Engine().(Suber); ok {
return suber.SubScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support Sub")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.SubScalar(bt, at, false, opts...)
}
if suber, ok = bt.Engine().(Suber); ok {
return suber.SubScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support Sub")
default:
return nil, errors.Errorf("Cannot perform Sub of %T and %T", a, b)
}
}
panic("Unreachable")
}
// Mul performs elementwise multiplication on the Tensor(s). These operations are supported:
// Mul(*Dense, scalar)
// Mul(scalar, *Dense)
// Mul(*Dense, *Dense)
// If the Unsafe flag is passed in, the data of the first tensor will be overwritten
func Mul(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var muler Muler
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor multiplication
if oe != nil {
return oe.Mul(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.Mul(at, bt, opts...)
}
if muler, ok = at.Engine().(Muler); ok {
return muler.Mul(at, bt, opts...)
}
if muler, ok = bt.Engine().(Muler); ok {
return muler.Mul(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support Mul")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.MulScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.MulScalar(at, bt, leftTensor, opts...)
}
if muler, ok = at.Engine().(Muler); ok {
return muler.MulScalar(at, bt, leftTensor, opts...)
}
if muler, ok = bt.Engine().(Muler); ok {
return muler.MulScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support Mul")
}
default: // a Tensor * b interface
if oe != nil {
return oe.MulScalar(at, bt, true, opts...)
}
if muler, ok = at.Engine().(Muler); ok {
return muler.MulScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support Mul")
}
default:
switch bt := b.(type) {
case Tensor: // b Tensor * a interface
if oe = bt.standardEngine(); oe != nil {
return oe.MulScalar(bt, at, false, opts...)
}
if muler, ok = bt.Engine().(Muler); ok {
return muler.MulScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support Mul")
default: // b interface * a interface
return nil, errors.Errorf("Cannot perform Mul of %T and %T", a, b)
}
}
panic("Unreachable")
}
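// mulFuncOptSketch is an editorial sketch (not part of the original API) of how the
// common FuncOpts compose with the arithmetic functions, using Mul as the example.
// WithReuse, WithIncr and UseUnsafe are the package's own options; the shapes and
// values are arbitrary.
func mulFuncOptSketch() error {
	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))

	// WithReuse: the result is written into the provided tensor of the same shape.
	reuse := New(Of(Float64), WithShape(2, 2))
	if _, err := Mul(a, b, WithReuse(reuse)); err != nil {
		return err
	}
	// WithIncr: the result is accumulated into incr, i.e. incr += a * b.
	incr := New(Of(Float64), WithShape(2, 2))
	if _, err := Mul(a, b, WithIncr(incr)); err != nil {
		return err
	}
	// UseUnsafe: a's data is overwritten with a * b.
	if _, err := Mul(a, b, UseUnsafe()); err != nil {
		return err
	}
	return nil
}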
// Div performs elementwise division on the Tensor(s). These operations are supported:
// Div(*Dense, scalar)
// Div(scalar, *Dense)
// Div(*Dense, *Dense)
// If the Unsafe flag is passed in, the data of the first tensor will be overwritten
func Div(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var diver Diver
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor division
if oe != nil {
return oe.Div(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.Div(at, bt, opts...)
}
if diver, ok = at.Engine().(Diver); ok {
return diver.Div(at, bt, opts...)
}
if diver, ok = bt.Engine().(Diver); ok {
return diver.Div(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support Div")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.DivScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.DivScalar(at, bt, leftTensor, opts...)
}
if diver, ok = at.Engine().(Diver); ok {
return diver.DivScalar(at, bt, leftTensor, opts...)
}
if diver, ok = bt.Engine().(Diver); ok {
return diver.DivScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support Div")
}
default:
if oe != nil {
return oe.DivScalar(at, bt, true, opts...)
}
if diver, ok = at.Engine().(Diver); ok {
return diver.DivScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support Div")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.DivScalar(bt, at, false, opts...)
}
if diver, ok = bt.Engine().(Diver); ok {
return diver.DivScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support Div")
default:
return nil, errors.Errorf("Cannot perform Div of %T and %T", a, b)
}
}
panic("Unreachable")
}
// Pow performs elementwise exponentiation on the Tensor(s). These operations are supported:
// Pow(*Dense, scalar)
// Pow(scalar, *Dense)
// Pow(*Dense, *Dense)
// If the Unsafe flag is passed in, the data of the first tensor will be overwritten
func Pow(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var power Power
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor exponentiation
if oe != nil {
return oe.Pow(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.Pow(at, bt, opts...)
}
if power, ok = at.Engine().(Power); ok {
return power.Pow(at, bt, opts...)
}
if power, ok = bt.Engine().(Power); ok {
return power.Pow(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support Pow")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.PowScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.PowScalar(at, bt, leftTensor, opts...)
}
if power, ok = at.Engine().(Power); ok {
return power.PowScalar(at, bt, leftTensor, opts...)
}
if power, ok = bt.Engine().(Power); ok {
return power.PowScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support Pow")
}
default:
if oe != nil {
return oe.PowScalar(at, bt, true, opts...)
}
if power, ok = at.Engine().(Power); ok {
return power.PowScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support Pow")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.PowScalar(bt, at, false, opts...)
}
if power, ok = bt.Engine().(Power); ok {
return power.PowScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support Pow")
default:
return nil, errors.Errorf("Cannot perform Pow of %T and %T", a, b)
}
}
panic("Unreachable")
}
// Mod performs elementwise modulo on the Tensor(s). These operations are supported:
// Mod(*Dense, scalar)
// Mod(scalar, *Dense)
// Mod(*Dense, *Dense)
// If the Unsafe flag is passed in, the data of the first tensor will be overwritten
func Mod(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var moder Moder
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor modulo
if oe != nil {
return oe.Mod(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.Mod(at, bt, opts...)
}
if moder, ok = at.Engine().(Moder); ok {
return moder.Mod(at, bt, opts...)
}
if moder, ok = bt.Engine().(Moder); ok {
return moder.Mod(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support Mod")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.ModScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.ModScalar(at, bt, leftTensor, opts...)
}
if moder, ok = at.Engine().(Moder); ok {
return moder.ModScalar(at, bt, leftTensor, opts...)
}
if moder, ok = bt.Engine().(Moder); ok {
return moder.ModScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support Mod")
}
default:
if oe != nil {
return oe.ModScalar(at, bt, true, opts...)
}
if moder, ok = at.Engine().(Moder); ok {
return moder.ModScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support Mod")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.ModScalar(bt, at, false, opts...)
}
if moder, ok = bt.Engine().(Moder); ok {
return moder.ModScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support Mod")
default:
return nil, errors.Errorf("Cannot perform Mod of %T and %T", a, b)
}
}
panic("Unreachable")
}
// Dot is a highly opinionated API for performing dot product operations on two *Denses, a and b.
// This function is opinionated chiefly in how it treats operations involving vectors.
// Vectors in this package come in two flavours - column vectors and row vectors. Column vectors have shape (x, 1), while row vectors have shape (1, x).
//
// As such, it is easy to assume that performing a linalg operation on vectors would follow the same rules (i.e. shapes have to be aligned for things to work).
// For the most part in this package, this is true. This function is one of the few notable exceptions.
//
// Here I give three specific examples of how the expectations of vector operations will differ.
// Given two vectors a and b, both with shape (4, 1), Dot() will perform an inner product as if the shapes were (1, 4) and (4, 1). This will result in a scalar value.
// Given matrix A and vector b with shapes (2, 4) and (1, 4), Dot() will perform a matrix-vector multiplication as if the shapes were (2,4) and (4,1). This will result in a column vector with shape (2,1).
// Given vector a and matrix B with shapes (3, 1) and (3, 2), Dot() will perform a matrix-vector multiplication as if it were Bᵀ * a.
//
// The main reason this opinionated route was taken is the author's familiarity with NumPy, and general laziness in translating existing machine learning algorithms
// to fit the API of the package.
func Dot(x, y Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if xdottir, ok := x.Engine().(Dotter); ok {
return xdottir.Dot(x, y, opts...)
}
if ydottir, ok := y.Engine().(Dotter); ok {
return ydottir.Dot(x, y, opts...)
}
return nil, errors.New("Neither x's nor y's engines support Dot")
}
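// dotUsageSketch is an editorial sketch (not part of the original API) of the three
// vector cases described in the Dot documentation above. The shapes follow that
// documentation; the backing values are arbitrary.
func dotUsageSketch() error {
	// Two column vectors of shape (4,1): Dot performs an inner product and returns a scalar value.
	a := New(WithShape(4, 1), WithBacking([]float64{1, 2, 3, 4}))
	b := New(WithShape(4, 1), WithBacking([]float64{5, 6, 7, 8}))
	if _, err := Dot(a, b); err != nil {
		return err
	}
	// Matrix (2,4) and row vector (1,4): treated as (2,4) × (4,1), yielding a (2,1) column vector.
	A := New(WithShape(2, 4), WithBacking([]float64{1, 2, 3, 4, 5, 6, 7, 8}))
	v := New(WithShape(1, 4), WithBacking([]float64{1, 0, 1, 0}))
	if _, err := Dot(A, v); err != nil {
		return err
	}
	// Vector (3,1) and matrix (3,2): treated as Bᵀ * a.
	u := New(WithShape(3, 1), WithBacking([]float64{1, 2, 3}))
	B := New(WithShape(3, 2), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	if _, err := Dot(u, B); err != nil {
		return err
	}
	return nil
}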
// FMA performs the fused multiply-add Y = A * X + Y. A and Y are Tensors; X may be a Tensor or a scalar value.
func FMA(a Tensor, x interface{}, y Tensor) (retVal Tensor, err error) {
if xTensor, ok := x.(Tensor); ok {
if oe := a.standardEngine(); oe != nil {
return oe.FMA(a, xTensor, y)
}
if oe := xTensor.standardEngine(); oe != nil {
return oe.FMA(a, xTensor, y)
}
if oe := y.standardEngine(); oe != nil {
return oe.FMA(a, xTensor, y)
}
if e, ok := a.Engine().(FMAer); ok {
return e.FMA(a, xTensor, y)
}
if e, ok := xTensor.Engine().(FMAer); ok {
return e.FMA(a, xTensor, y)
}
if e, ok := y.Engine().(FMAer); ok {
return e.FMA(a, xTensor, y)
}
} else {
if oe := a.standardEngine(); oe != nil {
return oe.FMAScalar(a, x, y)
}
if oe := y.standardEngine(); oe != nil {
return oe.FMAScalar(a, x, y)
}
if e, ok := a.Engine().(FMAer); ok {
return e.FMAScalar(a, x, y)
}
if e, ok := y.Engine().(FMAer); ok {
return e.FMAScalar(a, x, y)
}
}
return Mul(a, x, WithIncr(y))
}
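// fmaUsageSketch is an editorial sketch (not part of the original API) of the
// Y = A * X + Y contract, and of the fallback spelled out at the end of FMA: when no
// engine provides a fused implementation, FMA(a, x, y) behaves like Mul(a, x, WithIncr(y)).
// Shapes and values are arbitrary.
func fmaUsageSketch() error {
	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	x := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
	y := New(WithShape(2, 2), WithBacking([]float64{1, 1, 1, 1}))

	// Fused form: the result of a*x accumulates into y.
	if _, err := FMA(a, x, y); err != nil {
		return err
	}
	// Equivalent accumulate form using the generic API.
	y2 := New(WithShape(2, 2), WithBacking([]float64{1, 1, 1, 1}))
	if _, err := Mul(a, x, WithIncr(y2)); err != nil {
		return err
	}
	// X may also be a scalar, in which case a is scaled elementwise before accumulating into y.
	if _, err := FMA(a, float64(2), y); err != nil {
		return err
	}
	return nil
}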
// MatMul performs matrix-matrix multiplication between two Tensors
func MatMul(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if a.Dtype() != b.Dtype() {
err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())
return
}
switch at := a.(type) {
case *Dense:
bt := b.(*Dense)
return at.MatMul(bt, opts...)
}
panic("Unreachable")
}
// MatVecMul performs matrix-vector multiplication between two Tensors. `a` is expected to be a matrix, and `b` is expected to be a vector
func MatVecMul(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if a.Dtype() != b.Dtype() {
err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())
return
}
switch at := a.(type) {
case *Dense:
bt := b.(*Dense)
return at.MatVecMul(bt, opts...)
}
panic("Unreachable")
}
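// matMulUsageSketch is an editorial sketch (not part of the original API) of MatMul
// and MatVecMul. Both operands must share a Dtype; the shapes and values are arbitrary.
func matMulUsageSketch() error {
	// (2,3) × (3,4) matrix-matrix multiplication yields a (2,4) result.
	A := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	B := New(WithShape(3, 4), WithBacking([]float64{
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0,
	}))
	if _, err := MatMul(A, B); err != nil {
		return err
	}
	// (2,3) matrix times a length-3 vector yields a length-2 vector.
	v := New(WithShape(3), WithBacking([]float64{1, 1, 1}))
	if _, err := MatVecMul(A, v); err != nil {
		return err
	}
	return nil
}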
// Inner finds the inner product of two vector Tensors. Both arguments to the function are expected to be vectors.
func Inner(a, b Tensor) (retVal interface{}, err error) {
if a.Dtype() != b.Dtype() {
err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())
return
}
switch at := a.(type) {
case *Dense:
bt := b.(*Dense)
return at.Inner(bt)
}
panic("Unreachable")
}
// Outer performs the outer product of two vector Tensors. Both arguments to the function are expected to be vectors.
func Outer(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if a.Dtype() != b.Dtype() {
err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())
return
}
switch at := a.(type) {
case *Dense:
bt := b.(*Dense)
return at.Outer(bt, opts...)
}
panic("Unreachable")
}
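// innerOuterUsageSketch is an editorial sketch (not part of the original API) of
// Inner and Outer. Both expect vector operands of the same Dtype; the values are arbitrary.
func innerOuterUsageSketch() error {
	a := New(WithShape(3), WithBacking([]float64{1, 2, 3}))
	b := New(WithShape(3), WithBacking([]float64{4, 5, 6}))

	// Inner returns the scalar inner product as an interface{}
	// (for these values, a float64: 1*4 + 2*5 + 3*6 = 32).
	if _, err := Inner(a, b); err != nil {
		return err
	}
	// Outer returns the outer product, a (3,3) matrix for these operands.
	if _, err := Outer(a, b); err != nil {
		return err
	}
	return nil
}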
// Contract performs a contraction of given tensors along given axes
func Contract(a, b Tensor, aAxes, bAxes []int) (retVal Tensor, err error) {
if a.Dtype() != b.Dtype() {
err = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())
return
}
switch at := a.(type) {
case *Dense:
bt := b.(*Dense)
return at.TensorMul(bt, aAxes, bAxes)
default:
panic("Unreachable")
}
}
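// contractUsageSketch is an editorial sketch (not part of the original API) of
// Contract. Contracting a (2,3) tensor with a (3,4) tensor along axis 1 of the first
// and axis 0 of the second is the tensor-contraction spelling of a matrix product,
// so the result has shape (2,4). Values are arbitrary.
func contractUsageSketch() error {
	a := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	b := New(WithShape(3, 4), WithBacking([]float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
		9, 10, 11, 12,
	}))
	if _, err := Contract(a, b, []int{1}, []int{0}); err != nil {
		return err
	}
	return nil
}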
tensor-0.9.24/api_arith_generated_test.go 0000664 0000000 0000000 00000130033 14265126151 0020467 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"testing"
"testing/quick"
)
func TestAdd(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b)
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
func TestSub(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b)
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestMul(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b)
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDiv(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b)
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestPow(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b)
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestAdd_unsafe(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
func TestSub_unsafe(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestMul_unsafe(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDiv_unsafe(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestPow_unsafe(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestAdd_reuse(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
func TestSub_reuse(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestMul_reuse(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDiv_reuse(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestPow_reuse(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestAdd_incr(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
func TestSub_incr(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestMul_incr(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDiv_incr(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestPow_incr(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestAddScalar(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b)
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(b, a)
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Add(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Add (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Add(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Add (tensor as right, scalar as left) failed: %v", err)
}
}
func TestSubScalar(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b)
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
inv2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(b, a)
if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Sub(b, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Sub(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Sub (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Sub(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Sub (tensor as right, scalar as left) failed: %v", err)
}
}
func TestMulScalar(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b)
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(b, a)
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Mul(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Mul (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Mul(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Mul (tensor as right, scalar as left) failed: %v", err)
}
}
func TestDivScalar(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b)
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Div(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Div (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Div(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Div (tensor as right, scalar as left) failed: %v", err)
}
}
func TestPowScalar(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b)
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Pow(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Pow (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Pow(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Pow (tensor as right, scalar as left) failed: %v", err)
}
}
func TestAddScalar_unsafe(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(b, a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
}
func TestSubScalar_unsafe(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
inv2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(b, a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Sub(b, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
}
}
func TestMulScalar_unsafe(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(b, a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDivScalar_unsafe(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
}
func TestPowScalar_unsafe(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
}
func TestAddScalar_reuse(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(b, a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
}
func TestSubScalar_reuse(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
inv2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(b, a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Sub(b, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
}
}
func TestMulScalar_reuse(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(b, a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDivScalar_reuse(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
}
func TestPowScalar_reuse(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
}
func TestAddScalar_incr(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := Add(b, a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
}
func TestSubScalar_incr(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := Sub(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Add(ret, b, UseUnsafe())
if err != nil {
t.Errorf("Failed to apply the inverse Add: %v", err)
return false
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
}
func TestMulScalar_incr(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := Mul(b, a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDivScalar_incr(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := Div(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = Mul(ret, b, UseUnsafe())
if err != nil {
t.Errorf("Failed to apply the inverse Mul: %v", err)
return false
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
}
func TestPowScalar_incr(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := Pow(a, b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
}
tensor-0.9.24/api_arith_test.go 0000664 0000000 0000000 00000027457 14265126151 0016470 0 ustar 00root root 0000000 0000000 package tensor
import (
"log"
"math/rand"
"testing"
"testing/quick"
"time"
"github.com/stretchr/testify/assert"
)
// This file contains the tests for API functions that aren't generated by genlib
func TestMod(t *testing.T) {
a := New(WithBacking([]float64{1, 2, 3, 4}))
b := New(WithBacking([]float64{1, 1, 1, 1}))
var correct interface{} = []float64{0, 0, 0, 0}
// vec-vec
res, err := Mod(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar
if res, err = Mod(a, 1.0); err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
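// TestFMA checks that FMA(a, x, y) agrees with Mul(a, x, WithIncr(y)) (i.e. a fused y += a*x),
// first as a quick.Check property over arbitrary Dense values, then against the concrete
// Float64Engine and Float32Engine for both vector-vector and vector-scalar operands.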
func TestFMA(t *testing.T) {
same := func(q *Dense) bool {
a := q.Clone().(*Dense)
x := q.Clone().(*Dense)
y := New(Of(q.Dtype()), WithShape(q.Shape().Clone()...))
y.Memset(identityVal(100, q.Dtype()))
WithEngine(q.Engine())(y)
y2 := y.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok1 := q.Engine().(FMAer)
_, ok2 := q.Engine().(Muler)
_, ok3 := q.Engine().(Adder)
we = we || (!ok1 && (!ok2 || !ok3))
f, err := FMA(a, x, y)
if err, retEarly := qcErrCheck(t, "FMA#1", a, x, we, err); retEarly {
if err != nil {
log.Printf("q.Engine() %T", q.Engine())
return false
}
return true
}
we, _ = willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
wi, err := Mul(a, x, WithIncr(y2))
if err, retEarly := qcErrCheck(t, "FMA#2", a, x, we, err); retEarly {
if err != nil {
return false
}
return true
}
return qcEqCheck(t, q.Dtype(), willFailEq, wi, f)
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(same, &quick.Config{Rand: r}); err != nil {
t.Error(err)
}
// specific engines
var eng Engine
// FLOAT64 ENGINE
// vec-vec
eng = Float64Engine{}
a := New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng))
x := New(WithBacking(Range(Float64, 1, 101)), WithEngine(eng))
y := New(Of(Float64), WithShape(100), WithEngine(eng))
f, err := FMA(a, x, y)
if err != nil {
t.Fatal(err)
}
a2 := New(WithBacking(Range(Float64, 0, 100)))
x2 := New(WithBacking(Range(Float64, 1, 101)))
y2 := New(Of(Float64), WithShape(100))
f2, err := Mul(a2, x2, WithIncr(y2))
if err != nil {
t.Fatal(err)
}
assert.Equal(t, f.Data(), f2.Data())
// vec-scalar
a = New(WithBacking(Range(Float64, 0, 100)), WithEngine(eng))
y = New(Of(Float64), WithShape(100))
if f, err = FMA(a, 2.0, y); err != nil {
t.Fatal(err)
}
a2 = New(WithBacking(Range(Float64, 0, 100)))
y2 = New(Of(Float64), WithShape(100))
if f2, err = Mul(a2, 2.0, WithIncr(y2)); err != nil {
t.Fatal(err)
}
assert.Equal(t, f.Data(), f2.Data())
// FLOAT32 engine
eng = Float32Engine{}
a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng))
x = New(WithBacking(Range(Float32, 1, 101)), WithEngine(eng))
y = New(Of(Float32), WithShape(100), WithEngine(eng))
f, err = FMA(a, x, y)
if err != nil {
t.Fatal(err)
}
a2 = New(WithBacking(Range(Float32, 0, 100)))
x2 = New(WithBacking(Range(Float32, 1, 101)))
y2 = New(Of(Float32), WithShape(100))
f2, err = Mul(a2, x2, WithIncr(y2))
if err != nil {
t.Fatal(err)
}
assert.Equal(t, f.Data(), f2.Data())
// vec-scalar
a = New(WithBacking(Range(Float32, 0, 100)), WithEngine(eng))
y = New(Of(Float32), WithShape(100))
if f, err = FMA(a, float32(2), y); err != nil {
t.Fatal(err)
}
a2 = New(WithBacking(Range(Float32, 0, 100)))
y2 = New(Of(Float32), WithShape(100))
if f2, err = Mul(a2, float32(2), WithIncr(y2)); err != nil {
t.Fatal(err)
}
assert.Equal(t, f.Data(), f2.Data())
}
func TestMulScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{2}))
b := New(WithBacking([]float64{3}))
var correct interface{} = 6.0
res, err := Mul(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Mul(b, a)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{3, 2}))
b = New(WithBacking([]float64{2}))
correct = []float64{6, 4}
res, err = Mul(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Mul(b, a)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{3, 5}))
b = New(WithBacking([]float64{7, 2}))
correct = []float64{21, 10}
res, err = Mul(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Mul(b, a)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Interface - tensor
ai := 2.0
b = NewDense(Float64, Shape{1, 1}, WithBacking([]float64{3}))
correct = []float64{6.0}
res, err = Mul(ai, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Commutativity
res, err = Mul(b, ai)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestDivScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = 3.0
res, err := Div(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{6, 4}))
b = New(WithBacking([]float64{2}))
correct = []float64{3, 2}
res, err = Div(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{6}))
b = New(WithBacking([]float64{3, 2}))
correct = []float64{2, 3}
res, err = Div(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 10}))
b = New(WithBacking([]float64{7, 2}))
correct = []float64{3, 5}
res, err = Div(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// interface-scalar
ai := 6.0
b = New(WithBacking([]float64{2}))
correct = 3.0
res, err = Div(ai, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-interface
a = New(WithBacking([]float64{6}))
bi := 2.0
correct = 3.0
res, err = Div(a, bi)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestAddScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{2}))
b := New(WithBacking([]float64{3}))
var correct interface{} = 5.0
res, err := Add(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Add(b, a)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{3, 2}))
b = New(WithBacking([]float64{2}))
correct = []float64{5, 4}
res, err = Add(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Add(b, a)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{3, 5}))
b = New(WithBacking([]float64{7, 2}))
correct = []float64{10, 7}
res, err = Add(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Add(b, a)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// interface-scalar
ai := 2.0
b = New(WithBacking([]float64{3}))
correct = 5.0
res, err = Add(ai, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// Test commutativity
res, err = Add(b, ai)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestSubScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = 4.0
res, err := Sub(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{6, 4}))
b = New(WithBacking([]float64{2}))
correct = []float64{4, 2}
res, err = Sub(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{6}))
b = New(WithBacking([]float64{3, 2}))
correct = []float64{3, 4}
res, err = Sub(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 10}))
b = New(WithBacking([]float64{7, 2}))
correct = []float64{14, 8}
res, err = Sub(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// interface-scalar
ai := 6.0
b = New(WithBacking([]float64{2}))
correct = 4.0
res, err = Sub(ai, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-interface
a = New(WithBacking([]float64{6}))
bi := 2.0
correct = 4.0
res, err = Sub(a, bi)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestModScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{5}))
b := New(WithBacking([]float64{2}))
var correct interface{} = 1.0
res, err := Mod(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{5, 4}))
b = New(WithBacking([]float64{2}))
correct = []float64{1, 0}
res, err = Mod(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{5}))
b = New(WithBacking([]float64{3, 2}))
correct = []float64{2, 1}
res, err = Mod(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{22, 10}))
b = New(WithBacking([]float64{7, 2}))
correct = []float64{1, 0}
res, err = Mod(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// interface-scalar
ai := 5.0
b = New(WithBacking([]float64{2}))
correct = 1.0
res, err = Mod(ai, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-interface
a = New(WithBacking([]float64{5}))
bi := 2.0
correct = 1.0
res, err = Mod(a, bi)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestPowScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = 36.0
res, err := Pow(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{6, 4}))
b = New(WithBacking([]float64{2}))
correct = []float64{36, 16}
res, err = Pow(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{6}))
b = New(WithBacking([]float64{3, 2}))
correct = []float64{216, 36}
res, err = Pow(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{3, 10}))
b = New(WithBacking([]float64{7, 2}))
correct = []float64{2187, 100}
res, err = Pow(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// interface-scalar
ai := 6.0
b = New(WithBacking([]float64{2}))
correct = 36.0
res, err = Pow(ai, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-interface
a = New(WithBacking([]float64{6}))
bi := 2.0
correct = 36.0
res, err = Pow(a, bi)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
tensor-0.9.24/api_cmp.go 0000664 0000000 0000000 00000022601 14265126151 0015063 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
// public API for comparison ops
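//
// Every function below follows the same dispatch pattern: type-switch on the operands, assert the
// relevant engine interface (Lter, Gter, Lteer, Gteer, ElEqer), and fall back to the engine's
// *Scalar variant when one operand is a plain value or a scalar-shaped Tensor. By default the
// result holds bools; passing the AsSameType() FuncOpt requests a result of the operands' dtype
// instead, as exercised by the generated *_assame tests.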
// Lt performs an elementwise less-than comparison (a < b). a and b can either be float64 or *Dense.
// It returns the same Tensor type as its input.
//
// If both operands are *Dense, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
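//
// A minimal usage sketch (the values here are illustrative assumptions, not taken from the tests):
//
//	a := New(WithBacking([]float64{1, 2, 3}))
//	mask, _ := Lt(a, 2.0)               // bool result: [true false false]
//	same, _ := Lt(a, 2.0, AsSameType()) // float64 result: [1 0 0]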
func Lt(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var lter Lter
var ok bool
switch at := a.(type) {
case Tensor:
lter, ok = at.Engine().(Lter)
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison
if !ok {
if lter, ok = bt.Engine().(Lter); !ok {
return nil, errors.Errorf("Neither operands have engines that support Lt")
}
}
return lter.Lt(at, bt, opts...)
} else {
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if !ok {
return nil, errors.Errorf("Engine does not support Lt")
}
return lter.LtScalar(at, bt, leftTensor, opts...)
}
default:
if !ok {
return nil, errors.Errorf("Engine does not support Lt")
}
return lter.LtScalar(at, bt, true, opts...)
}
default:
switch bt := b.(type) {
case Tensor:
if lter, ok = bt.Engine().(Lter); !ok {
return nil, errors.Errorf("Engine does not support Lt")
}
return lter.LtScalar(bt, at, false, opts...)
default:
return nil, errors.Errorf("Unable to perform Lt on %T and %T", a, b)
}
}
}
// Gt performs an elementwise greater-than comparison (a > b). a and b can either be float64 or *Dense.
// It returns the same Tensor type as its input.
//
// If both operands are *Dense, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
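//
// An illustrative sketch with two tensors (assumed values):
//
//	a := New(WithBacking([]float64{1, 5}))
//	b := New(WithBacking([]float64{3, 3}))
//	mask, _ := Gt(a, b) // bool result: [false true]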
func Gt(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var gter Gter
var ok bool
switch at := a.(type) {
case Tensor:
gter, ok = at.Engine().(Gter)
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison
if !ok {
if gter, ok = bt.Engine().(Gter); !ok {
return nil, errors.Errorf("Neither operands have engines that support Gt")
}
}
return gter.Gt(at, bt, opts...)
} else {
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if !ok {
return nil, errors.Errorf("Engine does not support Gt")
}
return gter.GtScalar(at, bt, leftTensor, opts...)
}
default:
if !ok {
return nil, errors.Errorf("Engine does not support Gt")
}
return gter.GtScalar(at, bt, true, opts...)
}
default:
switch bt := b.(type) {
case Tensor:
if gter, ok = bt.Engine().(Gter); !ok {
return nil, errors.Errorf("Engine does not support Gt")
}
return gter.GtScalar(bt, at, false, opts...)
default:
return nil, errors.Errorf("Unable to perform Gt on %T and %T", a, b)
}
}
}
// Lte performs an elementwise less-than-or-equal comparison (a <= b). a and b can either be float64 or *Dense.
// It returns the same Tensor type as its input.
//
// If both operands are *Dense, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
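//
// An illustrative sketch with the scalar on the left (assumed values):
//
//	b := New(WithBacking([]float64{1, 2, 3}))
//	mask, _ := Lte(2.0, b) // 2 <= b[i]: [false true true]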
func Lte(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var lteer Lteer
var ok bool
switch at := a.(type) {
case Tensor:
lteer, ok = at.Engine().(Lteer)
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison
if !ok {
if lteer, ok = bt.Engine().(Lteer); !ok {
return nil, errors.Errorf("Neither operands have engines that support Lte")
}
}
return lteer.Lte(at, bt, opts...)
} else {
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if !ok {
return nil, errors.Errorf("Engine does not support Lte")
}
return lteer.LteScalar(at, bt, leftTensor, opts...)
}
default:
if !ok {
return nil, errors.Errorf("Engine does not support Lte")
}
return lteer.LteScalar(at, bt, true, opts...)
}
default:
switch bt := b.(type) {
case Tensor:
if lteer, ok = bt.Engine().(Lteer); !ok {
return nil, errors.Errorf("Engine does not support Lte")
}
return lteer.LteScalar(bt, at, false, opts...)
default:
return nil, errors.Errorf("Unable to perform Lte on %T and %T", a, b)
}
}
}
// Gte performs an elementwise greater-than-or-equal comparison (a >= b). a and b can either be float64 or *Dense.
// It returns the same Tensor type as its input.
//
// If both operands are *Dense, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
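//
// An illustrative sketch with a scalar on the right (assumed values):
//
//	a := New(WithBacking([]float64{3, 7}))
//	mask, _ := Gte(a, 5.0) // bool result: [false true]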
func Gte(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var gteer Gteer
var ok bool
switch at := a.(type) {
case Tensor:
gteer, ok = at.Engine().(Gteer)
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison
if !ok {
if gteer, ok = bt.Engine().(Gteer); !ok {
return nil, errors.Errorf("Neither operands have engines that support Gte")
}
}
return gteer.Gte(at, bt, opts...)
} else {
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if !ok {
return nil, errors.Errorf("Engine does not support Gte")
}
return gteer.GteScalar(at, bt, leftTensor, opts...)
}
default:
if !ok {
return nil, errors.Errorf("Engine does not support Gte")
}
return gteer.GteScalar(at, bt, true, opts...)
}
default:
switch bt := b.(type) {
case Tensor:
if gteer, ok = bt.Engine().(Gteer); !ok {
return nil, errors.Errorf("Engine does not support Gte")
}
return gteer.GteScalar(bt, at, false, opts...)
default:
return nil, errors.Errorf("Unable to perform Gte on %T and %T", a, b)
}
}
}
// ElEq performs an elementwise equality comparison (a == b). a and b can either be float64 or *Dense.
// It returns the same Tensor type as its input.
//
// If both operands are *Dense, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
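//
// An illustrative sketch, including the AsSameType variant (assumed values):
//
//	a := New(WithBacking([]float64{2, 3, 2}))
//	mask, _ := ElEq(a, 2.0)               // bool result: [true false true]
//	ones, _ := ElEq(a, 2.0, AsSameType()) // float64 result: [1 0 1]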
func ElEq(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var eleqer ElEqer
var ok bool
switch at := a.(type) {
case Tensor:
eleqer, ok = at.Engine().(ElEqer)
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor comparison
if !ok {
if eleqer, ok = bt.Engine().(ElEqer); !ok {
return nil, errors.Errorf("Neither operands have engines that support ElEq")
}
}
return eleqer.ElEq(at, bt, opts...)
} else {
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if !ok {
return nil, errors.Errorf("Engine does not support ElEq")
}
return eleqer.EqScalar(at, bt, leftTensor, opts...)
}
default:
if !ok {
return nil, errors.Errorf("Engine does not support ElEq")
}
return eleqer.EqScalar(at, bt, true, opts...)
}
default:
switch bt := b.(type) {
case Tensor:
if eleqer, ok = bt.Engine().(ElEqer); !ok {
return nil, errors.Errorf("Engine does not support ElEq")
}
return eleqer.EqScalar(bt, at, false, opts...)
default:
return nil, errors.Errorf("Unable to perform ElEq on %T and %T", a, b)
}
}
}
// ElNe performs an elementwise inequality comparison (a != b). a and b can either be float64 or *Dense.
// It returns the same Tensor type as its input.
//
// If both operands are *Dense, shape is checked first.
// Even though the underlying data may have the same size (say (2,2) vs (4,1)), if they have different shapes, it will error out.
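//
// An illustrative sketch (assumed values):
//
//	a := New(WithBacking([]float64{2, 3}))
//	b := New(WithBacking([]float64{2, 4}))
//	mask, _ := ElNe(a, b) // bool result: [false true]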
func ElNe(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var eleqer ElEqer
var ok bool
switch at := a.(type) {
case Tensor:
eleqer, ok = at.Engine().(ElEqer)
switch bt := b.(type) {
case Tensor:
if !ok {
if eleqer, ok = bt.Engine().(ElEqer); !ok {
return nil, errors.Errorf("Neither operands have engines that support ElEq")
}
}
return eleqer.ElNe(at, bt, opts...)
default:
if !ok {
return nil, errors.Errorf("Engine does not support ElEq")
}
return eleqer.NeScalar(at, bt, true, opts...)
}
default:
switch bt := b.(type) {
case Tensor:
if eleqer, ok = bt.Engine().(ElEqer); !ok {
return nil, errors.Errorf("Engine does not support ElEq")
}
return eleqer.NeScalar(bt, at, false, opts...)
default:
return nil, errors.Errorf("Unable to perform ElEq on %T and %T", a, b)
}
}
}
tensor-0.9.24/api_cmp_generated_test.go 0000664 0000000 0000000 00000104151 14265126151 0020141 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"reflect"
"testing"
"testing/quick"
)
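// The transFn properties below check elementwise transitivity: wherever a∙b and b∙c both hold,
// a∙c must hold too (with ∙ being the comparison under test). The symFn properties check that
// ElEq and ElNe are symmetric in their arguments.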
func TestGt(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Gt(a, b)
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gt(b, c)
if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gt(a, c)
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestGte(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Gte(a, b)
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gte(b, c)
if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gte(a, c)
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestLt(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Lt(a, b)
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lt(b, c)
if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lt(a, c)
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestLte(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Lte(a, b)
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lte(b, c)
if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lte(a, c)
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestEq(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := ElEq(a, b)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := ElEq(b, c)
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := ElEq(a, c)
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := ElEq(a, b)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElEq(b, a)
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
}
func TestNe(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := ElNe(a, b)
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElNe(b, a)
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElNe failed: %v", err)
}
}
func TestGt_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Gt(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gt(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gt(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestGte_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Gte(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gte(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gte(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestLt_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Lt(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lt(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lt(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestLte_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := Lte(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lte(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lte(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestEq_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := ElEq(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := ElEq(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := ElEq(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := ElEq(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElEq(b, a, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
}
func TestNe_assame(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := ElNe(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElNe(b, a, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElNe failed: %v", err)
}
}
func TestGtScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Gt(a, b)
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gt(b, c)
if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gt(a, c)
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestGteScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Gte(a, b)
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gte(b, c)
if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gte(a, c)
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestLtScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Lt(a, b)
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lt(b, c)
if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lt(a, c)
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestLteScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Lte(a, b)
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lte(b, c)
if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lte(a, c)
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestEqScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := ElEq(a, b)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := ElEq(b, c)
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := ElEq(a, c)
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.(*Dense).Bools()
bc := bxc.(*Dense).Bools()
ac := axc.(*Dense).Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := ElEq(a, b)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElEq(b, a)
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElEq failed: %v", err)
}
}
func TestNeScalar(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := ElNe(a, b)
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElNe(b, a)
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElNe failed: %v", err)
}
}
func TestGtScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Gt(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gt(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gt(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestGteScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Gte(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Gte(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Gte(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestLtScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Lt(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lt(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lt(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestLteScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := Lte(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := Lte(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := Lte(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestEqScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := ElEq(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := ElEq(b, c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := ElEq(a, c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := ElEq(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElEq(b, a, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElEq failed: %v", err)
}
}
func TestNeScalar_assame(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := ElNe(a, b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := ElNe(b, a, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElNe failed: %v", err)
}
}
tensor-0.9.24/api_cmp_test.go 0000664 0000000 0000000 00000012067 14265126151 0016127 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
)
// This file contains the tests for API functions that aren't generated by genlib
func TestLtScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = false
res, err := Lt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{1, 4}))
b = New(WithBacking([]float64{2}))
correct = []bool{true, false}
res, err = Lt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{3}))
b = New(WithBacking([]float64{6, 2}))
correct = []bool{true, false}
res, err = Lt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 2}))
b = New(WithBacking([]float64{7, 10}))
correct = []bool{false, true}
res, err = Lt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestGtScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = true
res, err := Gt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{1, 4}))
b = New(WithBacking([]float64{2}))
correct = []bool{false, true}
res, err = Gt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{3}))
b = New(WithBacking([]float64{6, 2}))
correct = []bool{false, true}
res, err = Gt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 2}))
b = New(WithBacking([]float64{7, 10}))
correct = []bool{true, false}
res, err = Gt(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestLteScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = false
res, err := Lte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{1, 2, 4}))
b = New(WithBacking([]float64{2}))
correct = []bool{true, true, false}
res, err = Lte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{3}))
b = New(WithBacking([]float64{6, 2}))
correct = []bool{true, false}
res, err = Lte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 2}))
b = New(WithBacking([]float64{7, 10}))
correct = []bool{false, true}
res, err = Lte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestGteScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = true
res, err := Gte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{1, 2, 4}))
b = New(WithBacking([]float64{2}))
correct = []bool{false, true, true}
res, err = Gte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{3}))
b = New(WithBacking([]float64{6, 3, 2}))
correct = []bool{false, true, true}
res, err = Gte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 31, 2}))
b = New(WithBacking([]float64{7, 31, 10}))
correct = []bool{true, true, false}
res, err = Gte(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
func TestElEqScalarScalar(t *testing.T) {
// scalar-scalar
a := New(WithBacking([]float64{6}))
b := New(WithBacking([]float64{2}))
var correct interface{} = false
res, err := ElEq(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor-scalar
a = New(WithBacking([]float64{1, 2, 4}))
b = New(WithBacking([]float64{2}))
correct = []bool{false, true, false}
res, err = ElEq(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// scalar-tensor
a = New(WithBacking([]float64{3}))
b = New(WithBacking([]float64{6, 3, 2}))
correct = []bool{false, true, false}
res, err = ElEq(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
// tensor - tensor
a = New(WithBacking([]float64{21, 10}))
b = New(WithBacking([]float64{7, 10}))
correct = []bool{false, true}
res, err = ElEq(a, b)
if err != nil {
t.Fatalf("Error: %v", err)
}
assert.Equal(t, correct, res.Data())
}
tensor-0.9.24/api_matop.go 0000664 0000000 0000000 00000014671 14265126151 0015434 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
)
// This file handles matops. While by default most of these matops should already have been defined as part of the
// Tensor interface, not all are possible (for example, concatenating a sparse tensor), hence the need for the following functions.
// Narrow narrows the tensor along the given dimension, returning a view that starts at `start` and extends for `length` elements.
func Narrow(t Tensor, dim, start, length int) (View, error) {
dim = resolveAxis(dim, t.Dims())
slices := make([]Slice, MinInt(dim+1, t.Dims()))
slices[dim] = S(start, start+length, 1)
return t.Slice(slices...)
}
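// For illustration only (this sketch is not part of the original source; the
// shape and values are made up):
//
//	a := New(WithShape(3, 3), WithBacking([]int{0, 1, 2, 3, 4, 5, 6, 7, 8}))
//	v, err := Narrow(a, 1, 0, 2) // v is a 3×2 view holding columns 0 and 1 of a
//	if err != nil {
//		// handle error
//	}
//	_ = v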
// Repeat repeats a Tensor along the given axis, using the provided repeat counts.
func Repeat(t Tensor, axis int, repeats ...int) (retVal Tensor, err error) {
if r, ok := t.Engine().(Repeater); ok {
return r.Repeat(t, axis, repeats...)
}
return nil, errors.New("Engine does not support Repeat")
}
// RepeatReuse repeats a Tensor along the given axis, using the provided repeat counts, and puts the result in the provided reuse tensor. If the reuse tensor is not correctly sized, an error is returned, but the result is still valid.
func RepeatReuse(t, reuse Tensor, axis int, repeats ...int) (retval Tensor, err error) {
if r, ok := t.Engine().(Repeater); ok {
return r.RepeatReuse(t, reuse, axis, repeats...)
}
return nil, errors.New("Engine does not support Repeat")
}
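// For illustration only (not part of the original source; values are made up,
// and numpy-style repeat semantics are assumed):
//
//	a := New(WithShape(2), WithBacking([]float64{1, 2}))
//	r, err := Repeat(a, 0, 3) // each element repeated 3 times along axis 0: [1 1 1 2 2 2]
//	if err != nil {
//		// handle error
//	}
//	_ = r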
// T safely transposes a Tensor. It returns a tensor that is not a view of the input tensor - rather, the data is all copied.
func T(t Tensor, axes ...int) (retVal Tensor, err error) {
switch tt := t.(type) {
case *Dense:
return tt.SafeT(axes...)
}
panic("Unreachable")
}
// Transpose performs transposition of a tensor according to its axes.
func Transpose(t Tensor, axes ...int) (retVal Tensor, err error) {
switch tt := t.(type) {
case *Dense:
var ret *Dense
if ret, err = tt.SafeT(axes...); err != nil {
return
}
ret.Transpose()
retVal = ret
return
}
panic("Unreachable")
}
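// For illustration only (not part of the original source; shapes and values are
// made up). T returns a transposed copy of the input, while Transpose returns a
// copy whose backing data has additionally been moved into the new layout:
//
//	a := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	b, err1 := T(a)         // b has shape (3, 2); a is unchanged
//	c, err2 := Transpose(a) // c has shape (3, 2), with its data physically rearranged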
// Concat concatenates a list of Tensors. At the moment the operation only supports Tensors of the same type
// (*Dense can only be concatenated with a bunch of *Dense, CSCs can only be concatenated with a bunch of CSC, etc)
func Concat(axis int, t Tensor, others ...Tensor) (retVal Tensor, err error) {
if len(others) == 0 {
return t, nil
}
switch T := t.(type) {
case *Dense:
ts := make([]*Dense, len(others))
for i, o := range others {
if ot, ok := o.(*Dense); ok {
ts[i] = ot
continue
}
return nil, errors.Errorf("Expected all Tensors to be *Dense")
}
return T.Concat(axis, ts...)
}
panic("Unreachable")
}
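// For illustration only (not part of the original source; values are made up):
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
//	c, err := Concat(0, a, b) // c has shape (4, 2)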
// Copy copies a tensor to another. For *Dense views, only the relevant slots are copied.
func Copy(dst, src Tensor) error {
switch st := src.(type) {
case DenseTensor:
dt, ok := dst.(DenseTensor)
if !ok {
return errors.Errorf("Cannot copy from DenseTensor to %T", dst)
}
if st.RequiresIterator() || dt.RequiresIterator() {
siter := st.Iterator()
diter := dt.Iterator()
_, err := copyDenseIter(dt, st, diter, siter)
return err
}
copyDense(dt, st)
return nil
default:
return errors.Errorf("NYI for Copy %T", src)
}
panic("Unreachable")
}
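// For illustration only (not part of the original source; values are made up):
//
//	src := New(WithBacking([]float64{1, 2, 3}))
//	dst := New(Of(Float64), WithShape(3))
//	err := Copy(dst, src) // dst now holds [1 2 3]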
// Stack stacks a list of other Tensors. At the moment the operation only supports Tensors of the same type.
// (*Dense can only be stacked with *Dense... etc)
func Stack(axis int, t Tensor, others ...Tensor) (retVal Tensor, err error) {
if len(others) == 0 {
return t, nil
}
switch T := t.(type) {
case DenseTensor:
var dts []DenseTensor
if dts, err = tensorsToDenseTensors(others); err != nil {
return nil, errors.Wrap(err, "Cannot convert others into a slice of DenseTensors")
}
return T.stackDense(axis, dts...)
}
panic("Unreachable")
}
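// For illustration only (not part of the original source; values are made up):
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
//	s, err := Stack(0, a, b) // s has shape (2, 2, 2): a new leading axis indexes the stacked tensors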
// Materialize takes a View and copies out the data into a new allocation.
func Materialize(t Tensor) Tensor {
switch tt := t.(type) {
case View:
return tt.Materialize()
default:
return t
}
}
// Diag performs diagonalization of the given tensor, provided its engine implements the Diager interface.
func Diag(t Tensor) (retVal Tensor, err error) {
if d, ok := t.Engine().(Diager); ok {
return d.Diag(t)
}
return nil, errors.Errorf("Unable to perform diagonalization of tensor ")
}
// ByIndices allows for selection of values of `a` by the indices listed in the `indices` tensor.
// The `indices` tensor has to be a vector-like tensor of ints.
func ByIndices(a, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if axis >= a.Shape().Dims() {
return nil, errors.Errorf("Cannot select by indices on axis %d. Input only has %d dims", axis, a.Shape().Dims())
}
if sbi, ok := a.Engine().(ByIndiceser); ok {
return sbi.SelectByIndices(a, indices, axis, opts...)
}
return nil, errors.Errorf("Unable to select by indices. Engine %T does not support that.", a.Engine())
}
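// For illustration only (not part of the original source; values are made up,
// and numpy-style "take" semantics along the axis are assumed):
//
//	a := New(WithShape(3, 2), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	idx := New(WithBacking([]int{2, 0}))
//	rows, err := ByIndices(a, idx, 0) // rows 2 and 0 of a: shape (2, 2), data [5 6 1 2]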
// ByIndicesB is the backpropagation of ByIndices.
func ByIndicesB(a, b, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if axis >= a.Shape().Dims() {
return nil, errors.Errorf("Cannot select by indices on axis %d. Input only has %d dims", axis, a.Shape().Dims())
}
if sbi, ok := a.Engine().(ByIndiceser); ok {
return sbi.SelectByIndicesB(a, b, indices, axis, opts...)
}
return nil, errors.Errorf("Unable to select by indices. Engine %T does not support that.", a.Engine())
}
// LogSoftMax applies log softmax to the given tensor.
func LogSoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if sm, ok := x.Engine().(SoftMaxer); ok {
return sm.LogSoftMax(x, axis, opts...)
}
return nil, errors.Errorf("Unable to apply LogSoftMax. Engine %T does not support that.", x.Engine())
}
// SoftMax applies softmax to the given tensor.
func SoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if sm, ok := x.Engine().(SoftMaxer); ok {
return sm.SoftMax(x, axis, opts...)
}
return nil, errors.Errorf("Unable to apply SoftMax. Engine %T does not support that.", x.Engine())
}
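// For illustration only (not part of the original source; the shape and axis
// below are made up):
//
//	x := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	p, err1 := SoftMax(x, 1)     // softmax along axis 1: each row of p sums to 1
//	lp, err2 := LogSoftMax(x, 1) // elementwise log of the same distribution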
// SoftMaxB applies the softmax backwards operation
func SoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if sm, ok := output.Engine().(SoftMaxer); ok {
return sm.SoftMaxB(output, grad, axis, opts...)
}
return nil, errors.Errorf("Unable to apply SoftMaxB. Engine %T does not support that.", output.Engine())
}
// LogSoftMaxB applies the log softmax backwards operation
func LogSoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if sm, ok := output.Engine().(SoftMaxer); ok {
return sm.LogSoftMaxB(output, grad, axis, opts...)
}
return nil, errors.Errorf("Unable to apply SoftMaxB. Engine %T does not support that.", output.Engine())
}
tensor-0.9.24/api_minmax.go 0000664 0000000 0000000 00000011471 14265126151 0015600 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
// MinBetween computes the elementwise minimum of two values, which may be Tensors or scalars. At least one of the operands must be a Tensor.
func MinBetween(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var minbetweener MinBetweener
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor MinBetween
if oe != nil {
return oe.MinBetween(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.MinBetween(at, bt, opts...)
}
if minbetweener, ok = at.Engine().(MinBetweener); ok {
return minbetweener.MinBetween(at, bt, opts...)
}
if minbetweener, ok = bt.Engine().(MinBetweener); ok {
return minbetweener.MinBetween(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support MinBetween")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.MinBetweenScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.MinBetweenScalar(at, bt, leftTensor, opts...)
}
if minbetweener, ok = at.Engine().(MinBetweener); ok {
return minbetweener.MinBetweenScalar(at, bt, leftTensor, opts...)
}
if minbetweener, ok = bt.Engine().(MinBetweener); ok {
return minbetweener.MinBetweenScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support MinBetween")
}
default:
if oe != nil {
return oe.MinBetweenScalar(at, bt, true, opts...)
}
if minbetweener, ok = at.Engine().(MinBetweener); ok {
return minbetweener.MinBetweenScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support MinBetween")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.MinBetweenScalar(bt, at, false, opts...)
}
if minbetweener, ok = bt.Engine().(MinBetweener); ok {
return minbetweener.MinBetweenScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support MinBetween")
default:
return nil, errors.Errorf("Cannot perform MinBetween of %T and %T", a, b)
}
}
panic("Unreachable")
}
// MaxBetween computes the elementwise maximum of two values, which may be Tensors or scalars. At least one of the operands must be a Tensor.
func MaxBetween(a, b interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var maxbetweener MaxBetweener
var oe standardEngine
var ok bool
switch at := a.(type) {
case Tensor:
oe = at.standardEngine()
switch bt := b.(type) {
case Tensor:
if !bt.Shape().IsScalar() && !at.Shape().IsScalar() { // non-scalar Tensor MaxBetween
if oe != nil {
return oe.MaxBetween(at, bt, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.MaxBetween(at, bt, opts...)
}
if maxbetweener, ok = at.Engine().(MaxBetweener); ok {
return maxbetweener.MaxBetween(at, bt, opts...)
}
if maxbetweener, ok = bt.Engine().(MaxBetweener); ok {
return maxbetweener.MaxBetween(at, bt, opts...)
}
return nil, errors.New("Neither engines of either operand support MaxBetween")
} else { // at least one of the operands is a scalar
var leftTensor bool
if !bt.Shape().IsScalar() {
leftTensor = false // a Scalar-Tensor * b Tensor
tmp := at
at = bt
bt = tmp
} else {
leftTensor = true // a Tensor * b Scalar-Tensor
}
if oe != nil {
return oe.MaxBetweenScalar(at, bt, leftTensor, opts...)
}
if oe = bt.standardEngine(); oe != nil {
return oe.MaxBetweenScalar(at, bt, leftTensor, opts...)
}
if maxbetweener, ok = at.Engine().(MaxBetweener); ok {
return maxbetweener.MaxBetweenScalar(at, bt, leftTensor, opts...)
}
if maxbetweener, ok = bt.Engine().(MaxBetweener); ok {
return maxbetweener.MaxBetweenScalar(at, bt, leftTensor, opts...)
}
return nil, errors.New("Neither engines of either operand support MaxBetween")
}
default:
if oe != nil {
return oe.MaxBetweenScalar(at, bt, true, opts...)
}
if maxbetweener, ok = at.Engine().(MaxBetweener); ok {
return maxbetweener.MaxBetweenScalar(at, bt, true, opts...)
}
return nil, errors.New("Operand A's engine does not support MaxBetween")
}
default:
switch bt := b.(type) {
case Tensor:
if oe = bt.standardEngine(); oe != nil {
return oe.MaxBetweenScalar(bt, at, false, opts...)
}
if maxbetweener, ok = bt.Engine().(MaxBetweener); ok {
return maxbetweener.MaxBetweenScalar(bt, at, false, opts...)
}
return nil, errors.New("Operand B's engine does not support MaxBetween")
default:
return nil, errors.Errorf("Cannot perform MaxBetween of %T and %T", a, b)
}
}
panic("Unreachable")
}
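// For illustration only (not part of the original source; values are made up):
//
//	a := New(WithBacking([]float64{1, 5, 3}))
//	b := New(WithBacking([]float64{4, 2, 3}))
//	m, err1 := MinBetween(a, b)          // elementwise minimum: [1 2 3]
//	n, err2 := MaxBetween(a, float64(2)) // scalar operands are accepted too: [2 5 3]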
tensor-0.9.24/api_reduction.go 0000664 0000000 0000000 00000001510 14265126151 0016274 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
// Sum sums a Tensor along the given axes
func Sum(t Tensor, along ...int) (retVal Tensor, err error) {
if sumer, ok := t.Engine().(Sumer); ok {
return sumer.Sum(t, along...)
}
return nil, errors.New("Engine does not support Sum()")
}
// Argmax finds the index of the max value along the axis provided
func Argmax(t Tensor, axis int) (retVal Tensor, err error) {
if argmaxer, ok := t.Engine().(Argmaxer); ok {
return argmaxer.Argmax(t, axis)
}
return nil, errors.New("Engine does not support Argmax()")
}
// Argmin finds the index of the min value along the axis provided
func Argmin(t Tensor, axis int) (retVal Tensor, err error) {
if argminer, ok := t.Engine().(Argminer); ok {
return argminer.Argmin(t, axis)
}
return nil, errors.New("Engine does not support Argmax()")
}
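// For illustration only (not part of the original source; values are made up):
//
//	a := New(WithShape(2, 3), WithBacking([]float64{3, 1, 2, 0, 5, 4}))
//	s, err1 := Sum(a, 1)    // row sums: [6 9]
//	i, err2 := Argmax(a, 1) // index of the max within each row: [0 1]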
tensor-0.9.24/api_unary.go 0000664 0000000 0000000 00000006523 14265126151 0015447 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import "github.com/pkg/errors"
func Neg(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if neger, ok := e.(Neger); ok {
return neger.Neg(a, opts...)
}
err = errors.Errorf("Engine does not perform Neg")
return
}
func Inv(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if inver, ok := e.(Inver); ok {
return inver.Inv(a, opts...)
}
err = errors.Errorf("Engine does not perform Inv")
return
}
func Square(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if squarer, ok := e.(Squarer); ok {
return squarer.Square(a, opts...)
}
err = errors.Errorf("Engine does not perform Square")
return
}
func Cube(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if cuber, ok := e.(Cuber); ok {
return cuber.Cube(a, opts...)
}
err = errors.Errorf("Engine does not perform Cube")
return
}
func Exp(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if exper, ok := e.(Exper); ok {
return exper.Exp(a, opts...)
}
err = errors.Errorf("Engine does not perform Exp")
return
}
func Tanh(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if tanher, ok := e.(Tanher); ok {
return tanher.Tanh(a, opts...)
}
err = errors.Errorf("Engine does not perform Tanh")
return
}
func Log(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if loger, ok := e.(Loger); ok {
return loger.Log(a, opts...)
}
err = errors.Errorf("Engine does not perform Log")
return
}
func Log2(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if log2er, ok := e.(Log2er); ok {
return log2er.Log2(a, opts...)
}
err = errors.Errorf("Engine does not perform Log2")
return
}
func Log10(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if log10er, ok := e.(Log10er); ok {
return log10er.Log10(a, opts...)
}
err = errors.Errorf("Engine does not perform Log10")
return
}
func Sqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if sqrter, ok := e.(Sqrter); ok {
return sqrter.Sqrt(a, opts...)
}
err = errors.Errorf("Engine does not perform Sqrt")
return
}
func Cbrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if cbrter, ok := e.(Cbrter); ok {
return cbrter.Cbrt(a, opts...)
}
err = errors.Errorf("Engine does not perform Cbrt")
return
}
func InvSqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if invsqrter, ok := e.(InvSqrter); ok {
return invsqrter.InvSqrt(a, opts...)
}
err = errors.Errorf("Engine does not perform InvSqrt")
return
}
func Abs(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if abser, ok := e.(Abser); ok {
return abser.Abs(a, opts...)
}
err = errors.Errorf("Engine does not perform Abs")
return
}
func Sign(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if signer, ok := e.(Signer); ok {
return signer.Sign(a, opts...)
}
err = errors.Errorf("Engine does not perform Sign")
return
}
func Clamp(a Tensor, min interface{}, max interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
e := a.Engine()
if clamper, ok := e.(Clamper); ok {
return clamper.Clamp(a, min, max, opts...)
}
err = errors.Errorf("Engine does not perform Clamp")
return
}
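// For illustration only (not part of the original source; the values mirror the
// float64 row of the clampTests table in api_unary_test.go):
//
//	a := New(WithBacking([]float64{1, 2, 3, 4}))
//	c, err := Clamp(a, 2.0, 3.0) // [2 2 3 3]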
tensor-0.9.24/api_unary_generated_test.go 0000664 0000000 0000000 00000056413 14265126151 0020527 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"testing"
"testing/quick"
)
func TestNeg(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Neger)
we = we || !ok
ret, err := Neg(a)
if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Neg(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Neg failed: %v", err)
}
}
func TestSquare(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Squarer)
we = we || !ok
ret, err := Square(a)
if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
Sqrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Square failed: %v", err)
}
}
func TestCube(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Cuber)
we = we || !ok
ret, err := Cube(a)
if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
Cbrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cube failed: %v", err)
}
}
func TestExp(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Exper)
we = we || !ok
ret, err := Exp(a)
if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Log(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Exp failed: %v", err)
}
}
func TestLog(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Loger)
we = we || !ok
ret, err := Log(a)
if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Exp(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Log failed: %v", err)
}
}
func TestSqrt(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Sqrter)
we = we || !ok
ret, err := Sqrt(a)
if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Square(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Sqrt failed: %v", err)
}
}
func TestCbrt(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Cbrter)
we = we || !ok
ret, err := Cbrt(a)
if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Cube(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cbrt failed: %v", err)
}
}
func TestNeg_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Neger)
we = we || !ok
ret, err := Neg(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Neg(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Neg failed: %v", err)
}
}
func TestSquare_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Squarer)
we = we || !ok
ret, err := Square(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
Sqrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Square failed: %v", err)
}
}
func TestCube_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Cuber)
we = we || !ok
ret, err := Cube(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
Cbrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cube failed: %v", err)
}
}
func TestExp_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Exper)
we = we || !ok
ret, err := Exp(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Log(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Exp failed: %v", err)
}
}
func TestLog_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Loger)
we = we || !ok
ret, err := Log(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Exp(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Log failed: %v", err)
}
}
func TestSqrt_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Sqrter)
we = we || !ok
ret, err := Sqrt(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Square(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Sqrt failed: %v", err)
}
}
func TestCbrt_unsafe(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Cbrter)
we = we || !ok
ret, err := Cbrt(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Cube(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cbrt failed: %v", err)
}
}
func TestNeg_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Neger)
we = we || !ok
ret, err := Neg(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Neg(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Neg failed: %v", err)
}
}
func TestSquare_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Squarer)
we = we || !ok
ret, err := Square(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
Sqrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Square failed: %v", err)
}
}
func TestCube_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Cuber)
we = we || !ok
ret, err := Cube(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
Cbrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cube failed: %v", err)
}
}
func TestExp_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Exper)
we = we || !ok
ret, err := Exp(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Log(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Exp failed: %v", err)
}
}
func TestLog_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Loger)
we = we || !ok
ret, err := Log(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Exp(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Log failed: %v", err)
}
}
func TestSqrt_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Sqrter)
we = we || !ok
ret, err := Sqrt(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Square(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Sqrt failed: %v", err)
}
}
func TestCbrt_reuse(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Cbrter)
we = we || !ok
ret, err := Cbrt(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Cube(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cbrt failed: %v", err)
}
}
func TestNeg_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Neger)
we = we || !ok
ret, err := Neg(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Neg", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Neg(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Neg failed: %v", err)
}
}
func TestSquare_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Squarer)
we = we || !ok
ret, err := Square(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Square", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatcmplxTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Sqrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Square failed: %v", err)
}
}
func TestCube_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Cuber)
we = we || !ok
ret, err := Cube(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Cube", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true // uninvertible due to type class implementation issues
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Cbrt(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cube failed: %v", err)
}
}
func TestExp_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Exper)
we = we || !ok
ret, err := Exp(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Exp", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Log(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Exp failed: %v", err)
}
}
func TestLog_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Loger)
we = we || !ok
ret, err := Log(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Log", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Exp(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Log failed: %v", err)
}
}
func TestSqrt_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, nil)
_, ok := q.Engine().(Sqrter)
we = we || !ok
ret, err := Sqrt(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Sqrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Square(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Sqrt failed: %v", err)
}
}
func TestCbrt_incr(t *testing.T) {
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Cbrter)
we = we || !ok
ret, err := Cbrt(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Cbrt", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Cube(ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(invFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv tests for Cbrt failed: %v", err)
}
}
tensor-0.9.24/api_unary_test.go 0000664 0000000 0000000 00000076663 14265126151 0016522 0 ustar 00root root 0000000 0000000 package tensor
import (
"math/rand"
"testing"
"testing/quick"
"time"
"math"
"github.com/stretchr/testify/assert"
"github.com/chewxy/math32"
)
/*
GENERATED FILE BY Genlib V1. DO NOT EDIT
*/
var clampTests = []struct {
a, reuse interface{}
min, max interface{}
correct interface{}
correctSliced interface{}
}{
{[]int{1, 2, 3, 4}, []int{10, 20, 30, 40}, int(2), int(3), []int{2, 2, 3, 3}, []int{2, 2, 3}},
{[]int8{1, 2, 3, 4}, []int8{10, 20, 30, 40}, int8(2), int8(3), []int8{2, 2, 3, 3}, []int8{2, 2, 3}},
{[]int16{1, 2, 3, 4}, []int16{10, 20, 30, 40}, int16(2), int16(3), []int16{2, 2, 3, 3}, []int16{2, 2, 3}},
{[]int32{1, 2, 3, 4}, []int32{10, 20, 30, 40}, int32(2), int32(3), []int32{2, 2, 3, 3}, []int32{2, 2, 3}},
{[]int64{1, 2, 3, 4}, []int64{10, 20, 30, 40}, int64(2), int64(3), []int64{2, 2, 3, 3}, []int64{2, 2, 3}},
{[]uint{1, 2, 3, 4}, []uint{10, 20, 30, 40}, uint(2), uint(3), []uint{2, 2, 3, 3}, []uint{2, 2, 3}},
{[]uint8{1, 2, 3, 4}, []uint8{10, 20, 30, 40}, uint8(2), uint8(3), []uint8{2, 2, 3, 3}, []uint8{2, 2, 3}},
{[]uint16{1, 2, 3, 4}, []uint16{10, 20, 30, 40}, uint16(2), uint16(3), []uint16{2, 2, 3, 3}, []uint16{2, 2, 3}},
{[]uint32{1, 2, 3, 4}, []uint32{10, 20, 30, 40}, uint32(2), uint32(3), []uint32{2, 2, 3, 3}, []uint32{2, 2, 3}},
{[]uint64{1, 2, 3, 4}, []uint64{10, 20, 30, 40}, uint64(2), uint64(3), []uint64{2, 2, 3, 3}, []uint64{2, 2, 3}},
{[]float32{1, 2, 3, 4}, []float32{10, 20, 30, 40}, float32(2), float32(3), []float32{2, 2, 3, 3}, []float32{2, 2, 3}},
{[]float64{1, 2, 3, 4}, []float64{10, 20, 30, 40}, float64(2), float64(3), []float64{2, 2, 3, 3}, []float64{2, 2, 3}},
}
var clampTestsMasked = []struct {
a, reuse interface{}
min, max interface{}
correct interface{}
correctSliced interface{}
}{
{[]int{1, 2, 3, 4}, []int{1, 20, 30, 40}, int(2), int(3), []int{1, 2, 3, 3}, []int{1, 2, 3}},
{[]int8{1, 2, 3, 4}, []int8{1, 20, 30, 40}, int8(2), int8(3), []int8{1, 2, 3, 3}, []int8{1, 2, 3}},
{[]int16{1, 2, 3, 4}, []int16{1, 20, 30, 40}, int16(2), int16(3), []int16{1, 2, 3, 3}, []int16{1, 2, 3}},
{[]int32{1, 2, 3, 4}, []int32{1, 20, 30, 40}, int32(2), int32(3), []int32{1, 2, 3, 3}, []int32{1, 2, 3}},
{[]int64{1, 2, 3, 4}, []int64{1, 20, 30, 40}, int64(2), int64(3), []int64{1, 2, 3, 3}, []int64{1, 2, 3}},
{[]uint{1, 2, 3, 4}, []uint{1, 20, 30, 40}, uint(2), uint(3), []uint{1, 2, 3, 3}, []uint{1, 2, 3}},
{[]uint8{1, 2, 3, 4}, []uint8{1, 20, 30, 40}, uint8(2), uint8(3), []uint8{1, 2, 3, 3}, []uint8{1, 2, 3}},
{[]uint16{1, 2, 3, 4}, []uint16{1, 20, 30, 40}, uint16(2), uint16(3), []uint16{1, 2, 3, 3}, []uint16{1, 2, 3}},
{[]uint32{1, 2, 3, 4}, []uint32{1, 20, 30, 40}, uint32(2), uint32(3), []uint32{1, 2, 3, 3}, []uint32{1, 2, 3}},
{[]uint64{1, 2, 3, 4}, []uint64{1, 20, 30, 40}, uint64(2), uint64(3), []uint64{1, 2, 3, 3}, []uint64{1, 2, 3}},
{[]float32{1, 2, 3, 4}, []float32{1, 20, 30, 40}, float32(2), float32(3), []float32{1, 2, 3, 3}, []float32{1, 2, 3}},
{[]float64{1, 2, 3, 4}, []float64{1, 20, 30, 40}, float64(2), float64(3), []float64{1, 2, 3, 3}, []float64{1, 2, 3}},
}
func TestClamp(t *testing.T) {
assert := assert.New(t)
var got, sliced Tensor
var T, reuse *Dense
var err error
for _, ct := range clampTests {
T = New(WithBacking(ct.a))
// safe
if got, err = Clamp(T, ct.min, ct.max); err != nil {
t.Error(err)
continue
}
if got == T {
t.Error("expected got != T")
continue
}
assert.Equal(ct.correct, got.Data())
// sliced safe
if sliced, err = T.Slice(makeRS(0, 3)); err != nil {
t.Error("Unable to slice T")
continue
}
if got, err = Clamp(sliced, ct.min, ct.max); err != nil {
t.Error(err)
continue
}
assert.Equal(ct.correctSliced, got.Data())
// reuse
reuse = New(WithBacking(ct.reuse))
if got, err = Clamp(T, ct.min, ct.max, WithReuse(reuse)); err != nil {
t.Error(err)
continue
}
if got != reuse {
t.Error("expected got == reuse")
continue
}
assert.Equal(ct.correct, got.Data())
// unsafe
if got, err = Clamp(T, ct.min, ct.max, UseUnsafe()); err != nil {
t.Error(err)
continue
}
if got != T {
t.Error("expected got == T")
continue
}
assert.Equal(ct.correct, got.Data())
}
}
func TestClampMasked(t *testing.T) {
assert := assert.New(t)
var got, sliced Tensor
var T, reuse *Dense
var err error
for _, ct := range clampTestsMasked {
T = New(WithBacking(ct.a, []bool{true, false, false, false}))
// safe
if got, err = Clamp(T, ct.min, ct.max); err != nil {
t.Error(err)
continue
}
if got == T {
t.Error("expected got != T")
continue
}
assert.Equal(ct.correct, got.Data())
// sliced safe
if sliced, err = T.Slice(makeRS(0, 3)); err != nil {
t.Error("Unable to slice T")
continue
}
if got, err = Clamp(sliced, ct.min, ct.max); err != nil {
t.Error(err)
continue
}
// reuse
reuse = New(WithBacking(ct.reuse, []bool{true, false, false, false}))
if got, err = Clamp(T, ct.min, ct.max, WithReuse(reuse)); err != nil {
t.Error(err)
continue
}
if got != reuse {
t.Error("expected got == reuse")
continue
}
assert.Equal(ct.correct, got.Data())
// unsafe
if got, err = Clamp(T, ct.min, ct.max, UseUnsafe()); err != nil {
t.Error(err)
continue
}
if got != T {
t.Error("expected got == T")
continue
}
assert.Equal(ct.correct, got.Data())
}
}
var signTests = []struct {
a, reuse interface{}
correct interface{}
correctSliced interface{}
}{
{[]int{0, 1, 2, -2, -1}, []int{100, 10, 20, 30, 40}, []int{0, 1, 1, -1, -1}, []int{0, 1, 1, -1}},
{[]int8{0, 1, 2, -2, -1}, []int8{100, 10, 20, 30, 40}, []int8{0, 1, 1, -1, -1}, []int8{0, 1, 1, -1}},
{[]int16{0, 1, 2, -2, -1}, []int16{100, 10, 20, 30, 40}, []int16{0, 1, 1, -1, -1}, []int16{0, 1, 1, -1}},
{[]int32{0, 1, 2, -2, -1}, []int32{100, 10, 20, 30, 40}, []int32{0, 1, 1, -1, -1}, []int32{0, 1, 1, -1}},
{[]int64{0, 1, 2, -2, -1}, []int64{100, 10, 20, 30, 40}, []int64{0, 1, 1, -1, -1}, []int64{0, 1, 1, -1}},
{[]float32{0, 1, 2, -2, -1}, []float32{100, 10, 20, 30, 40}, []float32{0, 1, 1, -1, -1}, []float32{0, 1, 1, -1}},
{[]float64{0, 1, 2, -2, -1}, []float64{100, 10, 20, 30, 40}, []float64{0, 1, 1, -1, -1}, []float64{0, 1, 1, -1}},
}
var signTestsMasked = []struct {
a, reuse interface{}
correct interface{}
// correctSliced interface{}
}{
{[]int{1, 2, -2, -1}, []int{10, 20, 30, 40}, []int{1, 1, -2, -1}},
{[]int8{1, 2, -2, -1}, []int8{10, 20, 30, 40}, []int8{1, 1, -2, -1}},
{[]int16{1, 2, -2, -1}, []int16{10, 20, 30, 40}, []int16{1, 1, -2, -1}},
{[]int32{1, 2, -2, -1}, []int32{10, 20, 30, 40}, []int32{1, 1, -2, -1}},
{[]int64{1, 2, -2, -1}, []int64{10, 20, 30, 40}, []int64{1, 1, -2, -1}},
{[]float32{1, 2, -2, -1}, []float32{10, 20, 30, 40}, []float32{1, 1, -2, -1}},
{[]float64{1, 2, -2, -1}, []float64{10, 20, 30, 40}, []float64{1, 1, -2, -1}},
}
func TestSign(t *testing.T) {
assert := assert.New(t)
var got, sliced Tensor
var T, reuse *Dense
var err error
for _, st := range signTests {
T = New(WithBacking(st.a))
// safe
if got, err = Sign(T); err != nil {
t.Error(err)
continue
}
if got == T {
t.Error("expected got != T")
continue
}
assert.Equal(st.correct, got.Data())
// sliced safe
if sliced, err = T.Slice(makeRS(0, 4)); err != nil {
t.Error("Unable to slice T")
continue
}
if got, err = Sign(sliced); err != nil {
t.Error(err)
continue
}
assert.Equal(st.correctSliced, got.Data())
// reuse
reuse = New(WithBacking(st.reuse))
if got, err = Sign(T, WithReuse(reuse)); err != nil {
t.Error(err)
continue
}
if got != reuse {
t.Error("expected got == reuse")
continue
}
assert.Equal(st.correct, got.Data())
// unsafe
if got, err = Sign(T, UseUnsafe()); err != nil {
t.Error(err)
continue
}
if got != T {
t.Error("expected got == T")
continue
}
assert.Equal(st.correct, got.Data())
}
}
func TestSignMasked(t *testing.T) {
assert := assert.New(t)
var got Tensor
var T, reuse *Dense
var err error
for _, st := range signTestsMasked {
T = New(WithBacking(st.a, []bool{false, false, true, false}))
// safe
if got, err = Sign(T); err != nil {
t.Error(err)
continue
}
if got == T {
t.Error("expected got != T")
continue
}
assert.Equal(st.correct, got.Data())
// reuse
reuse = New(WithBacking(st.reuse, []bool{false, false, true, false}))
if got, err = Sign(T, WithReuse(reuse)); err != nil {
t.Error(err)
continue
}
if got != reuse {
t.Error("expected got == reuse")
continue
}
assert.Equal(st.correct, got.Data())
// unsafe
if got, err = Sign(T, UseUnsafe()); err != nil {
t.Error(err)
continue
}
if got != T {
t.Error("expected got == T")
continue
}
assert.Equal(st.correct, got.Data())
}
}
var negTestsMasked = []struct {
a, reuse interface{}
correct interface{}
}{
{[]int{1, 2, -2, -1}, []int{10, 20, 30, 40}, []int{-1, -2, -2, 1}},
{[]int8{1, 2, -2, -1}, []int8{10, 20, 30, 40}, []int8{-1, -2, -2, 1}},
{[]int16{1, 2, -2, -1}, []int16{10, 20, 30, 40}, []int16{-1, -2, -2, 1}},
{[]int32{1, 2, -2, -1}, []int32{10, 20, 30, 40}, []int32{-1, -2, -2, 1}},
{[]int64{1, 2, -2, -1}, []int64{10, 20, 30, 40}, []int64{-1, -2, -2, 1}},
{[]float32{1, 2, -2, -1}, []float32{10, 20, 30, 40}, []float32{-1, -2, -2, 1}},
{[]float64{1, 2, -2, -1}, []float64{10, 20, 30, 40}, []float64{-1, -2, -2, 1}},
}
func TestNegMasked(t *testing.T) {
assert := assert.New(t)
var got Tensor
var T, reuse *Dense
var err error
for _, st := range negTestsMasked {
T = New(WithBacking(st.a, []bool{false, false, true, false}))
// safe
if got, err = Neg(T); err != nil {
t.Error(err)
continue
}
if got == T {
t.Error("expected got != T")
continue
}
assert.Equal(st.correct, got.Data())
// reuse
reuse = New(WithBacking(st.reuse, []bool{false, false, true, false}))
if got, err = Neg(T, WithReuse(reuse)); err != nil {
t.Error(err)
continue
}
if got != reuse {
t.Error("expected got == reuse")
continue
}
assert.Equal(st.correct, got.Data())
// unsafe
if got, err = Neg(T, UseUnsafe()); err != nil {
t.Error(err)
continue
}
if got != T {
t.Error("expected got == T")
continue
}
assert.Equal(st.correct, got.Data())
}
}
func TestInvSqrt(t *testing.T) {
var r *rand.Rand
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(InvSqrter)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := InvSqrt(a)
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Sqrt(b, UseUnsafe())
Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests for InvSqrt failed: %v", err)
}
// unsafe
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(InvSqrter)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := InvSqrt(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Sqrt(b, UseUnsafe())
Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for InvSqrt failed: %v", err)
}
// reuse
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
reuse := q.Clone().(*Dense)
reuse.Zero()
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(InvSqrter)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := InvSqrt(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Sqrt(b, UseUnsafe())
Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != reuse {
t.Errorf("Expected ret to be the same as reuse")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests with reuse for InvSqrt failed: %v", err)
}
// incr
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(InvSqrter)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := InvSqrt(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Sqrt(b, UseUnsafe())
Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, b.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != incr {
t.Errorf("Expected ret to be the same as incr")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests with incr for InvSqrt failed: %v", err)
}
}
func TestInv(t *testing.T) {
var r *rand.Rand
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Inver)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Inv(a)
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Mul(ret, a, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests for Inv failed: %v", err)
}
// unsafe
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Inver)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Inv(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Mul(ret, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Inv failed: %v", err)
}
// reuse
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
reuse := a.Clone().(*Dense)
reuse.Zero()
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Inver)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Inv(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
Mul(ret, a, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != reuse {
t.Errorf("Expected ret to be the same as reuse")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Inv failed: %v", err)
}
// incr
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Inver)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Inv(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Inv", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
Mul(ret, a, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != incr {
t.Errorf("Expected ret to be the same as incr")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Inv failed: %v", err)
}
}
func TestLog10(t *testing.T) {
var r *rand.Rand
// default
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log10er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log10(a)
if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
ten := identityVal(10, a.Dtype())
Pow(ten, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests for Log10 failed: %v", err)
}
// unsafe
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log10er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log10(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
ten := identityVal(10, a.Dtype())
Pow(ten, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Log10 failed: %v", err)
}
// reuse
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
reuse := a.Clone().(*Dense)
reuse.Zero()
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log10er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log10(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
ten := identityVal(10, a.Dtype())
Pow(ten, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != reuse {
t.Errorf("Expected ret to be the same as reuse")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Log10 failed: %v", err)
}
// incr
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log10er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log10(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Log10", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
ten := identityVal(10, a.Dtype())
Pow(ten, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != incr {
t.Errorf("Expected ret to be the same as incr")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Log10 failed: %v", err)
}
}
func TestAbs(t *testing.T) {
var r *rand.Rand
absFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
zeros := New(Of(q.Dtype()), WithShape(q.Shape().Clone()...))
correct := New(Of(Bool), WithShape(q.Shape().Clone()...))
correct.Memset(true)
// we'll exclude everything other than ordtypes because complex numbers cannot be abs'd
if err := typeclassCheck(a.Dtype(), ordTypes); err != nil {
return true
}
we, willFailEq := willerr(a, signedTypes, nil)
_, ok := q.Engine().(Abser)
we = we || !ok
ret, err := Abs(a)
if err, retEarly := qcErrCheck(t, "Abs", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
check, _ := Gte(ret, zeros)
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), check.Data()) {
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(absFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests for Abs failed: %v", err)
}
}
func TestTanh(t *testing.T) {
var r *rand.Rand
// default
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Tanher)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Tanh(a)
if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
switch a.Dtype() {
case Float64:
if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
case Float32:
if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests for Tanh failed: %v", err)
}
// unsafe
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Tanher)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Tanh(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
switch a.Dtype() {
case Float64:
if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
case Float32:
if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Tanh failed: %v", err)
}
// reuse
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
reuse := a.Clone().(*Dense)
reuse.Zero()
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Tanher)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Tanh(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
switch a.Dtype() {
case Float64:
if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
case Float32:
if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != reuse {
t.Errorf("Expected ret to be the same as reuse")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Tanh failed: %v", err)
}
// incr
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Tanher)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Tanh(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Tanh", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
switch a.Dtype() {
case Float64:
if ret, err = ret.Apply(math.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
case Float32:
if ret, err = ret.Apply(math32.Atan, UseUnsafe()); err != nil {
t.Error(err)
return false
}
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != incr {
t.Errorf("Expected ret to be the same as incr")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Tanh failed: %v", err)
}
}
func TestLog2(t *testing.T) {
var r *rand.Rand
// default
invFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log2er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log2(a)
if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
two := identityVal(2, a.Dtype())
Pow(two, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests for Log2 failed: %v", err)
}
// unsafe
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log2er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log2(a, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
two := identityVal(2, a.Dtype())
Pow(two, b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Log2 failed: %v", err)
}
// reuse
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
correct := a.Clone().(*Dense)
reuse := a.Clone().(*Dense)
reuse.Zero()
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log2er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log2(a, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
two := identityVal(2, a.Dtype())
Pow(two, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != reuse {
t.Errorf("Expected ret to be the same as reuse")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Log2 failed: %v", err)
}
// incr
invFn = func(q *Dense) bool {
a := q.Clone().(*Dense)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatTypes, nil)
_, ok := q.Engine().(Log2er)
we = we || !ok
// we'll exclude everything other than floats
if err := typeclassCheck(a.Dtype(), floatTypes); err != nil {
return true
}
ret, err := Log2(a, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Log2", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if ret, err = Sub(ret, identityVal(100, a.Dtype()), UseUnsafe()); err != nil {
t.Errorf("err while subtracting incr: %v", err)
return false
}
two := identityVal(2, a.Dtype())
Pow(two, ret, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != incr {
t.Errorf("Expected ret to be the same as incr")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(invFn, &quick.Config{Rand: r}); err != nil {
t.Errorf("Inv tests using unsafe for Log2 failed: %v", err)
}
}
tensor-0.9.24/api_utils.go
package tensor
import (
"log"
"math"
"math/rand"
"reflect"
"sort"
"github.com/chewxy/math32"
)
// SortIndex is similar to numpy's argsort
// TODO: tidy this up
func SortIndex(in interface{}) (out []int) {
switch list := in.(type) {
case []int:
orig := make([]int, len(list))
out = make([]int, len(list))
copy(orig, list)
sort.Ints(list)
for i, s := range list {
for j, o := range orig {
if o == s {
out[i] = j
break
}
}
}
case []float64:
orig := make([]float64, len(list))
out = make([]int, len(list))
copy(orig, list)
sort.Float64s(list)
for i, s := range list {
for j, o := range orig {
if o == s {
out[i] = j
break
}
}
}
case sort.Interface:
sort.Sort(list)
log.Printf("TODO: SortIndex for sort.Interface not yet done.")
}
return
}
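// A minimal usage sketch for SortIndex (illustrative, based on the []float64
// branch above; note that the input slice is sorted in place as a side effect):
//
//	vals := []float64{3.1, 1.2, 2.5}
//	idx := SortIndex(vals)
//	// idx is now []int{1, 2, 0}, the original positions of the values in
//	// ascending order, and vals has been sorted to []float64{1.2, 2.5, 3.1}.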
// SampleIndex samples a slice or a Tensor.
// TODO: tidy this up.
func SampleIndex(in interface{}) int {
// var l int
switch list := in.(type) {
case []int:
var sum, i int
// l = len(list)
r := rand.Int()
for {
sum += list[i]
if sum > r && i > 0 {
return i
}
i++
}
case []float64:
var sum float64
var i int
// l = len(list)
r := rand.Float64()
for {
sum += list[i]
if sum > r && i > 0 {
return i
}
i++
}
case *Dense:
var i int
switch list.t.Kind() {
case reflect.Float64:
var sum float64
r := rand.Float64()
data := list.Float64s()
// l = len(data)
for {
datum := data[i]
if math.IsNaN(datum) || math.IsInf(datum, 0) {
return i
}
sum += datum
if sum > r && i > 0 {
return i
}
i++
}
case reflect.Float32:
var sum float32
r := rand.Float32()
data := list.Float32s()
// l = len(data)
for {
datum := data[i]
if math32.IsNaN(datum) || math32.IsInf(datum, 0) {
return i
}
sum += datum
if sum > r && i > 0 {
return i
}
i++
}
default:
panic("not yet implemented")
}
default:
panic("Not yet implemented")
}
return -1
}
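// A minimal usage sketch for SampleIndex (illustrative; the []float64 and
// *Dense branches assume something like a probability distribution: a uniform
// draw in [0,1) is compared against the running cumulative sum):
//
//	probs := []float64{0.1, 0.6, 0.3}
//	i := SampleIndex(probs) // i is the index at which the cumulative sum exceeds the draw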
tensor-0.9.24/array.go
package tensor
import (
"fmt"
"reflect"
"sync"
"unsafe"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// array is the underlying generic array.
type array struct {
storage.Header // the header - the Go representation (a slice)
t Dtype // the element type
}
// makeArray makes an array. The memory allocation is handled by Go
func makeArray(t Dtype, length int) array {
v := malloc(t, length)
hdr := storage.Header{
Raw: v,
}
return array{
Header: hdr,
t: t,
}
}
// arrayFromSlice creates an array from a slice. If x is not a slice, it will panic.
func arrayFromSlice(x interface{}) array {
xT := reflect.TypeOf(x)
if xT.Kind() != reflect.Slice {
panic("Expected a slice")
}
elT := xT.Elem()
return array{
Header: storage.Header{
Raw: storage.AsByteSlice(x),
},
t: Dtype{elT},
}
}
func (a *array) Len() int { return a.Header.TypedLen(a.t.Type) }
func (a *array) Cap() int { return a.Header.TypedLen(a.t.Type) }
// fromSlice populates the value from a slice
func (a *array) fromSlice(x interface{}) {
xT := reflect.TypeOf(x)
if xT.Kind() != reflect.Slice {
panic("Expected a slice")
}
elT := xT.Elem()
a.Raw = storage.AsByteSlice(x)
a.t = Dtype{elT}
}
// fromSliceOrArrayer populates the value from a slice or anything that can form an array
func (a *array) fromSliceOrArrayer(x interface{}) {
if T, ok := x.(arrayer); ok {
xp := T.arrPtr()
// if the underlying array hasn't been allocated, or not enough has been allocated
if a.Header.Raw == nil {
a.Header.Raw = malloc(xp.t, xp.Len())
}
a.t = xp.t
copyArray(a, T.arrPtr())
return
}
a.fromSlice(x)
}
// byteSlice casts the underlying slice into a byte slice. Useful for copying and zeroing, but not much else
func (a array) byteSlice() []byte { return a.Header.Raw }
// sliceInto creates a slice. Instead of returning an array, which would cause a lot of reallocations, sliceInto expects an array to
// already have been created. This allows repetitive actions to be done without many pointless allocations.
func (a *array) sliceInto(i, j int, res *array) {
c := a.Cap()
if i < 0 || j < i || j > c {
panic(fmt.Sprintf("Cannot slice %v - index %d:%d is out of bounds", a, i, j))
}
s := i * int(a.t.Size())
e := j * int(a.t.Size())
c = c - i
res.Raw = a.Raw[s:e]
}
// slice slices an array
func (a array) slice(start, end int) array {
if end > a.Len() {
panic("Index out of range")
}
if end < start {
panic("Index out of range")
}
s := start * int(a.t.Size())
e := end * int(a.t.Size())
return array{
Header: storage.Header{Raw: a.Raw[s:e]},
t: a.t,
}
}
// swap swaps the elements i and j in the array
func (a *array) swap(i, j int) {
if a.t == String {
ss := a.hdr().Strings()
ss[i], ss[j] = ss[j], ss[i]
return
}
if !isParameterizedKind(a.t.Kind()) {
switch a.t.Size() {
case 8:
us := a.hdr().Uint64s()
us[i], us[j] = us[j], us[i]
case 4:
us := a.hdr().Uint32s()
us[i], us[j] = us[j], us[i]
case 2:
us := a.hdr().Uint16s()
us[i], us[j] = us[j], us[i]
case 1:
us := a.hdr().Uint8s()
us[i], us[j] = us[j], us[i]
}
return
}
size := int(a.t.Size())
tmp := make([]byte, size)
bs := a.byteSlice()
is := i * size
ie := is + size
js := j * size
je := js + size
copy(tmp, bs[is:ie])
copy(bs[is:ie], bs[js:je])
copy(bs[js:je], tmp)
}
/* *Array is a Memory */
// Uintptr returns the pointer of the first value of the slab
func (a *array) Uintptr() uintptr { return uintptr(unsafe.Pointer(&a.Header.Raw[0])) }
// MemSize returns how big the slice is in bytes
func (a *array) MemSize() uintptr { return uintptr(len(a.Header.Raw)) }
// Data returns the underlying data as a slice of the array's element type (e.g. []float64), reconstructed from the raw byte storage.
func (a array) Data() interface{} {
// build a type of []T
shdr := reflect.SliceHeader{
Data: a.Uintptr(),
Len: a.Len(),
Cap: a.Cap(),
}
sliceT := reflect.SliceOf(a.t.Type)
ptr := unsafe.Pointer(&shdr)
val := reflect.Indirect(reflect.NewAt(sliceT, ptr))
return val.Interface()
}
// Zero zeroes out the underlying array of the *Dense tensor.
func (a array) Zero() {
if a.t.Kind() == reflect.String {
ss := a.Strings()
for i := range ss {
ss[i] = ""
}
return
}
if !isParameterizedKind(a.t.Kind()) {
ba := a.byteSlice()
for i := range ba {
ba[i] = 0
}
return
}
l := a.Len()
for i := 0; i < l; i++ {
val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
val = reflect.Indirect(val)
val.Set(reflect.Zero(a.t))
}
}
func (a *array) hdr() *storage.Header { return &a.Header }
func (a *array) rtype() reflect.Type { return a.t.Type }
/* MEMORY MOVEMENT STUFF */
// malloc is standard Go allocation of a block of memory - the plus side is that Go manages the memory
func malloc(t Dtype, length int) []byte {
size := int(calcMemSize(t, length))
return make([]byte, size)
}
// calcMemSize calculates the memory size of an array (given its size)
func calcMemSize(dt Dtype, size int) int64 {
return int64(dt.Size()) * int64(size)
}
// copyArray copies an array.
func copyArray(dst, src *array) int {
if dst.t != src.t {
panic("Cannot copy arrays of different types.")
}
return storage.Copy(dst.t.Type, &dst.Header, &src.Header)
}
func copyArraySliced(dst array, dstart, dend int, src array, sstart, send int) int {
if dst.t != src.t {
panic("Cannot copy arrays of different types.")
}
return storage.CopySliced(dst.t.Type, &dst.Header, dstart, dend, &src.Header, sstart, send)
}
// copyDense copies a DenseTensor
func copyDense(dst, src DenseTensor) int {
if dst.Dtype() != src.Dtype() {
panic("Cannot dopy DenseTensors of different types")
}
if ms, ok := src.(MaskedTensor); ok && ms.IsMasked() {
if md, ok := dst.(MaskedTensor); ok {
dmask := md.Mask()
smask := ms.Mask()
if cap(dmask) < len(smask) {
dmask = make([]bool, len(smask))
copy(dmask, md.Mask())
md.SetMask(dmask)
}
copy(dmask, smask)
}
}
e := src.Engine()
if err := e.Memcpy(dst.arrPtr(), src.arrPtr()); err != nil {
panic(err)
}
return dst.len()
// return copyArray(dst.arr(), src.arr())
}
// copyDenseSliced copies a DenseTensor, but both are sliced
func copyDenseSliced(dst DenseTensor, dstart, dend int, src DenseTensor, sstart, send int) int {
if dst.Dtype() != src.Dtype() {
panic("Cannot copy DenseTensors of different types")
}
if ms, ok := src.(MaskedTensor); ok && ms.IsMasked() {
if md, ok := dst.(MaskedTensor); ok {
dmask := md.Mask()
smask := ms.Mask()
if cap(dmask) < dend {
dmask = make([]bool, dend)
copy(dmask, md.Mask())
md.SetMask(dmask)
}
copy(dmask[dstart:dend], smask[sstart:send])
}
}
if e := src.Engine(); e != nil {
darr := dst.arr()
sarr := src.arr()
da := darr.slice(dstart, dend)
sa := sarr.slice(sstart, send)
switch e.(type) {
case NonStdEngine:
if err := e.Memcpy(&da, &sa); err != nil {
panic(err)
}
default:
// THIS IS AN OPTIMIZATION. REVISIT WHEN NEEDED.
//
// THE PURPOSE of this optimization is to make this perform better under
// default circumstances.
//
// The original code simply uses t.Engine().Memcpy(&dSlice, &tSlice).
// A variant can still be seen in the NonStdEngine case above.
//
// The `array.slice()` method has been optimized to return `array2`, which is a
// non-heap allocated type.
// a value of `array2` cannot have its address taken - e.g.
// var a array2
// doSomething(&a) // ← this cannot be done
//
// We *could* make `array2` implement Memory. But then a lot of runtime.convT2I and
// runtime.convI2T would be called. Which defeats the purpose of making things fast.
//
// So instead, we check to see if the Engine uses standard allocation methods.
// Typically this means `StdEng`.
//
// If so, we directly use storage.Copy instead of using the engine
storage.Copy(da.t.Type, &da.Header, &sa.Header)
}
return da.Len()
}
return copyArraySliced(dst.arr(), dstart, dend, src.arr(), sstart, send)
}
// copyDenseIter copies a DenseTensor, with iterator
func copyDenseIter(dst, src DenseTensor, diter, siter Iterator) (int, error) {
if dst.Dtype() != src.Dtype() {
panic("Cannot copy Dense arrays of different types")
}
// if they all don't need iterators, and have the same data order
if !dst.RequiresIterator() && !src.RequiresIterator() && dst.DataOrder().HasSameOrder(src.DataOrder()) {
return copyDense(dst, src), nil
}
if !dst.IsNativelyAccessible() {
return 0, errors.Errorf(inaccessibleData, dst)
}
if !src.IsNativelyAccessible() {
return 0, errors.Errorf(inaccessibleData, src)
}
if diter == nil {
diter = FlatIteratorFromDense(dst)
}
if siter == nil {
siter = FlatIteratorFromDense(src)
}
// if it's a masked tensor, we copy the mask as well
if ms, ok := src.(MaskedTensor); ok && ms.IsMasked() {
if md, ok := dst.(MaskedTensor); ok {
dmask := md.Mask()
smask := ms.Mask()
if cap(dmask) < len(smask) {
dmask = make([]bool, len(smask))
copy(dmask, md.Mask())
md.SetMask(dmask)
}
copy(dmask, smask)
}
}
return storage.CopyIter(dst.rtype(), dst.hdr(), src.hdr(), diter, siter), nil
}
type scalarPtrCount struct {
Ptr unsafe.Pointer
Count int
}
// scalarRCLock is a lock for the reference counting list.
var scalarRCLock sync.Mutex
// scalarRC is a bunch of reference counted pointers to scalar values
var scalarRC = make(map[uintptr]*sync.Pool) // uintptr is the size, the pool stores []byte
func scalarPool(size uintptr) *sync.Pool {
scalarRCLock.Lock()
pool, ok := scalarRC[size]
if !ok {
pool = &sync.Pool{
New: func() interface{} { return make([]byte, size) },
}
scalarRC[size] = pool
}
scalarRCLock.Unlock()
return pool
}
func allocScalar(a interface{}) []byte {
atype := reflect.TypeOf(a)
size := atype.Size()
pool := scalarPool(size)
return pool.Get().([]byte)
}
func freeScalar(bs []byte) {
if bs == nil {
return
}
// zero out
for i := range bs {
bs[i] = 0
}
size := uintptr(len(bs))
// put it back into pool
pool := scalarPool(size)
pool.Put(bs)
}
// scalarToHeader creates a Header from a scalar value
func scalarToHeader(a interface{}) (hdr *storage.Header, newAlloc bool) {
var raw []byte
switch at := a.(type) {
case Memory:
raw = storage.FromMemory(at.Uintptr(), at.MemSize())
default:
raw = allocScalar(a)
newAlloc = true
}
hdr = borrowHeader()
hdr.Raw = raw
if newAlloc {
copyScalarToPrealloc(a, hdr.Raw)
}
return hdr, newAlloc
}
func copyScalarToPrealloc(a interface{}, bs []byte) {
xV := reflect.ValueOf(a)
xT := reflect.TypeOf(a)
p := unsafe.Pointer(&bs[0])
v := reflect.NewAt(xT, p)
reflect.Indirect(v).Set(xV)
return
}
tensor-0.9.24/array_getset.go
// Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"reflect"
"unsafe"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Set sets the value of the underlying array at the index i.
func (a *array) Set(i int, x interface{}) {
switch a.t.Kind() {
case reflect.Bool:
xv := x.(bool)
a.SetB(i, xv)
case reflect.Int:
xv := x.(int)
a.SetI(i, xv)
case reflect.Int8:
xv := x.(int8)
a.SetI8(i, xv)
case reflect.Int16:
xv := x.(int16)
a.SetI16(i, xv)
case reflect.Int32:
xv := x.(int32)
a.SetI32(i, xv)
case reflect.Int64:
xv := x.(int64)
a.SetI64(i, xv)
case reflect.Uint:
xv := x.(uint)
a.SetU(i, xv)
case reflect.Uint8:
xv := x.(uint8)
a.SetU8(i, xv)
case reflect.Uint16:
xv := x.(uint16)
a.SetU16(i, xv)
case reflect.Uint32:
xv := x.(uint32)
a.SetU32(i, xv)
case reflect.Uint64:
xv := x.(uint64)
a.SetU64(i, xv)
case reflect.Uintptr:
xv := x.(uintptr)
a.SetUintptr(i, xv)
case reflect.Float32:
xv := x.(float32)
a.SetF32(i, xv)
case reflect.Float64:
xv := x.(float64)
a.SetF64(i, xv)
case reflect.Complex64:
xv := x.(complex64)
a.SetC64(i, xv)
case reflect.Complex128:
xv := x.(complex128)
a.SetC128(i, xv)
case reflect.String:
xv := x.(string)
a.SetStr(i, xv)
case reflect.UnsafePointer:
xv := x.(unsafe.Pointer)
a.SetUnsafePointer(i, xv)
default:
xv := reflect.ValueOf(x)
val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
val = reflect.Indirect(val)
val.Set(xv)
}
}
// Get returns the ith element of the underlying array of the *Dense tensor.
func (a *array) Get(i int) interface{} {
switch a.t.Kind() {
case reflect.Bool:
return a.GetB(i)
case reflect.Int:
return a.GetI(i)
case reflect.Int8:
return a.GetI8(i)
case reflect.Int16:
return a.GetI16(i)
case reflect.Int32:
return a.GetI32(i)
case reflect.Int64:
return a.GetI64(i)
case reflect.Uint:
return a.GetU(i)
case reflect.Uint8:
return a.GetU8(i)
case reflect.Uint16:
return a.GetU16(i)
case reflect.Uint32:
return a.GetU32(i)
case reflect.Uint64:
return a.GetU64(i)
case reflect.Uintptr:
return a.GetUintptr(i)
case reflect.Float32:
return a.GetF32(i)
case reflect.Float64:
return a.GetF64(i)
case reflect.Complex64:
return a.GetC64(i)
case reflect.Complex128:
return a.GetC128(i)
case reflect.String:
return a.GetStr(i)
case reflect.UnsafePointer:
return a.GetUnsafePointer(i)
default:
val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
val = reflect.Indirect(val)
return val.Interface()
}
}
// Memset sets all values in the array.
func (a *array) Memset(x interface{}) error {
switch a.t {
case Bool:
if xv, ok := x.(bool); ok {
data := a.Bools()
for i := range data {
data[i] = xv
}
return nil
}
case Int:
if xv, ok := x.(int); ok {
data := a.Ints()
for i := range data {
data[i] = xv
}
return nil
}
case Int8:
if xv, ok := x.(int8); ok {
data := a.Int8s()
for i := range data {
data[i] = xv
}
return nil
}
case Int16:
if xv, ok := x.(int16); ok {
data := a.Int16s()
for i := range data {
data[i] = xv
}
return nil
}
case Int32:
if xv, ok := x.(int32); ok {
data := a.Int32s()
for i := range data {
data[i] = xv
}
return nil
}
case Int64:
if xv, ok := x.(int64); ok {
data := a.Int64s()
for i := range data {
data[i] = xv
}
return nil
}
case Uint:
if xv, ok := x.(uint); ok {
data := a.Uints()
for i := range data {
data[i] = xv
}
return nil
}
case Uint8:
if xv, ok := x.(uint8); ok {
data := a.Uint8s()
for i := range data {
data[i] = xv
}
return nil
}
case Uint16:
if xv, ok := x.(uint16); ok {
data := a.Uint16s()
for i := range data {
data[i] = xv
}
return nil
}
case Uint32:
if xv, ok := x.(uint32); ok {
data := a.Uint32s()
for i := range data {
data[i] = xv
}
return nil
}
case Uint64:
if xv, ok := x.(uint64); ok {
data := a.Uint64s()
for i := range data {
data[i] = xv
}
return nil
}
case Uintptr:
if xv, ok := x.(uintptr); ok {
data := a.Uintptrs()
for i := range data {
data[i] = xv
}
return nil
}
case Float32:
if xv, ok := x.(float32); ok {
data := a.Float32s()
for i := range data {
data[i] = xv
}
return nil
}
case Float64:
if xv, ok := x.(float64); ok {
data := a.Float64s()
for i := range data {
data[i] = xv
}
return nil
}
case Complex64:
if xv, ok := x.(complex64); ok {
data := a.Complex64s()
for i := range data {
data[i] = xv
}
return nil
}
case Complex128:
if xv, ok := x.(complex128); ok {
data := a.Complex128s()
for i := range data {
data[i] = xv
}
return nil
}
case String:
if xv, ok := x.(string); ok {
data := a.Strings()
for i := range data {
data[i] = xv
}
return nil
}
case UnsafePointer:
if xv, ok := x.(unsafe.Pointer); ok {
data := a.UnsafePointers()
for i := range data {
data[i] = xv
}
return nil
}
}
xv := reflect.ValueOf(x)
l := a.Len()
for i := 0; i < l; i++ {
val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
val = reflect.Indirect(val)
val.Set(xv)
}
return nil
}
func (a *array) memsetIter(x interface{}, it Iterator) (err error) {
var i int
switch a.t {
case Bool:
xv, ok := x.(bool)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Bools()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Int:
xv, ok := x.(int)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Ints()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Int8:
xv, ok := x.(int8)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Int8s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Int16:
xv, ok := x.(int16)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Int16s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Int32:
xv, ok := x.(int32)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Int32s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Int64:
xv, ok := x.(int64)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Int64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Uint:
xv, ok := x.(uint)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Uints()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Uint8:
xv, ok := x.(uint8)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Uint8s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Uint16:
xv, ok := x.(uint16)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Uint16s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Uint32:
xv, ok := x.(uint32)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Uint32s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Uint64:
xv, ok := x.(uint64)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Uint64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Uintptr:
xv, ok := x.(uintptr)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Uintptrs()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Float32:
xv, ok := x.(float32)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Float32s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Float64:
xv, ok := x.(float64)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Float64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Complex64:
xv, ok := x.(complex64)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Complex64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case Complex128:
xv, ok := x.(complex128)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Complex128s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case String:
xv, ok := x.(string)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.Strings()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
case UnsafePointer:
xv, ok := x.(unsafe.Pointer)
if !ok {
return errors.Errorf(dtypeMismatch, a.t, x)
}
data := a.UnsafePointers()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = xv
}
err = handleNoOp(err)
default:
xv := reflect.ValueOf(x)
for i, err = it.Next(); err == nil; i, err = it.Next() {
val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
val = reflect.Indirect(val)
val.Set(xv)
}
err = handleNoOp(err)
}
return
}
// Eq checks that any two arrays are equal
func (a array) Eq(other interface{}) bool {
if oa, ok := other.(*array); ok {
if oa.t != a.t {
return false
}
if oa.Len() != a.Len() {
return false
}
/*
if oa.C != a.C {
return false
}
*/
// same exact thing
if uintptr(unsafe.Pointer(&oa.Header.Raw[0])) == uintptr(unsafe.Pointer(&a.Header.Raw[0])) {
return true
}
switch a.t.Kind() {
case reflect.Bool:
for i, v := range a.Bools() {
if oa.GetB(i) != v {
return false
}
}
case reflect.Int:
for i, v := range a.Ints() {
if oa.GetI(i) != v {
return false
}
}
case reflect.Int8:
for i, v := range a.Int8s() {
if oa.GetI8(i) != v {
return false
}
}
case reflect.Int16:
for i, v := range a.Int16s() {
if oa.GetI16(i) != v {
return false
}
}
case reflect.Int32:
for i, v := range a.Int32s() {
if oa.GetI32(i) != v {
return false
}
}
case reflect.Int64:
for i, v := range a.Int64s() {
if oa.GetI64(i) != v {
return false
}
}
case reflect.Uint:
for i, v := range a.Uints() {
if oa.GetU(i) != v {
return false
}
}
case reflect.Uint8:
for i, v := range a.Uint8s() {
if oa.GetU8(i) != v {
return false
}
}
case reflect.Uint16:
for i, v := range a.Uint16s() {
if oa.GetU16(i) != v {
return false
}
}
case reflect.Uint32:
for i, v := range a.Uint32s() {
if oa.GetU32(i) != v {
return false
}
}
case reflect.Uint64:
for i, v := range a.Uint64s() {
if oa.GetU64(i) != v {
return false
}
}
case reflect.Uintptr:
for i, v := range a.Uintptrs() {
if oa.GetUintptr(i) != v {
return false
}
}
case reflect.Float32:
for i, v := range a.Float32s() {
if oa.GetF32(i) != v {
return false
}
}
case reflect.Float64:
for i, v := range a.Float64s() {
if oa.GetF64(i) != v {
return false
}
}
case reflect.Complex64:
for i, v := range a.Complex64s() {
if oa.GetC64(i) != v {
return false
}
}
case reflect.Complex128:
for i, v := range a.Complex128s() {
if oa.GetC128(i) != v {
return false
}
}
case reflect.String:
for i, v := range a.Strings() {
if oa.GetStr(i) != v {
return false
}
}
case reflect.UnsafePointer:
for i, v := range a.UnsafePointers() {
if oa.GetUnsafePointer(i) != v {
return false
}
}
default:
for i := 0; i < a.Len(); i++ {
if !reflect.DeepEqual(a.Get(i), oa.Get(i)) {
return false
}
}
}
return true
}
return false
}
func (a *array) zeroIter(it Iterator) (err error) {
var i int
switch a.t {
case Bool:
data := a.Bools()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = false
}
err = handleNoOp(err)
case Int:
data := a.Ints()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Int8:
data := a.Int8s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Int16:
data := a.Int16s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Int32:
data := a.Int32s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Int64:
data := a.Int64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Uint:
data := a.Uints()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Uint8:
data := a.Uint8s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Uint16:
data := a.Uint16s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Uint32:
data := a.Uint32s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Uint64:
data := a.Uint64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Uintptr:
data := a.Uintptrs()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Float32:
data := a.Float32s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Float64:
data := a.Float64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Complex64:
data := a.Complex64s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case Complex128:
data := a.Complex128s()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = 0
}
err = handleNoOp(err)
case String:
data := a.Strings()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = ""
}
err = handleNoOp(err)
case UnsafePointer:
data := a.UnsafePointers()
for i, err = it.Next(); err == nil; i, err = it.Next() {
data[i] = nil
}
err = handleNoOp(err)
default:
for i, err = it.Next(); err == nil; i, err = it.Next() {
val := reflect.NewAt(a.t.Type, storage.ElementAt(i, unsafe.Pointer(&a.Header.Raw[0]), a.t.Size()))
val = reflect.Indirect(val)
val.Set(reflect.Zero(a.t))
}
err = handleNoOp(err)
}
return
}
tensor-0.9.24/bench.sh
#!/bin/sh
old=$1;
new=$2;
git checkout $old
# https://stackoverflow.com/a/2111099
branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,')
echo "Benchmarking $branch (old)"
go test -run=$^ -bench=. > ${branch}.bench
for i in {1..10}
do
go test -run=$^ -bench=. >> ${branch}.bench
done
git checkout $new
branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,')
echo "Benchmarking $branch (new)"
go test -run=$^ -bench=. > ${branch}.bench
for i in {1..10}
do
go test -run=$^ -bench=. >> ${branch}.bench
done
tensor-0.9.24/benchmark_dense_arith_test.go
package tensor
import (
"testing"
"gorgonia.org/vecf64"
)
func BenchmarkDense_Mul_Unsafe(b *testing.B) {
A := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 0, 100*100*2)))
B := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 1, (100*100*2)+1)))
for i := 0; i < b.N; i++ {
A.Mul(B, UseUnsafe())
}
}
func BenchmarkNative_Mul_Unsafe(b *testing.B) {
A := Range(Float64, 0, 100*100*2).([]float64)
B := Range(Float64, 1, (100*100*2)+1).([]float64)
f := func(a, b []float64) {
for i, v := range a {
a[i] = v * b[i]
}
}
for i := 0; i < b.N; i++ {
f(A, B)
}
}
func BenchmarkNative_Mul_Unsafe_vec(b *testing.B) {
A := Range(Float64, 0, 100*100*2).([]float64)
B := Range(Float64, 1, (100*100*2)+1).([]float64)
for i := 0; i < b.N; i++ {
vecf64.Mul(A, B)
}
}
func BenchmarkAPI_Mul_Unsafe(b *testing.B) {
A := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 0, 100*100*2)))
B := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 1, (100*100*2)+1)))
for i := 0; i < b.N; i++ {
Mul(A, B, UseUnsafe())
}
}
func BenchmarkDense_ContiguousSliced_Mul_Unsafe(b *testing.B) {
A := New(WithShape(4, 100, 100), WithBacking(Range(Float64, 0, 100*100*4)))
B := New(WithShape(2, 100, 100), WithBacking(Range(Float64, 1, (100*100*2)+1)))
Sliced, _ := A.Slice(makeRS(1, 3)) // result should be contiguous
for i := 0; i < b.N; i++ {
Mul(Sliced, B, UseUnsafe())
}
}
func BenchmarkDense_NonContiguousSliced_Mul_Unsafe(b *testing.B) {
A := New(WithShape(100, 4, 100), WithBacking(Range(Float64, 0, 100*100*4)))
B := New(WithShape(100, 2, 100), WithBacking(Range(Float64, 1, (100*100*2)+1)))
Sliced, _ := A.Slice(nil, makeRS(1, 3)) // result should be non-contiguous
for i := 0; i < b.N; i++ {
Mul(Sliced, B, UseUnsafe())
}
}
func BenchmarkAPI_MulScalar_Unsafe(b *testing.B) {
A := New(WithShape(100, 100, 2), WithBacking(Range(Float64, 0, 100*100*2)))
B := 3.141
for i := 0; i < b.N; i++ {
Mul(A, B, UseUnsafe())
}
}
func BenchmarkNative_MulScalar_Unsafe(b *testing.B) {
A := Range(Float64, 0, 100*100*2).([]float64)
B := 3.141
f := func(a []float64, b float64) {
for i := range a {
a[i] *= b
}
}
for i := 0; i < b.N; i++ {
f(A, B)
}
}
tensor-0.9.24/benchmark_dense_matop_test.go
package tensor
import (
"math/rand"
"testing"
)
func BenchmarkDense_Transpose(b *testing.B) {
T := New(WithShape(100, 100, 2), WithBacking(Range(Byte, 0, 100*100*2)))
for i := 0; i < b.N; i++ {
T.T()
T.Transpose()
}
}
func BenchmarkNativeSet(b *testing.B) {
T := New(WithShape(10000), Of(Float64))
data := T.Data().([]float64)
for i := 0; i < b.N; i++ {
for next := 0; next < 10000; next++ {
data[next] = float64(next + 1)
}
}
}
func BenchmarkSetMethod(b *testing.B) {
T := New(WithShape(10000), Of(Float64))
for i := 0; i < b.N; i++ {
for next := 0; next < 10000; next++ {
T.Set(next, float64(next+1))
}
}
}
func BenchmarkNativeGet(b *testing.B) {
T := New(WithShape(10000), Of(Float64))
data := T.Data().([]float64)
var f float64
for i := 0; i < b.N; i++ {
for next := 0; next < 10000; next++ {
f = data[next]
}
}
_ = f
}
func BenchmarkGetMethod(b *testing.B) {
T := New(WithShape(10000), Of(Float64))
var f float64
for i := 0; i < b.N; i++ {
for next := 0; next < 10000; next++ {
f = T.Get(next).(float64)
}
}
_ = f
}
func BenchmarkGetWithIterator(b *testing.B) {
T := New(WithShape(100, 100), Of(Float64))
var f float64
data := T.Data().([]float64)
for i := 0; i < b.N; i++ {
it := IteratorFromDense(T)
var next int
var err error
for next, err = it.Start(); err == nil; next, err = it.Next() {
f = data[next]
}
if _, ok := err.(NoOpError); !ok {
b.Errorf("Error: %v", err)
}
}
_ = f
}
func BenchmarkComplicatedGet(b *testing.B) {
T := New(WithShape(101, 1, 36, 5), Of(Float64))
T.T(0, 2, 1, 3)
data := T.Data().([]float64)
var f float64
b.ResetTimer()
for i := 0; i < b.N; i++ {
it := IteratorFromDense(T)
var next int
var err error
for next, err = it.Start(); err == nil; next, err = it.Next() {
f = data[next]
}
if _, ok := err.(NoOpError); !ok {
b.Errorf("Error: %v", err)
}
}
_ = f
}
var atCoords [10000][2]int
func init() {
for i := range atCoords {
atCoords[i][0] = rand.Intn(100)
atCoords[i][1] = rand.Intn(100)
}
}
var at1, at2 float64
// func BenchmarkAtWithNativeIterator(b *testing.B) {
// T := New(WithShape(100, 100), Of(Float64))
// it, err := NativeMatrixF64(T)
// if err != nil {
// b.Fatalf("Error: %v", err)
// }
// var j int
// for i := 0; i < b.N; i++ {
// if j >= len(atCoords) {
// j = 0
// }
// at := atCoords[j]
// at1 = it[at[0]][at[1]]
// j++
// }
// }
func BenchmarkAt(b *testing.B) {
T := New(WithShape(100, 100), Of(Float64))
var j int
for i := 0; i < b.N; i++ {
if j >= len(atCoords) {
j = 0
}
at := atCoords[j]
_, err := T.At(at[0], at[1])
if err != nil {
b.Errorf("Error: %v", err)
}
j++
}
}
tensor-0.9.24/benchmark_dense_repeat_test.go
package tensor
import "testing"
func BenchmarkDenseRepeat(b *testing.B) {
for _, tst := range repeatTests {
tst := tst
b.Run(tst.name, func(b *testing.B) {
for n := 0; n < b.N; n++ {
tst.tensor.Repeat(tst.axis, tst.repeats...)
}
})
}
}
tensor-0.9.24/bitmap.go
package tensor
// BitMap is a very simple bitmap. It only supports Set, IsSet and Clear methods. It's mostly used for tracking which element has been set
type BitMap struct {
n []uint64
max int
}
// NewBitMap creates a new BitMap.
func NewBitMap(size int) *BitMap {
q, r := divmod(size, 64)
if r > 0 {
q++
}
return &BitMap{
n: make([]uint64, q),
max: size,
}
}
// Set sets the ith bit of the bit map to 1. It panics if i is greater or equal to the defined max
func (bm *BitMap) Set(i int) {
if i >= bm.max || i < 0 {
panic("Index out of range")
}
block, pos := divmod(i, 64)
bm.n[block] |= uint64(1) << uint64(pos)
}
// IsSet returns true if the ith bit is set. It panics if i is greater or equal to the defined max
func (bm *BitMap) IsSet(i int) bool {
if i >= bm.max || i < 0 {
panic("Index out of range")
}
block, pos := divmod(i, 64)
return bm.n[block]>>uint64(pos)&uint64(1) == uint64(1)
}
// Clear clears the ith bit. It panics if i is greater or equal to the defined max
func (bm *BitMap) Clear(i int) {
if i >= bm.max || i < 0 {
panic("Index out of range")
}
block, pos := divmod(i, 64)
bm.n[block] &= ^(uint64(1) << uint64(pos))
}
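// A minimal usage sketch for BitMap (illustrative; this mirrors what
// bitmap_test.go exercises):
//
//	bm := NewBitMap(128) // backed by two uint64 blocks
//	bm.Set(70)
//	on := bm.IsSet(70) // true
//	bm.Clear(70)
//	on = bm.IsSet(70) // false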
tensor-0.9.24/bitmap_test.go
package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestBitMap(t *testing.T) {
assert := assert.New(t)
bm := NewBitMap(64)
assert.Equal(1, len(bm.n))
track := uint64(0)
for i := 0; i < 64; i++ {
bm.Set(i)
track |= uint64(1) << uint64(i)
assert.Equal(track, bm.n[0])
assert.Equal(true, bm.IsSet(i))
if i < 63 {
assert.Equal(false, bm.IsSet(i+1))
} else {
fails := func() {
bm.IsSet(i + 1)
}
assert.Panics(fails)
}
}
for i := 0; i < 64; i++ {
bm.Clear(i)
track &= ^(uint64(1) << uint64(i))
assert.Equal(track, bm.n[0])
assert.Equal(false, bm.IsSet(i))
}
bm = NewBitMap(124)
assert.Equal(2, len(bm.n))
track0 := uint64(0)
track1 := uint64(0)
for i := 0; i < 128; i++ {
if i < 124 {
bm.Set(i)
} else {
fails := func() {
bm.Set(i)
}
assert.Panics(fails)
}
if i < 64 {
track0 |= uint64(1) << uint64(i)
assert.Equal(track0, bm.n[0])
assert.Equal(true, bm.IsSet(i))
} else if i > 123 {
fails := func() {
bm.IsSet(i)
}
assert.Panics(fails)
} else {
track1 |= uint64(1) << uint64(i-64)
assert.Equal(track1, bm.n[1])
assert.Equal(true, bm.IsSet(i))
}
if i < 123 {
assert.Equal(false, bm.IsSet(i+1))
} else {
fails := func() {
bm.IsSet(i + 1)
}
assert.Panics(fails)
}
}
for i := 48; i < 70; i++ {
bm.Clear(i)
}
for i := 48; i < 70; i++ {
assert.Equal(false, bm.IsSet(i))
}
fails := func() {
bm.Clear(125)
}
assert.Panics(fails)
// idiots section!
bm = NewBitMap(3)
fails = func() {
bm.Set(-1)
}
assert.Panics(fails)
fails = func() {
bm.Set(3)
}
assert.Panics(fails)
}
tensor-0.9.24/blas.go
package tensor
import (
"sync"
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/blas/gonum"
)
var blasdoor sync.Mutex
var whichblas BLAS
// BLAS represents all the possible implementations of BLAS.
// The default is Gonum's Native
type BLAS interface {
blas.Float32
blas.Float64
blas.Complex64
blas.Complex128
}
// only blastoise.Implementation() and cubone.Implementation() are batchedBLAS -
// they both batch cgo calls (and cubone batches cuda calls)
type batchedBLAS interface {
WorkAvailable() int
DoWork()
BLAS
}
// Use defines which BLAS implementation gorgonia should use.
// The default is Gonum's Native. These are the other options:
// Use(blastoise.Implementation())
// Use(cubone.Implementation())
// Use(cgo.Implementation)
// Note the differences in the brackets. The blastoise and cubone ones are functions.
func Use(b BLAS) {
// close the blast door! close the blast door!
blasdoor.Lock()
// open the blast door! open the blast door!
defer blasdoor.Unlock()
// those lines were few of the better additions to the Special Edition. There, I said it. The Special Edition is superior. Except Han still shot first in my mind.
whichblas = b
}
// WhichBLAS returns the BLAS that gorgonia uses.
func WhichBLAS() BLAS { return whichblas }
func init() {
whichblas = gonum.Implementation{}
}
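// A minimal usage sketch (illustrative): the package already defaults to
// Gonum's native implementation in init(), so the call below is effectively a
// no-op, but it shows the shape of the API:
//
//	Use(gonum.Implementation{})
//	impl := WhichBLAS() // impl is whatever implementation was last passed to Use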
tensor-0.9.24/collections.go
package tensor
import "github.com/pkg/errors"
func densesToTensors(a []*Dense) []Tensor {
retVal := make([]Tensor, len(a))
for i, t := range a {
retVal[i] = t
}
return retVal
}
func densesToDenseTensors(a []*Dense) []DenseTensor {
retVal := make([]DenseTensor, len(a))
for i, t := range a {
retVal[i] = t
}
return retVal
}
func tensorsToDenseTensors(a []Tensor) ([]DenseTensor, error) {
retVal := make([]DenseTensor, len(a))
var ok bool
for i, t := range a {
if retVal[i], ok = t.(DenseTensor); !ok {
return nil, errors.Errorf("can only convert Tensors of the same type to DenseTensors. Trying to convert %T (#%d in slice)", t, i)
}
}
return retVal, nil
}
tensor-0.9.24/consopt.go
package tensor
import (
"reflect"
"gorgonia.org/tensor/internal/storage"
)
// ConsOpt is a tensor construction option.
type ConsOpt func(Tensor)
// Of is a construction option for a Tensor. It specifies the Dtype of the Tensor to be constructed.
func Of(a Dtype) ConsOpt {
Register(a)
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
tt.t = a
case *CS:
tt.t = a
default:
panic("Unsupported Tensor type")
}
}
return f
}
// WithBacking is a construction option for a Tensor
// Use it as such:
// backing := []float64{1,2,3,4}
// t := New(WithBacking(backing))
// It can be used with other construction options like WithShape
func WithBacking(x interface{}, argMask ...[]bool) ConsOpt {
var mask []bool
if len(argMask) > 0 {
mask = argMask[0]
}
f := func(t Tensor) {
if x == nil {
return
}
switch tt := t.(type) {
case *Dense:
tt.fromSlice(x)
if len(argMask) > 0 {
tt.addMask(mask)
}
default:
panic("Unsupported Tensor type")
}
}
return f
}
// WithMask is a construction option for a Tensor
// Use it as such:
// mask := []bool{true,true,false,false}
// t := New(WithBacking(backing), WithMask(mask))
// It can be used with other construction options like WithShape
// The supplied mask can be any type. If non-boolean, then the tensor's mask is set to true
// wherever a non-zero value is obtained.
func WithMask(x interface{}) ConsOpt {
f := func(t Tensor) {
if x == nil {
return
}
switch tt := t.(type) {
case *Dense:
tt.MaskFromSlice(x)
default:
panic("Unsupported Tensor type")
}
}
return f
}
// WithShape is a construction option for a Tensor. It creates the ndarray in the required shape.
func WithShape(dims ...int) ConsOpt {
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
throw := BorrowInts(len(dims))
copy(throw, dims)
tt.setShape(throw...)
case *CS:
if len(dims) != 2 {
panic("Only sparse matrices are supported")
}
throw := BorrowInts(len(dims))
copy(throw, dims)
tt.s = throw
default:
panic("Unsupported Tensor type")
}
}
return f
}
// FromScalar is a construction option for representing a scalar value as a Tensor
func FromScalar(x interface{}, argMask ...[]bool) ConsOpt {
var mask []bool
if len(argMask) > 0 {
mask = argMask[0]
}
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
xT := reflect.TypeOf(x)
sxT := reflect.SliceOf(xT)
xv := reflect.MakeSlice(sxT, 1, 1) // []T
xv0 := xv.Index(0) // xv[0]
xv0.Set(reflect.ValueOf(x))
tt.array.Header.Raw = storage.AsByteSlice(xv.Interface())
tt.t = Dtype{xT}
tt.mask = mask
default:
panic("Unsupported Tensor Type")
}
}
return f
}
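// A minimal usage sketch for FromScalar (illustrative; it assumes FromScalar
// composes with New the same way the other construction options in this file do):
//
//	s := New(FromScalar(3.14)) // a scalar tensor backed by a single float64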
// FromMemory is a construction option for creating a *Dense (for now) from a memory location. This is a useful
// option for super large tensors that don't fit into memory - the user may need to `mmap` a file to back the tensor.
//
// Bear in mind that at the current stage of the ConsOpt design, the order of the ConsOpt is important.
// FromMemory requires the *Dense's Dtype be set already.
// This would fail (and panic):
// New(FromMemory(ptr, size), Of(Float64))
// This would not:
// New(Of(Float64), FromMemory(ptr, size))
// This behaviour of requiring the ConsOpts to be in order might be changed in the future.
//
// Memory must be manually managed by the caller.
// Tensors called with this construction option will not be returned to any pool - rather, all references to the pointers will be null'd.
// Use with caution.
//go:nocheckptr
func FromMemory(ptr uintptr, memsize uintptr) ConsOpt {
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
tt.Header.Raw = nil // GC anything if needed
tt.Header.Raw = storage.FromMemory(ptr, memsize)
tt.flag = MakeMemoryFlag(tt.flag, ManuallyManaged)
default:
panic("Unsupported Tensor type")
}
}
return f
}
// WithEngine is a construction option that would cause a Tensor to be linked with an execution engine.
func WithEngine(e Engine) ConsOpt {
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
tt.e = e
if e != nil && !e.AllocAccessible() {
tt.flag = MakeMemoryFlag(tt.flag, NativelyInaccessible)
}
tt.oe = nil
if oe, ok := e.(standardEngine); ok {
tt.oe = oe
}
case *CS:
tt.e = e
if e != nil && !e.AllocAccessible() {
tt.f = MakeMemoryFlag(tt.f, NativelyInaccessible)
}
}
}
return f
}
// AsFortran creates a *Dense with a col-major layout.
// If the optional backing argument is passed, the backing is assumed to be C-order (row major), and
// it will be transposed before being used.
func AsFortran(backing interface{}, argMask ...[]bool) ConsOpt {
var mask []bool
if len(argMask) > 0 {
mask = argMask[0]
}
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
if backing != nil {
// put the data into the tensor, then make a clone tensor to transpose
tt.fromSliceOrArrayer(backing)
// create a temporary tensor, to which the transpose will be done
tmp := NewDense(tt.Dtype(), tt.shape.Clone())
copyArray(tmp.arrPtr(), tt.arrPtr())
tmp.SetMask(mask)
tmp.T()
tmp.Transpose()
// copy the data back to the current tensor
copyArray(tt.arrPtr(), tmp.arrPtr())
tt.SetMask(tmp.Mask())
// cleanup: return the temporary tensor back to the pool
ReturnTensor(tmp)
}
tt.AP.o = MakeDataOrder(tt.AP.o, ColMajor)
if tt.AP.shape != nil {
ReturnInts(tt.AP.strides)
tt.AP.strides = nil
tt.AP.strides = tt.AP.calcStrides()
}
case *CS:
panic("AsFortran is not an available option for Compressed Sparse layouts")
}
}
return f
}
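// asFortranExample is a hedged sketch (not part of the original source): the
// row-major backing is reordered into column-major storage for a 2×3 matrix.
// WithShape is applied before AsFortran so the transpose has a shape to work with.
func asFortranExample() *Dense {
	backing := []float64{1, 2, 3, 4, 5, 6}
	return New(WithShape(2, 3), AsFortran(backing))
}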
// AsDenseDiag is a construction option that creates a dense diagonal matrix from the provided backing slice (or a Tensor's data), placing its elements along the main diagonal.
func AsDenseDiag(backing interface{}) ConsOpt {
f := func(t Tensor) {
switch tt := t.(type) {
case *Dense:
if bt, ok := backing.(Tensor); ok {
backing = bt.Data()
}
xT := reflect.TypeOf(backing)
if xT.Kind() != reflect.Slice {
panic("Expected a slice")
}
xV := reflect.ValueOf(backing)
l := xV.Len()
// elT := xT.Elem()
sli := reflect.MakeSlice(xT, l*l, l*l)
shape := Shape{l, l}
strides := shape.CalcStrides()
for i := 0; i < l; i++ {
idx, err := Ltoi(shape, strides, i, i)
if err != nil {
panic(err)
}
at := sli.Index(idx)
xi := xV.Index(i)
at.Set(xi)
}
tt.fromSliceOrArrayer(sli.Interface())
tt.setShape(l, l)
default:
panic("AsDenseDiag is not available as an option for CS")
}
}
return f
}
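// asDenseDiagExample is a hedged sketch (not part of the original source): the
// backing slice {1, 2, 3} ends up on the main diagonal of a 3×3 matrix.
func asDenseDiagExample() *Dense {
	return New(AsDenseDiag([]float64{1, 2, 3}))
}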
tensor-0.9.24/consopt_test.go 0000664 0000000 0000000 00000004321 14265126151 0016176 0 ustar 00root root 0000000 0000000 // +build linux
package tensor
import (
"fmt"
"io/ioutil"
"os"
"syscall"
"testing"
"testing/quick"
"unsafe"
"github.com/stretchr/testify/assert"
)
type F64 float64
func newF64(f float64) *F64 { r := F64(f); return &r }
func (f *F64) Uintptr() uintptr { return uintptr(unsafe.Pointer(f)) }
func (f *F64) MemSize() uintptr { return 8 }
func (f *F64) Pointer() unsafe.Pointer { return unsafe.Pointer(f) }
func Test_FromMemory(t *testing.T) {
fn := func(F float64) bool {
f := newF64(F)
T := New(WithShape(), Of(Float64), FromMemory(f.Uintptr(), f.MemSize()))
data := T.Data().(float64)
if data != F {
return false
}
return true
}
if err := quick.Check(fn, &quick.Config{MaxCount: 1000000}); err != nil {
t.Logf("%v", err)
}
f, err := ioutil.TempFile("", "test")
if err != nil {
t.Fatal(err)
}
// fill in with fake data
backing := make([]byte, 8*1024*1024) // 1024*1024 matrix of float64
asFloats := *(*[]float64)(unsafe.Pointer(&backing))
asFloats = asFloats[: 1024*1024 : 1024*1024]
asFloats[0] = 3.14
asFloats[2] = 6.28
asFloats[1024*1024-1] = 3.14
asFloats[1024*1024-3] = 6.28
f.Write(backing)
// defer cleanup
defer os.Remove(f.Name())
// do the mmap stuff
stat, err := f.Stat()
if err != nil {
t.Fatal(err)
}
size := int(stat.Size())
fd := int(f.Fd())
bs, err := syscall.Mmap(fd, 0, size, syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := syscall.Munmap(bs); err != nil {
t.Error(err)
}
}()
T := New(WithShape(1024, 1024), Of(Float64), FromMemory(uintptr(unsafe.Pointer(&bs[0])), uintptr(size)))
s := fmt.Sprintf("%v", T)
expected := `⎡3.14 0 6.28 0 ... 0 0 0 0⎤
⎢ 0 0 0 0 ... 0 0 0 0⎥
⎢ 0 0 0 0 ... 0 0 0 0⎥
⎢ 0 0 0 0 ... 0 0 0 0⎥
.
.
.
⎢ 0 0 0 0 ... 0 0 0 0⎥
⎢ 0 0 0 0 ... 0 0 0 0⎥
⎢ 0 0 0 0 ... 0 0 0 0⎥
⎣ 0 0 0 0 ... 0 6.28 0 3.14⎦
`
if s != expected {
t.Errorf("Expected mmap'd tensor to be exactly the same.")
}
assert.True(t, T.IsManuallyManaged())
}
tensor-0.9.24/debug.go 0000664 0000000 0000000 00000004273 14265126151 0014546 0 ustar 00root root 0000000 0000000 // +build debug
package tensor
import (
"fmt"
"log"
"os"
"reflect"
"runtime/debug"
"strings"
"sync/atomic"
"unsafe"
)
var TABCOUNT uint32
var TRACK = false
const DEBUG = true
var _logger_ = log.New(os.Stderr, "", 0)
var replacement = "\n"
func tabcount() int {
return int(atomic.LoadUint32(&TABCOUNT))
}
func enterLoggingContext() {
atomic.AddUint32(&TABCOUNT, 1)
tabcount := tabcount()
_logger_.SetPrefix(strings.Repeat("\t", tabcount))
replacement = "\n" + strings.Repeat("\t", tabcount)
}
func leaveLoggingContext() {
tabcount := tabcount()
tabcount--
if tabcount < 0 {
atomic.StoreUint32(&TABCOUNT, 0)
tabcount = 0
} else {
atomic.StoreUint32(&TABCOUNT, uint32(tabcount))
}
_logger_.SetPrefix(strings.Repeat("\t", tabcount))
replacement = "\n" + strings.Repeat("\t", tabcount)
}
func logf(format string, others ...interface{}) {
if DEBUG {
// format = strings.Replace(format, "\n", replacement, -1)
s := fmt.Sprintf(format, others...)
s = strings.Replace(s, "\n", replacement, -1)
_logger_.Println(s)
// _logger_.Printf(format, others...)
}
}
var stats = new(debug.GCStats)
func loggc() {
debug.ReadGCStats(stats)
log.Printf("NUMGC: %v", stats.NumGC)
}
func init() {
debug.SetPanicOnFault(true)
debug.SetTraceback("all")
}
type rtype struct {
size uintptr
ptrdata uintptr // number of bytes in the type that can contain pointers
hash uint32 // hash of type; avoids computation in hash tables
tflag uint8 // extra type information flags
align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
kind uint8 // enumeration for C
alg uintptr // algorithm table
gcdata uintptr // garbage collection data
str int32 // string form
ptrToThis int32 // type for pointer to this type, may be zero
}
func (t *rtype) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "size: %d pointers: %d, hash: 0x%x, flag: %d, align: %d, kind: %d", t.size, t.ptrdata, t.hash, t.tflag, t.align, t.kind)
}
func logRtype(t *reflect.Type) {
iface := *(*[2]uintptr)(unsafe.Pointer(t))
rt := (*rtype)(unsafe.Pointer(iface[1]))
log.Printf("TYPE INFO: %v(%p) - %v", *t, t, rt)
}
tensor-0.9.24/defaultengine.go 0000664 0000000 0000000 00000003177 14265126151 0016274 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/execution"
)
// StdEng is the default execution engine that comes with the tensors. To use other execution engines, use the WithEngine construction option.
type StdEng struct {
execution.E
}
// makeArray allocates a slice for the array
func (e StdEng) makeArray(arr *array, t Dtype, size int) {
arr.Raw = malloc(t, size)
arr.t = t
}
func (e StdEng) AllocAccessible() bool { return true }
func (e StdEng) Alloc(size int64) (Memory, error) { return nil, noopError{} }
func (e StdEng) Free(mem Memory, size int64) error { return nil }
func (e StdEng) Memset(mem Memory, val interface{}) error {
if ms, ok := mem.(MemSetter); ok {
return ms.Memset(val)
}
return errors.Errorf("Cannot memset %v with StdEng", mem)
}
func (e StdEng) Memclr(mem Memory) {
if z, ok := mem.(Zeroer); ok {
z.Zero()
}
return
}
func (e StdEng) Memcpy(dst, src Memory) error {
switch dt := dst.(type) {
case *array:
switch st := src.(type) {
case *array:
copyArray(dt, st)
return nil
case arrayer:
copyArray(dt, st.arrPtr())
return nil
}
case arrayer:
switch st := src.(type) {
case *array:
copyArray(dt.arrPtr(), st)
return nil
case arrayer:
copyArray(dt.arrPtr(), st.arrPtr())
return nil
}
}
return errors.Errorf("Failed to copy %T %T", dst, src)
}
func (e StdEng) Accessible(mem Memory) (Memory, error) { return mem, nil }
func (e StdEng) WorksWith(order DataOrder) bool { return true }
func (e StdEng) checkAccessible(t Tensor) error {
if !t.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, t)
}
return nil
}
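// memsetExample is a hedged sketch (not part of the original source): StdEng.Memset
// delegates to the MemSetter implementation of its argument; *Dense provides one,
// so every element of T is set to 3.14 here.
func memsetExample() (*Dense, error) {
	e := StdEng{}
	T := New(WithShape(2, 2), Of(Float64))
	err := e.Memset(T, 3.14)
	return T, err
}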
tensor-0.9.24/defaultengine_argmethods.go 0000664 0000000 0000000 00000010203 14265126151 0020475 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
func (e StdEng) Argmax(t Tensor, axis int) (retVal Tensor, err error) {
switch tt := t.(type) {
case DenseTensor:
return e.argmaxDenseTensor(tt, axis)
default:
return nil, errors.Errorf(typeNYI, "StdEng.Argmax", t)
}
}
func (e StdEng) argmaxDenseTensor(t DenseTensor, axis int) (retVal *Dense, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, opFail, "Argmax")
}
if axis >= len(t.Shape()) {
return nil, errors.Errorf(dimMismatch, len(t.Shape()), axis)
}
dataA := t.hdr()
typ := t.rtype()
// SPECIAL CASE: FLAT ARGMAX
if axis == AllAxes {
var index int
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
if index = e.E.ArgmaxFlatMasked(typ, dataA, mt.Mask()); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
} else {
if index = e.E.ArgmaxFlat(typ, dataA); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
}
return New(FromScalar(index)), nil
}
// ARGMAX ALONG AXIS
var indices []int
axes := make([]int, len(t.Shape()))
for i := range t.Shape() {
switch {
case i < axis:
axes[i] = i
case i == axis:
axes[len(axes)-1] = i
case i > axis:
axes[i-1] = i
}
}
// be a good citizen - borrow and return, since we're only using this AP to figure out the moves
newAP, _, err := t.Info().T(axes...)
if _, ok := err.(NoOpError); !ok && err != nil {
return
} else if ok {
t.Info().CloneTo(&newAP)
}
it := IteratorFromDense(t)
iteratorLoadAP(it, &newAP)
lastSize := it.Shape()[len(it.Shape())-1]
newShape := it.Shape().Clone()
newShape = newShape[:len(newShape)-1]
// cleanup
defer func() {
newAP.zero()
ReturnInts(newShape)
}()
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
mask := mt.Mask()
if indices, err = e.E.ArgmaxIterMasked(typ, dataA, mask, it, lastSize); err != nil {
return
}
} else {
if indices, err = e.E.ArgmaxIter(typ, dataA, it, lastSize); err != nil {
return
}
}
return New(WithShape(newShape...), WithBacking(indices)), nil
}
func (e StdEng) Argmin(t Tensor, axis int) (retVal Tensor, err error) {
switch tt := t.(type) {
case DenseTensor:
return e.argminDenseTensor(tt, axis)
default:
return nil, errors.Errorf(typeNYI, "StdEng.Argmin", t)
}
}
func (e StdEng) argminDenseTensor(t DenseTensor, axis int) (retVal *Dense, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, opFail, "Argmin")
}
if axis >= len(t.Shape()) {
return nil, errors.Errorf(dimMismatch, len(t.Shape()), axis)
}
dataA := t.hdr()
typ := t.rtype()
// SPECIAL CASE: FLAT ARGMIN
if axis == AllAxes {
var index int
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
if index = e.E.ArgminFlatMasked(typ, dataA, mt.Mask()); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
} else {
if index = e.E.ArgminFlat(typ, dataA); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
}
return New(FromScalar(index)), nil
}
// ARGMIN ALONG AXIS
var indices []int
axes := make([]int, len(t.Shape()))
for i := range t.Shape() {
switch {
case i < axis:
axes[i] = i
case i == axis:
axes[len(axes)-1] = i
case i > axis:
axes[i-1] = i
}
}
// be a good citizen - borrow and return, since we're only using this AP to figure out the moves
newAP, _, err := t.Info().T(axes...)
if _, ok := err.(NoOpError); !ok && err != nil {
return
} else if ok {
newAP = t.Info().Clone()
}
it := IteratorFromDense(t)
iteratorLoadAP(it, &newAP)
lastSize := it.Shape()[len(it.Shape())-1]
newShape := it.Shape().Clone()
newShape = newShape[:len(newShape)-1]
// cleanup
defer func() {
newAP.zero()
ReturnInts(newShape)
}()
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
mask := mt.Mask()
if indices, err = e.E.ArgminIterMasked(typ, dataA, mask, it, lastSize); err != nil {
return
}
} else {
if indices, err = e.E.ArgminIter(typ, dataA, it, lastSize); err != nil {
return
}
}
return New(WithShape(newShape...), WithBacking(indices)), nil
}
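// argExample is a hedged sketch (not part of the original source): it runs a flat
// Argmax (AllAxes) and an Argmin along axis 1 on a 2×3 matrix. For this backing the
// flat argmax is index 1 (value 9) and the per-row argmins are {0, 0}.
func argExample() (flatMax, rowMins Tensor, err error) {
	e := StdEng{}
	T := New(WithShape(2, 3), WithBacking([]float64{1, 9, 3, 4, 5, 6}))
	if flatMax, err = e.Argmax(T, AllAxes); err != nil {
		return nil, nil, err
	}
	rowMins, err = e.Argmin(T, 1)
	return flatMax, rowMins, err
}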
tensor-0.9.24/defaultengine_arith.go 0000664 0000000 0000000 00000070020 14265126151 0017452 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Add performs a + b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Add failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Add")
}
if useIter {
switch {
case incr:
err = e.E.AddIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.AddIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case !safe:
err = e.E.AddIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit)
}
return
}
switch {
case incr:
err = e.E.AddIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
err = e.E.AddRecv(typ, dataA, dataB, dataReuse)
retVal = reuse
case !safe:
err = e.E.Add(typ, dataA, dataB)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.Add(typ, retVal.hdr(), dataB)
}
return
}
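// addExample is a hedged sketch (not part of the generated source): it calls
// StdEng.Add in its default safe form and with WithIncr, which accumulates a+b
// into the incr tensor ({111, 122} for this backing).
func addExample() (safe, incred Tensor, err error) {
	e := StdEng{}
	a := New(WithShape(2), WithBacking([]float64{1, 2}))
	b := New(WithShape(2), WithBacking([]float64{10, 20}))
	incr := New(WithShape(2), WithBacking([]float64{100, 100}))
	if safe, err = e.Add(a, b); err != nil {
		return nil, nil, err
	}
	incred, err = e.Add(a, b, WithIncr(incr))
	return safe, incred, err
}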
// Sub performs a - b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Sub(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Sub failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Sub")
}
if useIter {
switch {
case incr:
err = e.E.SubIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.SubIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case !safe:
err = e.E.SubIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit)
}
return
}
switch {
case incr:
err = e.E.SubIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
err = e.E.SubRecv(typ, dataA, dataB, dataReuse)
retVal = reuse
case !safe:
err = e.E.Sub(typ, dataA, dataB)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.Sub(typ, retVal.hdr(), dataB)
}
return
}
// Mul performs a × b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Mul(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Mul failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Mul")
}
if useIter {
switch {
case incr:
err = e.E.MulIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.MulIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case !safe:
err = e.E.MulIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit)
}
return
}
switch {
case incr:
err = e.E.MulIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
err = e.E.MulRecv(typ, dataA, dataB, dataReuse)
retVal = reuse
case !safe:
err = e.E.Mul(typ, dataA, dataB)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.Mul(typ, retVal.hdr(), dataB)
}
return
}
// Div performs a ÷ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Div(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Div failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Div")
}
if useIter {
switch {
case incr:
err = e.E.DivIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.DivIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case !safe:
err = e.E.DivIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit)
}
return
}
switch {
case incr:
err = e.E.DivIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
err = e.E.DivRecv(typ, dataA, dataB, dataReuse)
retVal = reuse
case !safe:
err = e.E.Div(typ, dataA, dataB)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.Div(typ, retVal.hdr(), dataB)
}
return
}
// Pow performs a ^ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Pow(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Pow failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Pow")
}
if useIter {
switch {
case incr:
err = e.E.PowIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.PowIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case !safe:
err = e.E.PowIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit)
}
return
}
switch {
case incr:
err = e.E.PowIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
err = e.E.PowRecv(typ, dataA, dataB, dataReuse)
retVal = reuse
case !safe:
err = e.E.Pow(typ, dataA, dataB)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.Pow(typ, retVal.hdr(), dataB)
}
return
}
// Mod performs a % b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Mod(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Mod failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Mod")
}
if useIter {
switch {
case incr:
err = e.E.ModIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.ModIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case !safe:
err = e.E.ModIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit)
}
return
}
switch {
case incr:
err = e.E.ModIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
err = e.E.ModRecv(typ, dataA, dataB, dataReuse)
retVal = reuse
case !safe:
err = e.E.Mod(typ, dataA, dataB)
retVal = a
default:
if swap {
retVal = b.Clone().(Tensor)
} else {
retVal = a.Clone().(Tensor)
}
err = e.E.Mod(typ, retVal.hdr(), dataB)
}
return
}
// AddScalar performs t + s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) AddScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Add failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Add failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Add")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Add")
}
scalarHeader = dataA
}
if useIter {
switch {
case incr:
err = e.E.AddIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.AddIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case toReuse && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
iit.Reset()
bit.Reset()
err = e.E.AddIter(typ, dataA, dataReuse, ait, iit)
retVal = reuse
case !safe:
err = e.E.AddIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
retVal = a.Clone().(Tensor)
if leftTensor {
err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit)
} else {
err = e.E.AddIter(typ, dataA, retVal.hdr(), ait, bit)
}
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
switch {
case incr:
err = e.E.AddIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Add(typ, dataReuse, dataB)
retVal = reuse
case toReuse && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.Add(typ, dataA, dataReuse)
if t.Shape().IsScalarEquiv() {
storage.Copy(typ, dataReuse, dataA)
}
retVal = reuse
case !safe:
err = e.E.Add(typ, dataA, dataB)
if t.Shape().IsScalarEquiv() && !leftTensor {
storage.Copy(typ, dataB, dataA)
}
retVal = a
default:
retVal = a.Clone().(Tensor)
if !leftTensor {
storage.Fill(typ, retVal.hdr(), dataA)
}
err = e.E.Add(typ, retVal.hdr(), dataB)
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
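// addScalarExample is a hedged sketch (not part of the generated source): with
// leftTensor set to true the tensor is the left operand, i.e. t + 2.5 elementwise.
func addScalarExample() (Tensor, error) {
	e := StdEng{}
	t := New(WithShape(3), WithBacking([]float64{1, 2, 3}))
	return e.AddScalar(t, 2.5, true)
}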
// SubScalar performs t - s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) SubScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Sub failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Sub failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Sub")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Sub")
}
scalarHeader = dataA
}
if useIter {
switch {
case incr:
err = e.E.SubIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.SubIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case toReuse && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
iit.Reset()
bit.Reset()
err = e.E.SubIter(typ, dataA, dataReuse, ait, iit)
retVal = reuse
case !safe:
err = e.E.SubIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
retVal = a.Clone().(Tensor)
if leftTensor {
err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit)
} else {
err = e.E.SubIter(typ, dataA, retVal.hdr(), ait, bit)
}
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
switch {
case incr:
err = e.E.SubIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Sub(typ, dataReuse, dataB)
retVal = reuse
case toReuse && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.Sub(typ, dataA, dataReuse)
if t.Shape().IsScalarEquiv() {
storage.Copy(typ, dataReuse, dataA)
}
retVal = reuse
case !safe:
err = e.E.Sub(typ, dataA, dataB)
if t.Shape().IsScalarEquiv() && !leftTensor {
storage.Copy(typ, dataB, dataA)
}
retVal = a
default:
retVal = a.Clone().(Tensor)
if !leftTensor {
storage.Fill(typ, retVal.hdr(), dataA)
}
err = e.E.Sub(typ, retVal.hdr(), dataB)
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// MulScalar performs t × s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) MulScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Mul failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Mul failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Mul")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Mul")
}
scalarHeader = dataA
}
if useIter {
switch {
case incr:
err = e.E.MulIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.MulIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case toReuse && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
iit.Reset()
bit.Reset()
err = e.E.MulIter(typ, dataA, dataReuse, ait, iit)
retVal = reuse
case !safe:
err = e.E.MulIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
retVal = a.Clone().(Tensor)
if leftTensor {
err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit)
} else {
err = e.E.MulIter(typ, dataA, retVal.hdr(), ait, bit)
}
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
switch {
case incr:
err = e.E.MulIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Mul(typ, dataReuse, dataB)
retVal = reuse
case toReuse && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.Mul(typ, dataA, dataReuse)
if t.Shape().IsScalarEquiv() {
storage.Copy(typ, dataReuse, dataA)
}
retVal = reuse
case !safe:
err = e.E.Mul(typ, dataA, dataB)
if t.Shape().IsScalarEquiv() && !leftTensor {
storage.Copy(typ, dataB, dataA)
}
retVal = a
default:
retVal = a.Clone().(Tensor)
if !leftTensor {
storage.Fill(typ, retVal.hdr(), dataA)
}
err = e.E.Mul(typ, retVal.hdr(), dataB)
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// DivScalar performs t ÷ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) DivScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Div failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Div failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Div")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Div")
}
scalarHeader = dataA
}
if useIter {
switch {
case incr:
err = e.E.DivIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.DivIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case toReuse && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
iit.Reset()
bit.Reset()
err = e.E.DivIter(typ, dataA, dataReuse, ait, iit)
retVal = reuse
case !safe:
err = e.E.DivIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
retVal = a.Clone().(Tensor)
if leftTensor {
err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit)
} else {
err = e.E.DivIter(typ, dataA, retVal.hdr(), ait, bit)
}
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
switch {
case incr:
err = e.E.DivIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Div(typ, dataReuse, dataB)
retVal = reuse
case toReuse && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.Div(typ, dataA, dataReuse)
if t.Shape().IsScalarEquiv() {
storage.Copy(typ, dataReuse, dataA)
}
retVal = reuse
case !safe:
err = e.E.Div(typ, dataA, dataB)
if t.Shape().IsScalarEquiv() && !leftTensor {
storage.Copy(typ, dataB, dataA)
}
retVal = a
default:
retVal = a.Clone().(Tensor)
if !leftTensor {
storage.Fill(typ, retVal.hdr(), dataA)
}
err = e.E.Div(typ, retVal.hdr(), dataB)
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// PowScalar performs t ^ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) PowScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Pow failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Pow failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Pow")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Pow")
}
scalarHeader = dataA
}
if useIter {
switch {
case incr:
err = e.E.PowIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.PowIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case toReuse && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
iit.Reset()
bit.Reset()
err = e.E.PowIter(typ, dataA, dataReuse, ait, iit)
retVal = reuse
case !safe:
err = e.E.PowIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
retVal = a.Clone().(Tensor)
if leftTensor {
err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit)
} else {
err = e.E.PowIter(typ, dataA, retVal.hdr(), ait, bit)
}
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
switch {
case incr:
err = e.E.PowIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Pow(typ, dataReuse, dataB)
retVal = reuse
case toReuse && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.Pow(typ, dataA, dataReuse)
if t.Shape().IsScalarEquiv() {
storage.Copy(typ, dataReuse, dataA)
}
retVal = reuse
case !safe:
err = e.E.Pow(typ, dataA, dataB)
if t.Shape().IsScalarEquiv() && !leftTensor {
storage.Copy(typ, dataB, dataA)
}
retVal = a
default:
retVal = a.Clone().(Tensor)
if !leftTensor {
storage.Fill(typ, retVal.hdr(), dataA)
}
err = e.E.Pow(typ, retVal.hdr(), dataB)
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// ModScalar performs t % s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) ModScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, numberTypes); err != nil {
return nil, errors.Wrapf(err, "Mod failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Mod failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Mod")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Mod")
}
scalarHeader = dataA
}
if useIter {
switch {
case incr:
err = e.E.ModIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
case toReuse && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.ModIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
case toReuse && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
iit.Reset()
bit.Reset()
err = e.E.ModIter(typ, dataA, dataReuse, ait, iit)
retVal = reuse
case !safe:
err = e.E.ModIter(typ, dataA, dataB, ait, bit)
retVal = a
default:
retVal = a.Clone().(Tensor)
if leftTensor {
err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit)
} else {
err = e.E.ModIter(typ, dataA, retVal.hdr(), ait, bit)
}
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
switch {
case incr:
err = e.E.ModIncr(typ, dataA, dataB, dataReuse)
retVal = reuse
case toReuse && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Mod(typ, dataReuse, dataB)
retVal = reuse
case toReuse && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.Mod(typ, dataA, dataReuse)
if t.Shape().IsScalarEquiv() {
storage.Copy(typ, dataReuse, dataA)
}
retVal = reuse
case !safe:
err = e.E.Mod(typ, dataA, dataB)
if t.Shape().IsScalarEquiv() && !leftTensor {
storage.Copy(typ, dataB, dataA)
}
retVal = a
default:
retVal = a.Clone().(Tensor)
if !leftTensor {
storage.Fill(typ, retVal.hdr(), dataA)
}
err = e.E.Mod(typ, retVal.hdr(), dataB)
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
tensor-0.9.24/defaultengine_cmp.go 0000664 0000000 0000000 00000104701 14265126151 0017126 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Gt performs a > b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) Gt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Gt failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Gt")
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.GtSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.GtSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.GtIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
return
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.GtSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.GtSame(typ, dataReuse, dataB)
retVal = reuse
default:
err = e.E.Gt(typ, dataA, dataB, dataReuse)
retVal = reuse
}
return
}
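// gtExample is a hedged sketch (not part of the generated source): by default Gt
// returns a Bool tensor; with AsSameType() the result keeps the operands' Dtype,
// using 1 for true and 0 for false.
func gtExample() (boolRes, sameRes Tensor, err error) {
	e := StdEng{}
	a := New(WithShape(2), WithBacking([]float64{1, 5}))
	b := New(WithShape(2), WithBacking([]float64{3, 2}))
	if boolRes, err = e.Gt(a, b); err != nil {
		return nil, nil, err
	}
	sameRes, err = e.Gt(a, b, AsSameType())
	return boolRes, sameRes, err
}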
// Gte performs a ≥ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) Gte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Gte failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Gte")
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.GteSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.GteSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.GteIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
return
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.GteSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.GteSame(typ, dataReuse, dataB)
retVal = reuse
default:
err = e.E.Gte(typ, dataA, dataB, dataReuse)
retVal = reuse
}
return
}
// Lt performs a < b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) Lt(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Lt failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Lt")
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.LtSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.LtSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.LtIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
return
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.LtSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.LtSame(typ, dataReuse, dataB)
retVal = reuse
default:
err = e.E.Lt(typ, dataA, dataB, dataReuse)
retVal = reuse
}
return
}
// Lte performs a ≤ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) Lte(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Lte failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Lte")
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.LteSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.LteSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.LteIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
return
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.LteSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.LteSame(typ, dataReuse, dataB)
retVal = reuse
default:
err = e.E.Lte(typ, dataA, dataB, dataReuse)
retVal = reuse
}
return
}
// ElEq performs a == b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) ElEq(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, eqTypes); err != nil {
return nil, errors.Wrapf(err, "Eq failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Eq")
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.EqSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.EqSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.EqIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
return
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.EqSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.EqSame(typ, dataReuse, dataB)
retVal = reuse
default:
err = e.E.Eq(typ, dataA, dataB, dataReuse)
retVal = reuse
}
return
}
// ElNe performs a ≠ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) ElNe(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, eqTypes); err != nil {
return nil, errors.Wrapf(err, "Ne failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Ne")
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.NeSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.NeSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.NeIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
return
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.NeSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.NeSame(typ, dataReuse, dataB)
retVal = reuse
default:
err = e.E.Ne(typ, dataA, dataB, dataReuse)
retVal = reuse
}
return
}
// GtScalar performs t > s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) GtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Gt failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Gt failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Gt")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Gt")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.GtSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.GtSameIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case same && safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.GtSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.GtIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B both have len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.GtSame(typ, dataReuse, dataB)
retVal = reuse
return
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.LtSame(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.GtSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.GtSame(typ, dataReuse, dataB)
retVal = reuse
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.GtSame(typ, dataA, dataReuse)
retVal = reuse
default:
err = e.E.Gt(typ, dataA, dataB, dataReuse)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// GteScalar performs t ≥ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) GteScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Gte failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Gte failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Gte")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Gte")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.GteSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.GteSameIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case same && safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.GteSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.GteIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.GteSame(typ, dataReuse, dataB)
retVal = reuse
return
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.LteSame(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.GteSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.GteSame(typ, dataReuse, dataB)
retVal = reuse
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.GteSame(typ, dataA, dataReuse)
retVal = reuse
default:
err = e.E.Gte(typ, dataA, dataB, dataReuse)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
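// The following is a minimal usage sketch, not part of the library proper: it shows how the
// scalar comparison methods above may be called directly on the engine. The helper name and
// sample values are made up for illustration. By default a Bool tensor is returned; the
// AsSameType() FuncOpt would return a tensor of the operand's Dtype instead.
func exampleGteScalarSketch() {
	e := StdEng{}
	T := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	// t ≥ 3, with the tensor as the left operand.
	ret, err := e.GteScalar(T, 3.0, true)
	if err != nil {
		panic(err)
	}
	_ = ret // expected: a (2, 2) Bool tensor [false false true true]
}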
// LtScalar performs t < s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar values are accepted for s.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) LtScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Lt failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Lt failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Lt")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Lt")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.LtSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.LtSameIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case same && safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.LtSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.LtIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.LtSame(typ, dataReuse, dataB)
retVal = reuse
return
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.GtSame(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.LtSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.LtSame(typ, dataReuse, dataB)
retVal = reuse
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.LtSame(typ, dataA, dataReuse)
retVal = reuse
default:
err = e.E.Lt(typ, dataA, dataB, dataReuse)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// LteScalar performs t ≤ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar values are accepted for s.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (e StdEng) LteScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, "Lte failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Lte failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Lte")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Lte")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.LteSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.LteSameIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case same && safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.LteSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.LteIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.LteSame(typ, dataReuse, dataB)
retVal = reuse
return
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.GteSame(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.LteSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.LteSame(typ, dataReuse, dataB)
retVal = reuse
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.LteSame(typ, dataA, dataReuse)
retVal = reuse
default:
err = e.E.Lte(typ, dataA, dataB, dataReuse)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
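// EqScalar performs t == s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar values are accepted for s.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.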
func (e StdEng) EqScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, eqTypes); err != nil {
return nil, errors.Wrapf(err, "Eq failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Eq failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Eq")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Eq")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.EqSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.EqSameIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case same && safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.EqSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.EqIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.EqSame(typ, dataReuse, dataB)
retVal = reuse
return
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.EqSame(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.EqSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.EqSame(typ, dataReuse, dataB)
retVal = reuse
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.EqSame(typ, dataA, dataReuse)
retVal = reuse
default:
err = e.E.Eq(typ, dataA, dataB, dataReuse)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
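// NeScalar performs t ≠ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar values are accepted for s.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.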
func (e StdEng) NeScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, eqTypes); err != nil {
return nil, errors.Wrapf(err, "Ne failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "Ne failed")
}
var reuse DenseTensor
var safe, same bool
if reuse, safe, _, _, same, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !safe {
same = true
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Ne")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Ne")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
switch {
case same && safe && reuse == nil:
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
case !same && safe && reuse == nil:
reuse = NewDense(Bool, a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && same && reuse == nil:
err = e.E.NeSameIter(typ, dataA, dataB, ait, bit)
retVal = a
case same && safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.NeSameIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case same && safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.NeSameIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
err = e.E.NeIter(typ, dataA, dataB, dataReuse, ait, bit, iit)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.NeSame(typ, dataReuse, dataB)
retVal = reuse
return
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.NeSame(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && same && reuse == nil:
err = e.E.NeSame(typ, dataA, dataB)
retVal = a
case same && safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.NeSame(typ, dataReuse, dataB)
retVal = reuse
case same && safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.NeSame(typ, dataA, dataReuse)
retVal = reuse
default:
err = e.E.Ne(typ, dataA, dataB, dataReuse)
retVal = reuse
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
tensor-0.9.24/defaultengine_linalg.go 0000664 0000000 0000000 00000045135 14265126151 0017622 0 ustar 00root root 0000000 0000000 package tensor
import (
"reflect"
"github.com/pkg/errors"
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/mat"
)
// Trace returns the trace of a matrix (i.e. the sum of the diagonal elements). If the Tensor provided is not a matrix, it will return an error
func (e StdEng) Trace(t Tensor) (retVal interface{}, err error) {
if t.Dims() != 2 {
err = errors.Errorf(dimMismatch, 2, t.Dims())
return
}
if err = typeclassCheck(t.Dtype(), numberTypes); err != nil {
return nil, errors.Wrap(err, "Trace")
}
rstride := t.Strides()[0]
cstride := t.Strides()[1]
r := t.Shape()[0]
c := t.Shape()[1]
m := MinInt(r, c)
stride := rstride + cstride
switch data := t.Data().(type) {
case []int:
var trace int
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []int8:
var trace int8
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []int16:
var trace int16
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []int32:
var trace int32
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []int64:
var trace int64
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []uint:
var trace uint
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []uint8:
var trace uint8
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []uint16:
var trace uint16
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []uint32:
var trace uint32
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []uint64:
var trace uint64
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []float32:
var trace float32
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []float64:
var trace float64
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []complex64:
var trace complex64
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
case []complex128:
var trace complex128
for i := 0; i < m; i++ {
trace += data[i*stride]
}
retVal = trace
}
return
}
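// A minimal usage sketch (illustrative only; the helper name and values are made up):
// the trace of [[1 2] [3 4]] is 1 + 4 = 5.
func exampleTraceSketch() {
	e := StdEng{}
	T := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	tr, err := e.Trace(T)
	if err != nil {
		panic(err)
	}
	_ = tr // expected: float64(5)
}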
func (e StdEng) Dot(x, y Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if _, ok := x.(DenseTensor); !ok {
err = errors.Errorf("Engine only supports working on x that is a DenseTensor. Got %T instead", x)
return
}
if _, ok := y.(DenseTensor); !ok {
err = errors.Errorf("Engine only supports working on y that is a DenseTensor. Got %T instead", y)
return
}
var a, b DenseTensor
if a, err = getFloatDenseTensor(x); err != nil {
err = errors.Wrapf(err, opFail, "Dot")
return
}
if b, err = getFloatDenseTensor(y); err != nil {
err = errors.Wrapf(err, opFail, "Dot")
return
}
fo := ParseFuncOpts(opts...)
var reuse, incr DenseTensor
if reuse, err = getFloatDenseTensor(fo.reuse); err != nil {
err = errors.Wrapf(err, opFail, "Dot - reuse")
return
}
if incr, err = getFloatDenseTensor(fo.incr); err != nil {
err = errors.Wrapf(err, opFail, "Dot - incr")
return
}
switch {
case a.IsScalar() && b.IsScalar():
var res interface{}
switch a.Dtype().Kind() {
case reflect.Float64:
res = a.GetF64(0) * b.GetF64(0)
case reflect.Float32:
res = a.GetF32(0) * b.GetF32(0)
}
switch {
case incr != nil:
if !incr.IsScalar() {
err = errors.Errorf(shapeMismatch, ScalarShape(), incr.Shape())
return
}
if err = e.E.MulIncr(a.Dtype().Type, a.hdr(), b.hdr(), incr.hdr()); err != nil {
err = errors.Wrapf(err, opFail, "Dot scalar incr")
return
}
retVal = incr
case reuse != nil:
reuse.Set(0, res)
reuse.reshape()
retVal = reuse
default:
retVal = New(FromScalar(res))
}
return
case a.IsScalar():
switch {
case incr != nil:
return Mul(a.ScalarValue(), b, WithIncr(incr))
case reuse != nil:
return Mul(a.ScalarValue(), b, WithReuse(reuse))
}
// default moved out
return Mul(a.ScalarValue(), b)
case b.IsScalar():
switch {
case incr != nil:
return Mul(a, b.ScalarValue(), WithIncr(incr))
case reuse != nil:
return Mul(a, b.ScalarValue(), WithReuse(reuse))
}
return Mul(a, b.ScalarValue())
}
switch {
case a.IsVector():
switch {
case b.IsVector():
// check size
if a.len() != b.len() {
err = errors.Errorf(shapeMismatch, a.Shape(), b.Shape())
return
}
var ret interface{}
if ret, err = e.Inner(a, b); err != nil {
return nil, errors.Wrapf(err, opFail, "Dot")
}
return New(FromScalar(ret)), nil
case b.IsMatrix():
b.T()
defer b.UT()
switch {
case reuse != nil && incr != nil:
return b.MatVecMul(a, WithReuse(reuse), WithIncr(incr))
case reuse != nil:
return b.MatVecMul(a, WithReuse(reuse))
case incr != nil:
return b.MatVecMul(a, WithIncr(incr))
default:
}
return b.MatVecMul(a)
default:
}
case a.IsMatrix():
switch {
case b.IsVector():
switch {
case reuse != nil && incr != nil:
return a.MatVecMul(b, WithReuse(reuse), WithIncr(incr))
case reuse != nil:
return a.MatVecMul(b, WithReuse(reuse))
case incr != nil:
return a.MatVecMul(b, WithIncr(incr))
default:
}
return a.MatVecMul(b)
case b.IsMatrix():
switch {
case reuse != nil && incr != nil:
return a.MatMul(b, WithReuse(reuse), WithIncr(incr))
case reuse != nil:
return a.MatMul(b, WithReuse(reuse))
case incr != nil:
return a.MatMul(b, WithIncr(incr))
default:
}
return a.MatMul(b)
default:
}
default:
}
as := a.Shape()
bs := b.Shape()
axesA := BorrowInts(1)
axesB := BorrowInts(1)
defer ReturnInts(axesA)
defer ReturnInts(axesB)
var lastA, secondLastB int
lastA = len(as) - 1
axesA[0] = lastA
if len(bs) >= 2 {
secondLastB = len(bs) - 2
} else {
secondLastB = 0
}
axesB[0] = secondLastB
if as[lastA] != bs[secondLastB] {
err = errors.Errorf(shapeMismatch, as, bs)
return
}
var rd *Dense
if rd, err = a.TensorMul(b, axesA, axesB); err != nil {
panic(err)
}
if reuse != nil {
copyDense(reuse, rd)
ap := rd.Info().Clone()
reuse.setAP(&ap)
defer ReturnTensor(rd)
// swap out the underlying data and metadata
// reuse.data, rd.data = rd.data, reuse.data
// reuse.AP, rd.AP = rd.AP, reuse.AP
// defer ReturnTensor(rd)
retVal = reuse
} else {
retVal = rd
}
return
}
// TODO: make it take DenseTensor
func (e StdEng) SVD(a Tensor, uv, full bool) (s, u, v Tensor, err error) {
var t *Dense
var ok bool
if err = e.checkAccessible(a); err != nil {
return nil, nil, nil, errors.Wrapf(err, "opFail %v", "SVD")
}
if t, ok = a.(*Dense); !ok {
return nil, nil, nil, errors.Errorf("StdEng only performs SVDs for DenseTensors. Got %T instead", a)
}
if err = typeclassCheck(a.Dtype(), floatTypes); err != nil {
return nil, nil, nil, errors.Errorf("StdEng can only perform SVDs for float64 and float32 type. Got tensor of %v instead", t.Dtype())
}
if !t.IsMatrix() {
return nil, nil, nil, errors.Errorf(dimMismatch, 2, t.Dims())
}
var m *mat.Dense
var svd mat.SVD
if m, err = ToMat64(t, UseUnsafe()); err != nil {
return
}
switch {
case full && uv:
ok = svd.Factorize(m, mat.SVDFull)
case !full && uv:
ok = svd.Factorize(m, mat.SVDThin)
case full && !uv:
// illogical state - if you specify "full", you WANT the UV matrices
// error
err = errors.Errorf("SVD requires computation of `u` and `v` matrices if `full` was specified.")
return
default:
// by default, we return only the singular values
ok = svd.Factorize(m, mat.SVDNone)
}
if !ok {
// error
err = errors.Errorf("Unable to compute SVD")
return
}
// extract values
var um, vm mat.Dense
s = recycledDense(Float64, Shape{MinInt(t.Shape()[0], t.Shape()[1])}, WithEngine(e))
svd.Values(s.Data().([]float64))
if uv {
svd.UTo(&um)
svd.VTo(&vm)
// vm.VFromSVD(&svd)
u = FromMat64(&um, UseUnsafe(), As(t.t))
v = FromMat64(&vm, UseUnsafe(), As(t.t))
}
return
}
// Inner is a thin layer over BLAS's D/Sdot.
// It returns a scalar value, wrapped in an interface{}, which is not quite nice.
func (e StdEng) Inner(a, b Tensor) (retVal interface{}, err error) {
var ad, bd DenseTensor
if ad, bd, err = e.checkTwoFloatComplexTensors(a, b); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Inner")
}
switch A := ad.Data().(type) {
case []float32:
B := bd.Float32s()
retVal = whichblas.Sdot(len(A), A, 1, B, 1)
case []float64:
B := bd.Float64s()
retVal = whichblas.Ddot(len(A), A, 1, B, 1)
case []complex64:
B := bd.Complex64s()
retVal = whichblas.Cdotu(len(A), A, 1, B, 1)
case []complex128:
B := bd.Complex128s()
retVal = whichblas.Zdotu(len(A), A, 1, B, 1)
}
return
}
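// A minimal usage sketch (illustrative only): the inner product of [1 2 3] and [4 5 6] is
// 1*4 + 2*5 + 3*6 = 32. Both operands must be float or complex tensors of the same Dtype.
func exampleInnerSketch() {
	e := StdEng{}
	a := New(WithShape(3), WithBacking([]float64{1, 2, 3}))
	b := New(WithShape(3), WithBacking([]float64{4, 5, 6}))
	res, err := e.Inner(a, b)
	if err != nil {
		panic(err)
	}
	_ = res // expected: float64(32)
}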
// MatVecMul is a thin layer over BLAS' DGEMV
// Because DGEMV computes:
// y = αA * x + βy
// we set beta to 0, so we don't have to manually zero out the reused/retval tensor data
func (e StdEng) MatVecMul(a, b, prealloc Tensor) (err error) {
// check all are DenseTensors
var ad, bd, pd DenseTensor
if ad, bd, pd, err = e.checkThreeFloatComplexTensors(a, b, prealloc); err != nil {
return errors.Wrapf(err, opFail, "StdEng.MatVecMul")
}
m := ad.oshape()[0]
n := ad.oshape()[1]
tA := blas.NoTrans
do := a.DataOrder()
z := ad.oldAP().IsZero()
var lda int
switch {
case do.IsRowMajor() && z:
lda = n
case do.IsRowMajor() && !z:
tA = blas.Trans
lda = n
case do.IsColMajor() && z:
tA = blas.Trans
lda = m
m, n = n, m
case do.IsColMajor() && !z:
lda = m
m, n = n, m
}
incX, incY := 1, 1 // step size
// ASPIRATIONAL TODO: different incX and incY
// TECHNICAL DEBT. TECHDEBT. TECH DEBT
// Example use case:
// log.Printf("a %v %v", ad.Strides(), ad.ostrides())
// log.Printf("b %v", b.Strides())
// incX := a.Strides()[0]
// incY = b.Strides()[0]
switch A := ad.Data().(type) {
case []float64:
x := bd.Float64s()
y := pd.Float64s()
alpha, beta := float64(1), float64(0)
whichblas.Dgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY)
case []float32:
x := bd.Float32s()
y := pd.Float32s()
alpha, beta := float32(1), float32(0)
whichblas.Sgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY)
case []complex64:
x := bd.Complex64s()
y := pd.Complex64s()
var alpha, beta complex64 = complex(1, 0), complex(0, 0)
whichblas.Cgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY)
case []complex128:
x := bd.Complex128s()
y := pd.Complex128s()
var alpha, beta complex128 = complex(1, 0), complex(0, 0)
whichblas.Zgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY)
default:
return errors.Errorf(typeNYI, "matVecMul", bd.Data())
}
return nil
}
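// A minimal usage sketch (illustrative only; the helper name and values are made up).
// A is (2,3), x has 3 elements, and the preallocated y has 2 elements; the call computes
// y = A·x, so with the values below y should hold [20 47].
func exampleMatVecMulSketch() {
	e := StdEng{}
	A := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	x := New(WithShape(3), WithBacking([]float64{2, 3, 4}))
	y := New(Of(Float64), WithShape(2))
	if err := e.MatVecMul(A, x, y); err != nil {
		panic(err)
	}
	_ = y // expected: [20 47]
}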
// MatMul is a thin layer over DGEMM.
// DGEMM computes:
// C = αA * B + βC
// To prevent needless zeroing out of the slice, we just set β to 0
func (e StdEng) MatMul(a, b, prealloc Tensor) (err error) {
// check all are DenseTensors
var ad, bd, pd DenseTensor
if ad, bd, pd, err = e.checkThreeFloatComplexTensors(a, b, prealloc); err != nil {
return errors.Wrapf(err, opFail, "StdEng.MatMul")
}
ado := a.DataOrder()
bdo := b.DataOrder()
cdo := prealloc.DataOrder()
// get result shapes. k is the shared dimension
// a is (m, k)
// b is (k, n)
// c is (m, n)
var m, n, k int
m = ad.Shape()[0]
k = ad.Shape()[1]
n = bd.Shape()[1]
// wrt the strides, we use the original strides, because that's what BLAS needs, instead of calling .Strides()
// lda in colmajor = number of rows;
// lda in row major = number of cols
var lda, ldb, ldc int
switch {
case ado.IsColMajor():
lda = m
case ado.IsRowMajor():
lda = k
}
switch {
case bdo.IsColMajor():
ldb = bd.Shape()[0]
case bdo.IsRowMajor():
ldb = n
}
switch {
case cdo.IsColMajor():
ldc = prealloc.Shape()[0]
case cdo.IsRowMajor():
ldc = prealloc.Shape()[1]
}
// check for trans
tA, tB := blas.NoTrans, blas.NoTrans
if !ad.oldAP().IsZero() {
tA = blas.Trans
if ado.IsRowMajor() {
lda = m
} else {
lda = k
}
}
if !bd.oldAP().IsZero() {
tB = blas.Trans
if bdo.IsRowMajor() {
ldb = bd.Shape()[0]
} else {
ldb = bd.Shape()[1]
}
}
switch A := ad.Data().(type) {
case []float64:
B := bd.Float64s()
C := pd.Float64s()
alpha, beta := float64(1), float64(0)
if ado.IsColMajor() && bdo.IsColMajor() {
whichblas.Dgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)
} else {
whichblas.Dgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)
}
case []float32:
B := bd.Float32s()
C := pd.Float32s()
alpha, beta := float32(1), float32(0)
if ado.IsColMajor() && bdo.IsColMajor() {
whichblas.Sgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)
} else {
whichblas.Sgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)
}
case []complex64:
B := bd.Complex64s()
C := pd.Complex64s()
var alpha, beta complex64 = complex(1, 0), complex(0, 0)
if ado.IsColMajor() && bdo.IsColMajor() {
whichblas.Cgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)
} else {
whichblas.Cgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)
}
case []complex128:
B := bd.Complex128s()
C := pd.Complex128s()
var alpha, beta complex128 = complex(1, 0), complex(0, 0)
if ado.IsColMajor() && bdo.IsColMajor() {
whichblas.Zgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)
} else {
whichblas.Zgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)
}
default:
return errors.Errorf(typeNYI, "matMul", ad.Data())
}
return
}
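// A minimal usage sketch (illustrative only). A is (2,3), B is (3,2) and the preallocated C
// is (2,2); the call computes C = A·B, giving [[22 28] [49 64]] for the values below.
func exampleMatMulSketch() {
	e := StdEng{}
	A := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	B := New(WithShape(3, 2), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	C := New(Of(Float64), WithShape(2, 2))
	if err := e.MatMul(A, B, C); err != nil {
		panic(err)
	}
	_ = C // expected: [[22 28] [49 64]]
}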
// Outer is a thin wrapper over S/Dger
func (e StdEng) Outer(a, b, prealloc Tensor) (err error) {
// check all are DenseTensors
var ad, bd, pd DenseTensor
if ad, bd, pd, err = e.checkThreeFloatComplexTensors(a, b, prealloc); err != nil {
return errors.Wrapf(err, opFail, "StdEng.Outer")
}
m := ad.Size()
n := bd.Size()
pdo := pd.DataOrder()
// the stride of a Vector is always going to be [1],
// incX := t.Strides()[0]
// incY := other.Strides()[0]
incX, incY := 1, 1
// lda := pd.Strides()[0]
var lda int
switch {
case pdo.IsColMajor():
aShape := a.Shape().Clone()
bShape := b.Shape().Clone()
if err = a.Reshape(aShape[0], 1); err != nil {
return err
}
if err = b.Reshape(1, bShape[0]); err != nil {
return err
}
if err = e.MatMul(a, b, prealloc); err != nil {
return err
}
if err = b.Reshape(bShape...); err != nil {
return
}
if err = a.Reshape(aShape...); err != nil {
return
}
return nil
case pdo.IsRowMajor():
lda = pd.Shape()[1]
}
switch x := ad.Data().(type) {
case []float64:
y := bd.Float64s()
A := pd.Float64s()
alpha := float64(1)
whichblas.Dger(m, n, alpha, x, incX, y, incY, A, lda)
case []float32:
y := bd.Float32s()
A := pd.Float32s()
alpha := float32(1)
whichblas.Sger(m, n, alpha, x, incX, y, incY, A, lda)
case []complex64:
y := bd.Complex64s()
A := pd.Complex64s()
var alpha complex64 = complex(1, 0)
whichblas.Cgeru(m, n, alpha, x, incX, y, incY, A, lda)
case []complex128:
y := bd.Complex128s()
A := pd.Complex128s()
var alpha complex128 = complex(1, 0)
whichblas.Zgeru(m, n, alpha, x, incX, y, incY, A, lda)
default:
return errors.Errorf(typeNYI, "outer", b.Data())
}
return nil
}
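// A minimal usage sketch (illustrative only). The outer product of a length-2 and a length-3
// vector is written into the preallocated (2,3) tensor: prealloc[i][j] = a[i] * b[j].
func exampleOuterSketch() {
	e := StdEng{}
	a := New(WithShape(2), WithBacking([]float64{1, 2}))
	b := New(WithShape(3), WithBacking([]float64{3, 4, 5}))
	p := New(Of(Float64), WithShape(2, 3))
	if err := e.Outer(a, b, p); err != nil {
		panic(err)
	}
	_ = p // expected: [[3 4 5] [6 8 10]]
}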
/* UNEXPORTED UTILITY FUNCTIONS */
func (e StdEng) checkTwoFloatTensors(a, b Tensor) (ad, bd DenseTensor, err error) {
if err = e.checkAccessible(a); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible")
}
if err = e.checkAccessible(b); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible")
}
if a.Dtype() != b.Dtype() {
return nil, nil, errors.New("Expected a and b to have the same Dtype")
}
if ad, err = getFloatDenseTensor(a); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor")
}
if bd, err = getFloatDenseTensor(b); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor")
}
return
}
func (e StdEng) checkThreeFloatTensors(a, b, ret Tensor) (ad, bd, retVal DenseTensor, err error) {
if err = e.checkAccessible(a); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible")
}
if err = e.checkAccessible(b); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible")
}
if err = e.checkAccessible(ret); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: ret is not accessible")
}
if a.Dtype() != b.Dtype() || b.Dtype() != ret.Dtype() {
return nil, nil, nil, errors.New("Expected a and b and retVal all to have the same Dtype")
}
if ad, err = getFloatDenseTensor(a); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor")
}
if bd, err = getFloatDenseTensor(b); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor")
}
if retVal, err = getFloatDenseTensor(ret); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects retVal to be be a DenseTensor")
}
return
}
func (e StdEng) checkTwoFloatComplexTensors(a, b Tensor) (ad, bd DenseTensor, err error) {
if err = e.checkAccessible(a); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible")
}
if err = e.checkAccessible(b); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible")
}
if a.Dtype() != b.Dtype() {
return nil, nil, errors.New("Expected a and b to have the same Dtype")
}
if ad, err = getFloatComplexDenseTensor(a); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor")
}
if bd, err = getFloatComplexDenseTensor(b); err != nil {
return nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor")
}
return
}
func (e StdEng) checkThreeFloatComplexTensors(a, b, ret Tensor) (ad, bd, retVal DenseTensor, err error) {
if err = e.checkAccessible(a); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible")
}
if err = e.checkAccessible(b); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible")
}
if err = e.checkAccessible(ret); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: ret is not accessible")
}
if a.Dtype() != b.Dtype() || b.Dtype() != ret.Dtype() {
return nil, nil, nil, errors.New("Expected a and b and retVal all to have the same Dtype")
}
if ad, err = getFloatComplexDenseTensor(a); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor")
}
if bd, err = getFloatComplexDenseTensor(b); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor")
}
if retVal, err = getFloatComplexDenseTensor(ret); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects retVal to be be a DenseTensor")
}
return
}
tensor-0.9.24/defaultengine_mapreduce.go 0000664 0000000 0000000 00000020552 14265126151 0020315 0 ustar 00root root 0000000 0000000 package tensor
import (
"reflect"
"sort"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/execution"
"gorgonia.org/tensor/internal/storage"
)
func (e StdEng) Map(fn interface{}, a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, nil); err != nil {
err = errors.Wrap(err, "Failed Map()")
return
}
var reuse DenseTensor
var safe, _, incr bool
if reuse, safe, _, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return
}
switch {
case safe && reuse == nil:
// create reuse
if v, ok := a.(View); ok {
if v.IsMaterializable() {
reuse = v.Materialize().(DenseTensor)
} else {
reuse = v.Clone().(DenseTensor)
}
} else {
reuse = New(Of(a.Dtype()), WithShape(a.Shape().Clone()...))
}
case reuse != nil:
if !reuse.IsNativelyAccessible() {
return nil, errors.Errorf(inaccessibleData, reuse)
}
if a.Size() != reuse.Size() {
return nil, errors.Errorf(shapeMismatch, a.Shape(), reuse.Shape())
}
}
// PREP DATA
typ := a.Dtype().Type
var dataA, dataReuse, used *storage.Header
var ait, rit, uit Iterator
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Map")
}
// HANDLE USE CASES
switch {
case !safe:
used = dataA
uit = ait
default:
used = dataReuse
uit = rit
}
// DO
if useIter {
err = e.E.MapIter(typ, fn, used, incr, uit)
} else {
err = e.E.Map(typ, fn, used, incr)
}
if err != nil {
err = errors.Wrapf(err, "Unable to apply function %v to tensor of %v", fn, typ)
return
}
// SET RETVAL
switch {
case reuse != nil:
if err = reuseCheckShape(reuse, a.Shape()); err != nil {
err = errors.Wrapf(err, "Reuse shape check failed")
return
}
retVal = reuse
case !safe:
retVal = a
default:
retVal = reuse
}
return
}
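// A minimal usage sketch (illustrative only). The mapped function's signature has to match the
// tensor's Dtype; here every element of a float64 tensor is doubled. The operation is safe by
// default, so a new tensor is returned and T is left untouched.
func exampleMapSketch() {
	e := StdEng{}
	T := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	double := func(x float64) float64 { return x * 2 }
	ret, err := e.Map(double, T)
	if err != nil {
		panic(err)
	}
	_ = ret // expected: [[2 4] [6 8]]
}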
func (e StdEng) Reduce(fn interface{}, a Tensor, axis int, defaultValue interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
if !a.IsNativelyAccessible() {
return nil, errors.Errorf(inaccessibleData, a)
}
var at, reuse DenseTensor
var dataA, dataReuse *storage.Header
if at, reuse, dataA, dataReuse, err = e.prepReduce(a, axis, opts...); err != nil {
err = errors.Wrap(err, "Prep Reduce failed")
return
}
lastAxis := a.Dims() - 1
typ := a.Dtype().Type
// actual call out to the internal engine
switch {
case (axis == 0 && at.DataOrder().IsRowMajor()) || ((axis == lastAxis || axis == len(a.Shape())-1) && at.DataOrder().IsColMajor()):
var size, split int
if at.DataOrder().IsColMajor() {
return nil, errors.Errorf("NYI: colmajor")
}
size = a.Shape()[0]
split = a.DataSize() / size
storage.CopySliced(typ, dataReuse, 0, split, dataA, 0, split)
err = e.E.ReduceFirst(typ, dataA, dataReuse, split, size, fn)
case (axis == lastAxis && at.DataOrder().IsRowMajor()) || (axis == 0 && at.DataOrder().IsColMajor()):
var dimSize int
if at.DataOrder().IsColMajor() {
return nil, errors.Errorf("NYI: colmajor")
}
dimSize = a.Shape()[axis]
err = e.E.ReduceLast(typ, dataA, dataReuse, dimSize, defaultValue, fn)
default:
dim0 := a.Shape()[0]
dimSize := a.Shape()[axis]
outerStride := a.Strides()[0]
stride := a.Strides()[axis]
expected := reuse.Strides()[0]
err = e.E.ReduceDefault(typ, dataA, dataReuse, dim0, dimSize, outerStride, stride, expected, fn)
}
retVal = reuse
return
}
func (e StdEng) OptimizedReduce(a Tensor, axis int, firstFn, lastFn, defaultFn, defaultValue interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
if !a.IsNativelyAccessible() {
return nil, errors.Errorf(inaccessibleData, a)
}
var at, reuse DenseTensor
var dataA, dataReuse *storage.Header
if at, reuse, dataA, dataReuse, err = e.prepReduce(a, axis, opts...); err != nil {
err = errors.Wrap(err, "Prep Reduce failed")
return
}
lastAxis := a.Dims() - 1
typ := a.Dtype().Type
// actual call out to the internal engine
switch {
case (axis == 0 && at.DataOrder().IsRowMajor()) || ((axis == lastAxis || axis == len(a.Shape())-1) && at.DataOrder().IsColMajor()):
var size, split int
if at.DataOrder().IsColMajor() {
return nil, errors.Errorf("NYI: colmajor")
}
size = a.Shape()[0]
split = a.DataSize() / size
storage.CopySliced(typ, dataReuse, 0, split, dataA, 0, split)
err = e.E.ReduceFirst(typ, dataA, dataReuse, split, size, firstFn)
case (axis == lastAxis && at.DataOrder().IsRowMajor()) || (axis == 0 && at.DataOrder().IsColMajor()):
var dimSize int
if at.DataOrder().IsColMajor() {
return nil, errors.Errorf("NYI: colmajor")
}
dimSize = a.Shape()[axis]
err = e.E.ReduceLast(typ, dataA, dataReuse, dimSize, defaultValue, lastFn)
default:
dim0 := a.Shape()[0]
dimSize := a.Shape()[axis]
outerStride := a.Strides()[0]
stride := a.Strides()[axis]
expected := reuse.Strides()[0]
err = e.E.ReduceDefault(typ, dataA, dataReuse, dim0, dimSize, outerStride, stride, expected, defaultFn)
}
retVal = reuse
return
}
func (e StdEng) Sum(a Tensor, along ...int) (retVal Tensor, err error) {
a2 := a
if v, ok := a.(View); ok && v.IsMaterializable() {
a2 = v.Materialize()
}
return e.reduce("Sum", execution.MonotonicSum, execution.SumMethods, a2, along...)
}
func (e StdEng) Min(a Tensor, along ...int) (retVal Tensor, err error) {
a2 := a
if v, ok := a.(View); ok && v.IsMaterializable() {
a2 = v.Materialize()
}
return e.reduce("Min", execution.MonotonicMin, execution.MinMethods, a2, along...)
}
func (e StdEng) Max(a Tensor, along ...int) (retVal Tensor, err error) {
a2 := a
if v, ok := a.(View); ok && v.IsMaterializable() {
a2 = v.Materialize()
}
return e.reduce("Max", execution.MonotonicMax, execution.MaxMethods, a2, along...)
}
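// A minimal usage sketch (illustrative only). Reducing a (2,3) tensor along axis 0 collapses
// the rows into a length-3 tensor; passing no axes reduces over all axes to a scalar tensor.
func exampleSumSketch() {
	e := StdEng{}
	T := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	colSums, err := e.Sum(T, 0)
	if err != nil {
		panic(err)
	}
	_ = colSums // expected: [5 7 9]
	total, err := e.Sum(T)
	if err != nil {
		panic(err)
	}
	_ = total // expected: a scalar tensor holding 21
}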
func (e StdEng) reduce(
op string,
monotonicMethod func(t reflect.Type, a *storage.Header) (interface{}, error),
methods func(t reflect.Type) (interface{}, interface{}, interface{}, error),
a Tensor,
along ...int) (retVal Tensor, err error) {
switch at := a.(type) {
case *Dense:
hdr := at.hdr()
typ := at.t.Type
monotonic, incr1 := IsMonotonicInts(along) // if both are true, then it means all axes are accounted for, then it'll return a scalar value
if (monotonic && incr1 && len(along) == a.Dims()) || len(along) == 0 {
var ret interface{}
if ret, err = monotonicMethod(typ, hdr); err != nil {
return
}
return New(FromScalar(ret)), nil
}
var firstFn, lastFn, defaultFn interface{}
if firstFn, lastFn, defaultFn, err = methods(typ); err != nil {
return
}
defaultVal := reflect.Zero(typ).Interface()
retVal = a
dimsReduced := 0
sort.Slice(along, func(i, j int) bool { return along[i] < along[j] })
for _, axis := range along {
axis -= dimsReduced
dimsReduced++
if axis >= retVal.Dims() {
err = errors.Errorf(dimMismatch, retVal.Dims(), axis)
return
}
if retVal, err = e.OptimizedReduce(retVal, axis, firstFn, lastFn, defaultFn, defaultVal); err != nil {
return
}
}
return
default:
return nil, errors.Errorf("Cannot perform %s on %T", op, a)
}
}
func (StdEng) prepReduce(a Tensor, axis int, opts ...FuncOpt) (at, reuse DenseTensor, dataA, dataReuse *storage.Header, err error) {
if axis >= a.Dims() {
err = errors.Errorf(dimMismatch, axis, a.Dims())
return
}
if err = unaryCheck(a, nil); err != nil {
err = errors.Wrap(err, "prepReduce failed")
return
}
// FUNC PREP
var safe bool
if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
err = errors.Wrap(err, "Unable to prep unary tensor")
return
}
var newShape Shape
for i, s := range a.Shape() {
if i == axis {
continue
}
newShape = append(newShape, s)
}
switch {
case !safe:
err = errors.New("Reduce only supports safe operations.")
return
case reuse != nil && !reuse.IsNativelyAccessible():
err = errors.Errorf(inaccessibleData, reuse)
return
case reuse != nil:
if reuse.Shape().TotalSize() != newShape.TotalSize() {
err = errors.Errorf(shapeMismatch, reuse.Shape(), newShape)
return
}
reuse.Reshape(newShape...)
case safe && reuse == nil:
reuse = New(Of(a.Dtype()), WithShape(newShape...))
}
// DATA PREP
var useIter bool
if dataA, dataReuse, _, _, useIter, err = prepDataUnary(a, reuse); err != nil {
err = errors.Wrapf(err, "StdEng.Reduce data prep")
return
}
var ok bool
if at, ok = a.(DenseTensor); !ok || useIter {
err = errors.Errorf("Reduce does not (yet) support iterable tensors")
return
}
return
}
tensor-0.9.24/defaultengine_matop_misc.go 0000664 0000000 0000000 00000025137 14265126151 0020507 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
var (
_ Diager = StdEng{}
)
type fastcopier interface {
fastCopyDenseRepeat(t DenseTensor, d *Dense, outers, size, stride, newStride int, repeats []int) error
}
// Repeat repeats the elements of t along the given axis, the number of times given by repeats, allocating a new tensor to hold the result.
func (e StdEng) Repeat(t Tensor, axis int, repeats ...int) (Tensor, error) {
switch tt := t.(type) {
case DenseTensor:
newShape, newRepeats, newAxis, size, err := e.denseRepeatCheck(t, axis, repeats)
if err != nil {
return nil, err
}
rr := recycledDense(t.Dtype(), newShape, WithEngine(StdEng{}))
return e.denseRepeat(tt, rr, newShape, newAxis, size, newRepeats)
default:
return nil, errors.Errorf("NYI")
}
}
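// A minimal usage sketch (illustrative only; it assumes that a single repeat count is
// broadcast across the axis, as with Shape.Repeat). Repeating a (2,2) tensor 3 times along
// axis 1 yields a (2,6) tensor in which every column appears 3 times.
func exampleRepeatSketch() {
	e := StdEng{}
	T := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	ret, err := e.Repeat(T, 1, 3)
	if err != nil {
		panic(err)
	}
	_ = ret // expected shape: (2, 6), with rows [1 1 1 2 2 2] and [3 3 3 4 4 4]
}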
// RepeatReuse is like Repeat, but with a provided reuse Tensor. The reuseTensor must be of the same type as the input t.
func (e StdEng) RepeatReuse(t Tensor, reuse Tensor, axis int, repeats ...int) (Tensor, error) {
switch tt := t.(type) {
case DenseTensor:
newShape, newRepeats, newAxis, size, err := e.denseRepeatCheck(t, axis, repeats)
if err != nil {
return nil, err
}
rr, ok := reuse.(DenseTensor)
if !ok {
return nil, errors.Errorf("t is a DenseTensor but reuse is of %T", reuse)
}
if !reuse.Shape().Eq(newShape) {
return nil, errors.Errorf("Reuse shape is %v. Expected shape is %v", reuse.Shape(), newShape)
}
return e.denseRepeat(tt, rr, newShape, newAxis, size, newRepeats)
default:
return nil, errors.Errorf("NYI")
}
}
func (StdEng) denseRepeatCheck(t Tensor, axis int, repeats []int) (newShape Shape, newRepeats []int, newAxis, size int, err error) {
if newShape, newRepeats, size, err = t.Shape().Repeat(axis, repeats...); err != nil {
return nil, nil, -1, -1, errors.Wrap(err, "Unable to get repeated shape")
}
newAxis = axis
if axis == AllAxes {
newAxis = 0
}
return
}
func (StdEng) denseRepeat(t, reuse DenseTensor, newShape Shape, axis, size int, repeats []int) (retVal DenseTensor, err error) {
d, err := assertDense(reuse)
if err != nil {
return nil, errors.Wrapf(err, "Repeat reuse is not a *Dense")
}
var outers int
if t.IsScalar() {
outers = 1
} else {
outers = ProdInts(t.Shape()[0:axis])
}
var stride, newStride int
if newShape.IsVector() || t.IsVector() {
stride = 1 // special case because CalcStrides() will return []int{1} as the strides for a vector
} else {
stride = t.ostrides()[axis]
}
if newShape.IsVector() {
newStride = 1
} else {
newStride = d.ostrides()[axis]
}
var destStart, srcStart int
// fastCopy indicates whether we can bypass the copyDenseSliced method and populate the output tensor with a faster raw copy
var fastCopy bool
var fce fastcopier
// we need an engine for fastCopying...
e := t.Engine()
// e can never be nil. Error would have occurred elsewhere
var ok bool
if fce, ok = e.(fastcopier); ok {
fastCopy = true
}
// For masked tensors we skip the fast copy path, to keep the code readable
if ms, ok := t.(MaskedTensor); ok && ms.IsMasked() {
fastCopy = false
}
// if d is not a fastcopier, then we also cannot use fast copy
if _, ok := d.Engine().(fastcopier); !ok {
fastCopy = false
}
if fastCopy {
if err := fce.fastCopyDenseRepeat(t, d, outers, size, stride, newStride, repeats); err != nil {
return nil, err
}
return d, nil
}
for i := 0; i < outers; i++ {
for j := 0; j < size; j++ {
var tmp int
tmp = repeats[j]
for k := 0; k < tmp; k++ {
if srcStart >= t.len() || destStart+stride > d.len() {
break
}
copyDenseSliced(d, destStart, d.len(), t, srcStart, t.len())
destStart += newStride
}
srcStart += stride
}
}
return d, nil
}
func (e StdEng) fastCopyDenseRepeat(src DenseTensor, dest *Dense, outers, size, stride, newStride int, repeats []int) error {
sarr := src.arr()
darr := dest.arr()
var destStart, srcStart int
for i := 0; i < outers; i++ {
// faster shortcut for common case.
//
// Consider a case where:
// a := ⎡ 1 ⎤
// ⎢ 2 ⎥
// ⎢ 3 ⎥
// ⎣ 4 ⎦
// a has a shape of (4, 1). it is a *Dense.
//
// Now assume we want to repeat it on axis 1, 3 times. We want to repeat it into `b`,
// which is already allocated and zeroed, as shown below
//
// b := ⎡ 0 0 0 ⎤
// ⎢ 0 0 0 ⎥
// ⎢ 0 0 0 ⎥
// ⎣ 0 0 0 ⎦
//
// Now, both `a` and `b` have a stride of 1.
//
// The desired result is:
// b := ⎡ 1 1 1 ⎤
// ⎢ 2 2 2 ⎥
// ⎢ 3 3 3 ⎥
// ⎣ 4 4 4 ⎦
///
// Observe that this is simply broadcasting (copying) a[0] (a scalar value) to the row b[0], and so on and so forth.
// This can be done without knowing the full type - we simply copy the bytes over.
if stride == 1 && newStride == 1 {
for sz := 0; sz < size; sz++ {
tmp := repeats[sz]
// first we get the bounds of the src and the dest
// the srcStart and destStart are the indices assuming a flat array of []T
// we need to get the byte slice equivalent.
bSrcStart := srcStart * int(sarr.t.Size())
bSrcEnd := (srcStart + stride) * int(sarr.t.Size())
bDestStart := destStart * int(darr.t.Size())
bDestEnd := (destStart + tmp) * int(darr.t.Size())
// then we get the data as a slice of raw bytes
sBS := sarr.Header.Raw
dBS := darr.Header.Raw
// recall that len(src) < len(dest)
// it's easier to understand if we define the ranges.
// Less prone to errors.
sRange := sBS[bSrcStart:bSrcEnd]
dRange := dBS[bDestStart:bDestEnd]
// finally we copy things.
for i := 0; i < len(dRange); i += len(sRange) {
copy(dRange[i:], sRange)
}
srcStart += stride
destStart += tmp
}
// we can straightaway broadcast
continue
}
for j := 0; j < size; j++ {
var tmp int
tmp = repeats[j]
var tSlice array
tSlice = sarr.slice(srcStart, src.len())
for k := 0; k < tmp; k++ {
if srcStart >= src.len() || destStart+stride > dest.len() {
break
}
dSlice := darr.slice(destStart, destStart+newStride)
// THIS IS AN OPTIMIZATION. REVISIT WHEN NEEDED.
storage.Copy(dSlice.t.Type, &dSlice.Header, &tSlice.Header)
destStart += newStride
}
srcStart += stride
}
}
return nil
}
// Concat concatenates t with the other tensors along the given axis.
func (e StdEng) Concat(t Tensor, axis int, others ...Tensor) (retVal Tensor, err error) {
switch tt := t.(type) {
case DenseTensor:
var denses []DenseTensor
if denses, err = tensorsToDenseTensors(others); err != nil {
return nil, errors.Wrap(err, "Concat failed")
}
return e.denseConcat(tt, axis, denses)
default:
return nil, errors.Errorf("NYI")
}
}
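// A minimal usage sketch (illustrative only). Concatenating two (2,2) tensors along axis 0
// yields a (4,2) tensor; along axis 1 it would yield (2,4).
func exampleConcatSketch() {
	e := StdEng{}
	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
	ret, err := e.Concat(a, 0, b)
	if err != nil {
		panic(err)
	}
	_ = ret // expected: a (4, 2) tensor [[1 2] [3 4] [5 6] [7 8]]
}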
func (e StdEng) denseConcat(a DenseTensor, axis int, Ts []DenseTensor) (DenseTensor, error) {
ss := make([]Shape, len(Ts))
var err error
var isMasked bool
for i, T := range Ts {
ss[i] = T.Shape()
if mt, ok := T.(MaskedTensor); ok {
isMasked = isMasked || mt.IsMasked()
}
}
var newShape Shape
if newShape, err = a.Shape().Concat(axis, ss...); err != nil {
return nil, errors.Wrap(err, "Unable to find new shape that results from concatenation")
}
retVal := recycledDense(a.Dtype(), newShape, WithEngine(e))
if isMasked {
retVal.makeMask()
}
all := make([]DenseTensor, len(Ts)+1)
all[0] = a
copy(all[1:], Ts)
// TODO: OPTIMIZATION
// When (axis == 0 && a is row major && all others are row major) || (axis == last axis of a && all tensors are col major)
// just flat copy
//
// isOuter is true when the axis is the outermost axis
// isInner is true when the axis is the inner most axis
isOuter := axis == 0
isInner := axis == (a.Shape().Dims() - 1)
// special case
var start, end int
for _, T := range all {
end += T.Shape()[axis]
slices := make([]Slice, axis+1)
slices[axis] = makeRS(start, end)
var v *Dense
if v, err = sliceDense(retVal, slices...); err != nil {
return nil, errors.Wrap(err, "Unable to slice DenseTensor while performing denseConcat")
}
// keep dims after slicing
switch {
case v.IsVector() && T.IsMatrix() && axis == 0:
v.reshape(v.shape[0], 1)
case T.IsRowVec() && axis == 0:
T.reshape(T.Shape()[1])
case v.Shape().IsScalarEquiv() && T.Shape().IsScalarEquiv():
copyArray(v.arrPtr(), T.arrPtr())
if mt, ok := T.(MaskedTensor); ok {
copy(v.mask, mt.Mask())
}
start = end
continue
default:
diff := retVal.Shape().Dims() - v.Shape().Dims()
if diff > 0 && isOuter {
newShape := make(Shape, v.Shape().Dims()+diff)
for i := 0; i < diff; i++ {
newShape[i] = 1
}
copy(newShape[diff:], v.Shape())
v.reshape(newShape...)
} else if diff > 0 && isInner {
newShape := v.Shape().Clone()
newStrides := v.strides
for i := 0; i < diff; i++ {
newShape = append(newShape, 1)
newStrides = append(newStrides, 1)
}
v.shape = newShape
v.strides = newStrides
} else if T.Shape()[axis] == 1 {
if err := v.unsqueeze(axis); err != nil {
return nil, errors.Wrapf(err, "Unable to keep dims after slicing a shape %v on axis %d where the size is 1", T.Shape(), axis)
}
}
}
var vmask, Tmask []bool
vmask = v.mask
v.mask = nil
if mt, ok := T.(MaskedTensor); ok && mt.IsMasked() {
Tmask = mt.Mask()
mt.SetMask(nil)
}
if err = assignArray(v, T); err != nil {
return nil, errors.Wrap(err, "Unable to assignArray in denseConcat")
}
// if it's a masked tensor, we copy the mask as well
if Tmask != nil {
if vmask != nil {
if cap(vmask) < len(Tmask) {
vmask2 := make([]bool, len(Tmask))
copy(vmask2, vmask)
vmask = vmask2
}
copy(vmask, Tmask)
v.SetMask(vmask)
}
// mt.SetMask(Tmask)
}
start = end
}
return retVal, nil
}
// Diag extracts the main diagonal of the matrix t. t must be a 2-dimensional tensor of a numeric Dtype.
func (e StdEng) Diag(t Tensor) (retVal Tensor, err error) {
a, ok := t.(DenseTensor)
if !ok {
return nil, errors.Errorf("StdEng only works with DenseTensor for Diagonal()")
}
if a.Dims() != 2 {
err = errors.Errorf(dimMismatch, 2, a.Dims())
return
}
if err = typeclassCheck(a.Dtype(), numberTypes); err != nil {
return nil, errors.Wrap(err, "Diagonal")
}
rstride := a.Strides()[0]
cstride := a.Strides()[1]
r := a.Shape()[0]
c := a.Shape()[1]
m := MinInt(r, c)
stride := rstride + cstride
b := a.Clone().(DenseTensor)
b.Zero()
switch a.rtype().Size() {
case 1:
bdata := b.hdr().Uint8s()
adata := a.hdr().Uint8s()
for i := 0; i < m; i++ {
bdata[i] = adata[i*stride]
}
case 2:
bdata := b.hdr().Uint16s()
adata := a.hdr().Uint16s()
for i := 0; i < m; i++ {
bdata[i] = adata[i*stride]
}
case 4:
bdata := b.hdr().Uint32s()
adata := a.hdr().Uint32s()
for i := 0; i < m; i++ {
bdata[i] = adata[i*stride]
}
case 8:
bdata := b.hdr().Uint64s()
adata := a.hdr().Uint64s()
for i := 0; i < m; i++ {
bdata[i] = adata[i*stride]
}
default:
return nil, errors.Errorf(typeNYI, "Arbitrary sized diag", t)
}
return b, nil
}
tensor-0.9.24/defaultengine_matop_stack.go 0000664 0000000 0000000 00000024036 14265126151 0020656 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
)
// This file contains code for the execution engine to stack tensors
func (e StdEng) StackDense(t DenseTensor, axis int, others ...DenseTensor) (retVal DenseTensor, err error) {
opdims := t.Dims()
if axis >= opdims+1 {
err = errors.Errorf(dimMismatch, opdims+1, axis)
return
}
newShape := Shape(BorrowInts(opdims + 1))
newShape[axis] = len(others) + 1
shape := t.Shape()
var cur int
for i, s := range shape {
if i == axis {
cur++
}
newShape[cur] = s
cur++
}
info := t.Info()
var newStrides []int
if info.o.IsColMajor() {
newStrides = newShape.CalcStridesColMajor()
} else {
newStrides = newShape.CalcStrides()
}
ap := MakeAP(newShape, newStrides, info.o, info.Δ)
allNoMat := !t.RequiresIterator()
for _, ot := range others {
if allNoMat && ot.RequiresIterator() {
allNoMat = false
}
}
retVal = recycledDense(t.Dtype(), ap.Shape(), WithEngine(e))
retVal.setAP(&ap)
// the "viewStack" method is the more generalized method
// and will work for all Tensors, regardless of whether it's a view
// But the simpleStack is faster, and is an optimization
if allNoMat {
retVal = e.denseSimpleStack(t, retVal, axis, others)
} else {
retVal, err = e.denseViewStack(t, retVal, axis, others)
}
return
}
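// A minimal usage sketch (illustrative only). Unlike Concat, stacking introduces a new axis:
// stacking two (2,2) tensors along axis 0 yields a (2,2,2) tensor.
func exampleStackDenseSketch() {
	e := StdEng{}
	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
	ret, err := e.StackDense(a, 0, b)
	if err != nil {
		panic(err)
	}
	_ = ret // expected shape: (2, 2, 2)
}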
func (e StdEng) denseSimpleStack(t, retVal DenseTensor, axis int, others []DenseTensor) DenseTensor {
switch axis {
case 0:
copyDense(retVal, t)
next := t.len()
for _, ot := range others {
copyDenseSliced(retVal, next, retVal.len(), ot, 0, ot.len())
next += ot.len()
}
default:
axisStride := retVal.Info().Strides()[axis]
batches := retVal.len() / axisStride
destStart := 0
start := 0
end := start + axisStride
for i := 0; i < batches; i++ {
copyDenseSliced(retVal, destStart, retVal.len(), t, start, end)
for _, ot := range others {
destStart += axisStride
copyDenseSliced(retVal, destStart, retVal.len(), ot, start, end)
i++
}
destStart += axisStride
start += axisStride
end += axisStride
}
}
return retVal
}
func (e StdEng) denseViewStack(t, retVal DenseTensor, axis int, others []DenseTensor) (DenseTensor, error) {
axisStride := retVal.Info().Strides()[axis]
batches := retVal.len() / axisStride
it := IteratorFromDense(t)
its := make([]Iterator, 0, len(others))
for _, ot := range others {
oter := IteratorFromDense(ot)
its = append(its, oter)
}
err := e.doViewStack(t, retVal, axisStride, batches, it, others, its)
return retVal, err
}
func (e StdEng) doViewStack(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) error {
switch int(t.Dtype().Size()) {
case 1:
return e.doViewStack1(t, retVal, axisStride, batches, it, others, its)
case 2:
return e.doViewStack2(t, retVal, axisStride, batches, it, others, its)
case 4:
return e.doViewStack4(t, retVal, axisStride, batches, it, others, its)
case 8:
return e.doViewStack8(t, retVal, axisStride, batches, it, others, its)
default:
return e.doViewStackArbitrary(t, retVal, axisStride, batches, it, others, its)
}
}
func (e StdEng) doViewStack1(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) {
data := retVal.hdr().Uint8s()[:0]
var mask []bool
var retIsMasked bool
if mt, ok := t.(MaskedTensor); ok {
retIsMasked = mt.IsMasked()
}
for _, ot := range others {
if mt, ok := ot.(MaskedTensor); ok {
retIsMasked = retIsMasked || mt.IsMasked()
}
}
f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) {
var tmask []bool
if mt, ok := t.(MaskedTensor); ok {
tmask = mt.Mask()
isMasked = mt.IsMasked()
}
for last = 0; last < axisStride; last++ {
id, err := it.Next()
if handleNoOp(err) != nil {
return -1, isMasked, errors.Wrap(err, "doviewStackfailed")
}
if err != nil {
break
}
data = append(data, t.hdr().Uint8s()[id])
if isMasked {
mask = append(mask, tmask[id])
}
}
return
}
for i := 0; i < batches; i++ {
var last int
var isMasked bool
if last, isMasked, err = f(t, it); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
for j, ot := range others {
if last, isMasked, err = f(ot, its[j]); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
}
}
if mt, ok := retVal.(MaskedTensor); ok {
mt.SetMask(mask)
}
return nil
}
func (e StdEng) doViewStack2(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) {
data := retVal.hdr().Uint16s()[:0]
var mask []bool
var retIsMasked bool
if mt, ok := t.(MaskedTensor); ok {
retIsMasked = mt.IsMasked()
}
for _, ot := range others {
if mt, ok := ot.(MaskedTensor); ok {
retIsMasked = retIsMasked || mt.IsMasked()
}
}
f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) {
var tmask []bool
if mt, ok := t.(MaskedTensor); ok {
tmask = mt.Mask()
isMasked = mt.IsMasked()
}
for last = 0; last < axisStride; last++ {
id, err := it.Next()
if handleNoOp(err) != nil {
return -1, isMasked, errors.Wrap(err, "doviewStackfailed")
}
if err != nil {
break
}
data = append(data, t.hdr().Uint16s()[id])
if isMasked {
mask = append(mask, tmask[id])
}
}
return
}
for i := 0; i < batches; i++ {
var last int
var isMasked bool
if last, isMasked, err = f(t, it); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
for j, ot := range others {
if last, isMasked, err = f(ot, its[j]); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
}
}
if mt, ok := retVal.(MaskedTensor); ok {
mt.SetMask(mask)
}
return nil
}
func (e StdEng) doViewStack4(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) {
data := retVal.hdr().Uint32s()[:0]
var mask []bool
var retIsMasked bool
if mt, ok := t.(MaskedTensor); ok {
retIsMasked = mt.IsMasked()
}
for _, ot := range others {
if mt, ok := ot.(MaskedTensor); ok {
retIsMasked = retIsMasked || mt.IsMasked()
}
}
f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) {
var tmask []bool
if mt, ok := t.(MaskedTensor); ok {
tmask = mt.Mask()
isMasked = mt.IsMasked()
}
for last = 0; last < axisStride; last++ {
id, err := it.Next()
if handleNoOp(err) != nil {
return -1, isMasked, errors.Wrap(err, "doViewStack failed")
}
if err != nil {
break
}
data = append(data, t.hdr().Uint32s()[id])
if isMasked {
mask = append(mask, tmask[id])
}
}
return
}
for i := 0; i < batches; i++ {
var last int
var isMasked bool
if last, isMasked, err = f(t, it); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
for j, ot := range others {
if last, isMasked, err = f(ot, its[j]); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
}
}
if mt, ok := retVal.(MaskedTensor); ok {
mt.SetMask(mask)
}
return nil
}
func (e StdEng) doViewStack8(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) {
data := retVal.hdr().Uint64s()[:0]
var mask []bool
var retIsMasked bool
if mt, ok := t.(MaskedTensor); ok {
retIsMasked = mt.IsMasked()
}
for _, ot := range others {
if mt, ok := ot.(MaskedTensor); ok {
retIsMasked = retIsMasked || mt.IsMasked()
}
}
f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) {
var tmask []bool
if mt, ok := t.(MaskedTensor); ok {
tmask = mt.Mask()
isMasked = mt.IsMasked()
}
for last = 0; last < axisStride; last++ {
id, err := it.Next()
if handleNoOp(err) != nil {
return -1, isMasked, errors.Wrap(err, "doViewStack failed")
}
if err != nil {
break
}
data = append(data, t.hdr().Uint64s()[id])
if isMasked {
mask = append(mask, tmask[id])
}
}
return
}
for i := 0; i < batches; i++ {
var last int
var isMasked bool
if last, isMasked, err = f(t, it); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
for j, ot := range others {
if last, isMasked, err = f(ot, its[j]); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
}
}
if mt, ok := retVal.(MaskedTensor); ok {
mt.SetMask(mask)
}
return nil
}
func (e StdEng) doViewStackArbitrary(t, retVal DenseTensor, axisStride, batches int, it Iterator, others []DenseTensor, its []Iterator) (err error) {
dt := t.Dtype()
data := retVal.hdr().Raw[:0] // truncate to 0
size := int(dt.Size())
var mask []bool
var retIsMasked bool
if mt, ok := t.(MaskedTensor); ok {
retIsMasked = mt.IsMasked()
}
for _, ot := range others {
if mt, ok := ot.(MaskedTensor); ok {
retIsMasked = retIsMasked || mt.IsMasked()
}
}
f := func(t DenseTensor, it Iterator) (last int, isMasked bool, err error) {
var tmask []bool
if mt, ok := t.(MaskedTensor); ok {
tmask = mt.Mask()
isMasked = mt.IsMasked()
}
bs := t.hdr().Raw
for last = 0; last < axisStride; last++ {
id, err := it.Next()
if handleNoOp(err) != nil {
return -1, isMasked, errors.Wrap(err, "doViewStack failed")
}
if err != nil {
break
}
v := bs[id*size : id*size+size]
data = append(data, v...)
if isMasked {
mask = append(mask, tmask[id])
}
}
return
}
for i := 0; i < batches; i++ {
var last int
var isMasked bool
if last, isMasked, err = f(t, it); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
for j, ot := range others {
if last, isMasked, err = f(ot, its[j]); err != nil {
return
}
if retIsMasked && (!isMasked) {
mask = append(mask, make([]bool, last)...)
}
}
}
if mt, ok := retVal.(MaskedTensor); ok {
mt.SetMask(mask)
}
return nil
}
tensor-0.9.24/defaultengine_matop_transpose.go 0000664 0000000 0000000 00000006532 14265126151 0021570 0 ustar 00root root 0000000 0000000 // +build !inplacetranspose
package tensor
import (
"github.com/pkg/errors"
)
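// Transpose performs the data movement required to transpose a. Only natively accessible
// DenseTensor values are currently supported; expStrides is assumed (from its name and the
// in-place variant of this file) to carry the expected strides of the transposed result.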
func (e StdEng) Transpose(a Tensor, expStrides []int) error {
if !a.IsNativelyAccessible() {
return errors.Errorf("Cannot Transpose() on non-natively accessible tensor")
}
if dt, ok := a.(DenseTensor); ok {
e.denseTranspose(dt, expStrides)
return nil
}
return errors.Errorf("Tranpose for tensor of %T not supported", a)
}
func (e StdEng) denseTranspose(a DenseTensor, expStrides []int) {
if a.rtype() == String.Type {
e.denseTransposeString(a, expStrides)
return
}
e.transposeMask(a)
switch a.rtype().Size() {
case 1:
e.denseTranspose1(a, expStrides)
case 2:
e.denseTranspose2(a, expStrides)
case 4:
e.denseTranspose4(a, expStrides)
case 8:
e.denseTranspose8(a, expStrides)
default:
e.denseTransposeArbitrary(a, expStrides)
}
}
func (e StdEng) transposeMask(a DenseTensor) {
if !a.(*Dense).IsMasked() {
return
}
orig := a.(*Dense).Mask()
tmp := make([]bool, len(orig))
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
tmp[j] = orig[i]
j++
}
copy(orig, tmp)
}
func (e StdEng) denseTranspose1(a DenseTensor, expStrides []int) {
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
u8s := tmpArr.Uint8s()
orig := a.hdr().Uint8s()
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
u8s[j] = orig[i]
j++
}
copy(orig, u8s)
}
func (e StdEng) denseTranspose2(a DenseTensor, expStrides []int) {
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
u16s := tmpArr.Uint16s()
orig := a.hdr().Uint16s()
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
u16s[j] = orig[i]
j++
}
copy(orig, u16s)
}
func (e StdEng) denseTranspose4(a DenseTensor, expStrides []int) {
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
u32s := tmpArr.Uint32s()
orig := a.hdr().Uint32s()
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
u32s[j] = orig[i]
j++
}
copy(orig, u32s)
}
func (e StdEng) denseTranspose8(a DenseTensor, expStrides []int) {
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
u64s := tmpArr.Uint64s()
orig := a.hdr().Uint64s()
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
u64s[j] = orig[i]
j++
}
copy(orig, u64s)
}
func (e StdEng) denseTransposeString(a DenseTensor, expStrides []int) {
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
strs := tmpArr.Strings()
orig := a.hdr().Strings()
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
strs[j] = orig[i]
j++
}
copy(orig, strs)
}
func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) {
rtype := a.rtype()
typeSize := int(rtype.Size())
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
// arbs := storage.AsByteSlice(tmpArr.hdr(), rtype)
arbs := tmpArr.byteSlice()
orig := a.hdr().Raw
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
srcStart := i * typeSize
srcEnd := srcStart + typeSize
dstStart := j * typeSize
dstEnd := dstStart + typeSize
copy(arbs[dstStart:dstEnd], orig[srcStart:srcEnd])
j++
}
copy(orig, arbs)
}
tensor-0.9.24/defaultengine_matop_transpose_inplace.go 0000664 0000000 0000000 00000014222 14265126151 0023256 0 ustar 00root root 0000000 0000000 // +build inplacetranspose
package tensor
import (
"github.com/pkg/errors"
)
func (e StdEng) Transpose(a Tensor, expStrides []int) error {
if !a.IsNativelyAccessible() {
return errors.Errorf("Cannot Transpose() on non-natively accessible tensor")
}
if dt, ok := a.(DenseTensor); ok {
e.denseTranspose(dt, expStrides)
return nil
}
return errors.Errorf("Tranpose for tensor of %T not supported", a)
}
func (e StdEng) denseTranspose(a DenseTensor, expStrides []int) {
if a.rtype() == String.Type {
e.denseTransposeString(a, expStrides)
return
}
e.transposeMask(a)
switch a.rtype().Size() {
case 1:
e.denseTranspose1(a, expStrides)
case 2:
e.denseTranspose2(a, expStrides)
case 4:
e.denseTranspose4(a, expStrides)
case 8:
e.denseTranspose8(a, expStrides)
default:
e.denseTransposeArbitrary(a, expStrides)
}
}
func (e StdEng) transposeMask(a DenseTensor) {
if !a.(*Dense).IsMasked() {
return
}
shape := a.Shape()
if len(shape) != 2 {
// TODO(poopoothegorilla): currently only two dimensions are implemented
return
}
n, m := shape[0], shape[1]
mask := a.(*Dense).Mask()
size := len(mask)
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1)
for i := 0; i < size; i++ {
srci := i
if track.IsSet(srci) {
continue
}
srcv := mask[srci]
for {
oc := srci % n
or := (srci - oc) / n
desti := oc*m + or
if track.IsSet(desti) {
break
}
track.Set(desti)
destv := mask[desti]
mask[desti] = srcv
srci = desti
srcv = destv
}
}
}
func (e StdEng) denseTranspose1(a DenseTensor, expStrides []int) {
axes := a.transposeAxes()
size := a.len()
// first we'll create a bit-map to track which elements have been moved to their correct places
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1) // first and last element of a transpose don't change
var saved, tmp byte
var i int
data := a.hdr().Uint8s()
if len(data) < 4 {
return
}
for i = 1; ; {
dest := a.transposeIndex(i, axes, expStrides)
if track.IsSet(i) && track.IsSet(dest) {
data[i] = saved
saved = 0
for i < size && track.IsSet(i) {
i++
}
if i >= size {
break
}
continue
}
track.Set(i)
tmp = data[i]
data[i] = saved
saved = tmp
i = dest
}
}
func (e StdEng) denseTranspose2(a DenseTensor, expStrides []int) {
axes := a.transposeAxes()
size := a.len()
// first we'll create a bit-map to track which elements have been moved to their correct places
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1) // first and last element of a transpose don't change
var saved, tmp uint16
var i int
data := a.hdr().Uint16s()
if len(data) < 4 {
return
}
for i = 1; ; {
dest := a.transposeIndex(i, axes, expStrides)
if track.IsSet(i) && track.IsSet(dest) {
data[i] = saved
saved = 0
for i < size && track.IsSet(i) {
i++
}
if i >= size {
break
}
continue
}
track.Set(i)
tmp = data[i]
data[i] = saved
saved = tmp
i = dest
}
}
func (e StdEng) denseTranspose4(a DenseTensor, expStrides []int) {
axes := a.transposeAxes()
size := a.len()
// first we'll create a bit-map to track which elements have been moved to their correct places
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1) // first and last element of a transpose don't change
var saved, tmp uint32
var i int
data := a.hdr().Uint32s()
if len(data) < 4 {
return
}
for i = 1; ; {
dest := a.transposeIndex(i, axes, expStrides)
if track.IsSet(i) && track.IsSet(dest) {
data[i] = saved
saved = 0
for i < size && track.IsSet(i) {
i++
}
if i >= size {
break
}
continue
}
track.Set(i)
tmp = data[i]
data[i] = saved
saved = tmp
i = dest
}
}
func (e StdEng) denseTranspose8(a DenseTensor, expStrides []int) {
axes := a.transposeAxes()
size := a.len()
// first we'll create a bit-map to track which elements have been moved to their correct places
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1) // first and last element of a transpose don't change
var saved, tmp uint64
var i int
data := a.hdr().Uint64s()
if len(data) < 4 {
return
}
for i = 1; ; {
dest := a.transposeIndex(i, axes, expStrides)
if track.IsSet(i) && track.IsSet(dest) {
data[i] = saved
saved = 0
for i < size && track.IsSet(i) {
i++
}
if i >= size {
break
}
continue
}
track.Set(i)
// log.Printf("i: %d start %d, end %d | tmp %v saved %v", i, start, end, tmp, saved)
tmp = data[i]
data[i] = saved
saved = tmp
i = dest
}
}
func (e StdEng) denseTransposeString(a DenseTensor, expStrides []int) {
axes := a.transposeAxes()
size := a.len()
// first we'll create a bit-map to track which elements have been moved to their correct places
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1) // first and last element of a transpose don't change
var saved, tmp string
var i int
data := a.hdr().Strings()
if len(data) < 4 {
return
}
for i = 1; ; {
dest := a.transposeIndex(i, axes, expStrides)
if track.IsSet(i) && track.IsSet(dest) {
data[i] = saved
saved = ""
for i < size && track.IsSet(i) {
i++
}
if i >= size {
break
}
continue
}
track.Set(i)
tmp = data[i]
data[i] = saved
saved = tmp
i = dest
}
}
func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) {
axes := a.transposeAxes()
size := a.len()
rtype := a.rtype()
typeSize := int(rtype.Size())
// first we'll create a bit-map to track which elements have been moved to their correct places
track := NewBitMap(size)
track.Set(0)
track.Set(size - 1) // first and last element of a transpose don't change
saved := make([]byte, typeSize, typeSize)
tmp := make([]byte, typeSize, typeSize)
var i int
data := a.arr().Raw
if len(data) < 4*typeSize {
return
}
for i = 1; ; {
dest := a.transposeIndex(i, axes, expStrides)
start := typeSize * i
end := start + typeSize
if track.IsSet(i) && track.IsSet(dest) {
copy(data[start:end], saved)
for i := range saved {
saved[i] = 0
}
for i < size && track.IsSet(i) {
i++
}
if i >= size {
break
}
continue
}
track.Set(i)
copy(tmp, data[start:end])
copy(data[start:end], saved)
copy(saved, tmp)
i = dest
}
}
tensor-0.9.24/defaultengine_minmax.go 0000664 0000000 0000000 00000022455 14265126151 0017645 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
var (
_ MinBetweener = StdEng{}
_ MaxBetweener = StdEng{}
)
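// MinBetween computes the element-wise minimum of a and b. Depending on the FuncOpts supplied, the
// result is written into a reuse tensor, into a freshly allocated tensor, or back into a (unsafe).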
func (e StdEng) MinBetween(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, ordTypes); err != nil {
return nil, errors.Wrapf(err, "MinBetween failed")
}
var reuse DenseTensor
var safe bool
if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.MinBetween")
}
// check to see if anything needs to be created
if reuse == nil {
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && reuse == nil:
err = e.E.MinBetweenIter(typ, dataA, dataB, ait, bit)
retVal = a
case safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.MinBetweenIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
panic("Unreachable")
}
return
}
// standard
switch {
case !safe && reuse == nil:
err = e.E.MinBetween(typ, dataA, dataB)
retVal = a
case safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.MinBetween(typ, dataReuse, dataB)
retVal = reuse
default:
panic("Unreachable")
}
return
}
func (e StdEng) MaxBetween(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = binaryCheck(a, b, ordTypes); err != nil {
return nil, errors.Wrapf(err, "MaxBetween failed")
}
var reuse DenseTensor
var safe bool
if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter, swap bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.MaxBetween")
}
// check to see if anything needs to be created
if reuse == nil {
if swap {
reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
} else {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
}
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && reuse == nil:
err = e.E.MaxBetweenIter(typ, dataA, dataB, ait, bit)
retVal = a
case safe && reuse != nil:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.MaxBetweenIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
panic("Unreachable")
}
return
}
// standard
switch {
case !safe && reuse == nil:
err = e.E.MaxBetween(typ, dataA, dataB)
retVal = a
case safe && reuse != nil:
storage.Copy(typ, dataReuse, dataA)
err = e.E.MaxBetween(typ, dataReuse, dataB)
retVal = reuse
default:
panic("Unreachable")
}
return
}
func (e StdEng) MinBetweenScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, "MinBetween failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "MinBetween failed")
}
var reuse DenseTensor
var safe bool
if reuse, safe, _, _, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.MinBetween")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.MinBetween")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
if reuse == nil {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && reuse == nil:
err = e.E.MinBetweenIter(typ, dataA, dataB, ait, bit)
retVal = a
case safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.MinBetweenIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.MinBetweenIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
panic("Unreachable")
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.MinBetween(typ, dataReuse, dataB)
retVal = reuse
return
case safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.MinBetween(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && reuse == nil:
err = e.E.MinBetween(typ, dataA, dataB)
retVal = a
case safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.MinBetween(typ, dataReuse, dataB)
retVal = reuse
case safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.MinBetween(typ, dataA, dataReuse)
retVal = reuse
default:
panic("Unreachable")
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
func (e StdEng) MaxBetweenScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, "MaxBetween failed")
}
if err = scalarDtypeCheck(t, s); err != nil {
return nil, errors.Wrap(err, "MaxBetween failed")
}
var reuse DenseTensor
var safe bool
if reuse, safe, _, _, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
a := t
typ := t.Dtype().Type
var ait, bit, iit Iterator
var dataA, dataB, dataReuse, scalarHeader *storage.Header
var useIter, newAlloc bool
if leftTensor {
if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.MaxBetween")
}
scalarHeader = dataB
} else {
if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.MaxBetween")
}
scalarHeader = dataA
}
// check to see if anything needs to be created
if reuse == nil {
reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
dataReuse = reuse.hdr()
if useIter {
iit = IteratorFromDense(reuse)
}
}
if useIter {
switch {
case !safe && reuse == nil:
err = e.E.MaxBetweenIter(typ, dataA, dataB, ait, bit)
retVal = a
case safe && reuse != nil && !leftTensor:
storage.CopyIter(typ, dataReuse, dataB, iit, bit)
bit.Reset()
iit.Reset()
err = e.E.MaxBetweenIter(typ, dataA, dataReuse, ait, bit)
retVal = reuse
case safe && reuse != nil && leftTensor:
storage.CopyIter(typ, dataReuse, dataA, iit, ait)
ait.Reset()
iit.Reset()
err = e.E.MaxBetweenIter(typ, dataReuse, dataB, iit, bit)
retVal = reuse
default: // safe && bool
panic("Unreachable")
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
// handle special case where A and B have both len 1
if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
switch {
case safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.MaxBetween(typ, dataReuse, dataB)
retVal = reuse
return
case safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.MaxBetween(typ, dataReuse, dataA)
retVal = reuse
return
}
}
// standard
switch {
case !safe && reuse == nil:
err = e.E.MaxBetween(typ, dataA, dataB)
retVal = a
case safe && reuse != nil && leftTensor:
storage.Copy(typ, dataReuse, dataA)
err = e.E.MaxBetween(typ, dataReuse, dataB)
retVal = reuse
case safe && reuse != nil && !leftTensor:
storage.Copy(typ, dataReuse, dataB)
err = e.E.MaxBetween(typ, dataA, dataReuse)
retVal = reuse
default:
panic("Unreachable")
}
if newAlloc {
freeScalar(scalarHeader.Raw)
}
returnHeader(scalarHeader)
return
}
tensor-0.9.24/defaultengine_misc.go 0000664 0000000 0000000 00000004400 14265126151 0017275 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
func (e StdEng) Clamp(a Tensor, min, max interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, nonComplexNumberTypes); err != nil {
return nil, errors.Wrap(err, "Clamp failed")
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), false, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Neg")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.ClampIter(typ, cloned.hdr(), ait, min, max); err != nil {
return nil, errors.Wrapf(err, "Unable to perform Clamp")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.ClampIter(typ, dataReuse, rit, min, max)
retVal = reuse
case !safe:
err = e.E.ClampIter(typ, dataA, ait, min, max)
retVal = a
default:
cloned := a.Clone().(Tensor)
err = e.E.ClampIter(typ, cloned.hdr(), ait, min, max)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Clamp(typ, cloned.hdr(), min, max); err != nil {
return nil, errors.Wrapf(err, "Unable to perform Clamp")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Clamp(typ, dataReuse, min, max)
retVal = reuse
case !safe:
err = e.E.Clamp(typ, dataA, min, max)
retVal = a
default:
cloned := a.Clone().(Tensor)
err = e.E.Clamp(typ, cloned.hdr(), min, max)
retVal = cloned
}
return
}
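// FMA computes a fused multiply-add, a * x + y, by multiplying a and x with y as the increment target.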
func (e StdEng) FMA(a, x, y Tensor) (Tensor, error) {
return e.Mul(a, x, WithIncr(y))
}
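// FMAScalar computes a fused multiply-add with a scalar operand, a * x + y, where x is the scalar.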
func (e StdEng) FMAScalar(a Tensor, x interface{}, y Tensor) (Tensor, error) {
return e.MulScalar(a, x, true, WithIncr(y))
}
tensor-0.9.24/defaultengine_prep.go 0000664 0000000 0000000 00000012556 14265126151 0017323 0 ustar 00root root 0000000 0000000 package tensor
import (
"reflect"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
// "log"
)
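// handleFuncOpts parses the supplied FuncOpts and reports the reuse tensor (if any) together with
// the safe, toReuse, incr and same flags. A supplied reuse tensor is validated for native
// accessibility, Dtype (when strict or same is set) and total size, and is reshaped to expShape
// when its shape differs but its size matches.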
func handleFuncOpts(expShape Shape, expType Dtype, o DataOrder, strict bool, opts ...FuncOpt) (reuse DenseTensor, safe, toReuse, incr, same bool, err error) {
fo := ParseFuncOpts(opts...)
reuseT, incr := fo.IncrReuse()
safe = fo.Safe()
same = fo.Same()
toReuse = reuseT != nil
if toReuse {
if reuse, err = getDenseTensor(reuseT); err != nil {
returnOpOpt(fo)
err = errors.Wrapf(err, "Cannot reuse a Tensor that isn't a DenseTensor. Got %T instead", reuseT)
return
}
if reuse != nil && !reuse.IsNativelyAccessible() {
returnOpOpt(fo)
err = errors.Errorf(inaccessibleData, reuse)
return
}
if (strict || same) && reuse.Dtype() != expType {
returnOpOpt(fo)
err = errors.Errorf(typeMismatch, expType, reuse.Dtype())
err = errors.Wrapf(err, "Cannot use reuse")
return
}
if reuse.len() != expShape.TotalSize() && !expShape.IsScalar() {
returnOpOpt(fo)
err = errors.Errorf(shapeMismatch, reuse.Shape(), expShape)
err = errors.Wrapf(err, "Cannot use reuse: shape mismatch - reuse.len() %v, expShape.TotalSize() %v", reuse.len(), expShape.TotalSize())
return
}
if !reuse.Shape().Eq(expShape) {
cloned := expShape.Clone()
if err = reuse.Reshape(cloned...); err != nil {
return
}
ReturnInts([]int(cloned))
}
if !incr && reuse != nil {
reuse.setDataOrder(o)
// err = reuse.reshape(expShape...)
}
}
returnOpOpt(fo)
return
}
func binaryCheck(a, b Tensor, tc *typeclass) (err error) {
// check if the tensors are accessible
if !a.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, a)
}
if !b.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, b)
}
at := a.Dtype()
bt := b.Dtype()
if tc != nil {
if err = typeclassCheck(at, tc); err != nil {
return errors.Wrapf(err, typeclassMismatch, "a")
}
if err = typeclassCheck(bt, tc); err != nil {
return errors.Wrapf(err, typeclassMismatch, "b")
}
}
if at.Kind() != bt.Kind() {
return errors.Errorf(typeMismatch, at, bt)
}
if !a.Shape().Eq(b.Shape()) {
return errors.Errorf(shapeMismatch, b.Shape(), a.Shape())
}
return nil
}
func unaryCheck(a Tensor, tc *typeclass) error {
if !a.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, a)
}
at := a.Dtype()
if tc != nil {
if err := typeclassCheck(at, tc); err != nil {
return errors.Wrapf(err, typeclassMismatch, "a")
}
}
return nil
}
// scalarDtypeCheck checks that a scalar value has the same dtype as the dtype of a given tensor.
func scalarDtypeCheck(a Tensor, b interface{}) error {
var dt Dtype
switch bt := b.(type) {
case Dtyper:
dt = bt.Dtype()
default:
t := reflect.TypeOf(b)
dt = Dtype{t}
}
if a.Dtype() != dt {
return errors.Errorf("Expected scalar to have the same Dtype as the tensor (%v). Got %T instead ", a.Dtype(), b)
}
return nil
}
// prepDataVV prepares the data given the input and reuse tensors. It also returns several indicators
//
// useIter indicates that the iterator methods should be used.
// swap indicates that the operands are swapped.
func prepDataVV(a, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, ait, bit, iit Iterator, useIter, swap bool, err error) {
// get data
dataA = a.hdr()
dataB = b.hdr()
if reuse != nil {
dataReuse = reuse.hdr()
}
// iter
useIter = a.RequiresIterator() ||
b.RequiresIterator() ||
(reuse != nil && reuse.RequiresIterator()) ||
!a.DataOrder().HasSameOrder(b.DataOrder()) ||
(reuse != nil && (!a.DataOrder().HasSameOrder(reuse.DataOrder()) || !b.DataOrder().HasSameOrder(reuse.DataOrder())))
if useIter {
ait = a.Iterator()
bit = b.Iterator()
if reuse != nil {
iit = reuse.Iterator()
}
}
// swap
if _, ok := a.(*CS); ok {
if _, ok := b.(DenseTensor); ok {
swap = true
dataA, dataB = dataB, dataA
ait, bit = bit, ait
}
}
return
}
func prepDataVS(a Tensor, b interface{}, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, ait, iit Iterator, useIter bool, newAlloc bool, err error) {
// get data
dataA = a.hdr()
dataB, newAlloc = scalarToHeader(b)
if reuse != nil {
dataReuse = reuse.hdr()
}
if a.IsScalar() {
return
}
useIter = a.RequiresIterator() ||
(reuse != nil && reuse.RequiresIterator()) ||
(reuse != nil && !reuse.DataOrder().HasSameOrder(a.DataOrder()))
if useIter {
ait = a.Iterator()
if reuse != nil {
iit = reuse.Iterator()
}
}
return
}
func prepDataSV(a interface{}, b Tensor, reuse Tensor) (dataA, dataB, dataReuse *storage.Header, bit, iit Iterator, useIter bool, newAlloc bool, err error) {
// get data
dataA, newAlloc = scalarToHeader(a)
dataB = b.hdr()
if reuse != nil {
dataReuse = reuse.hdr()
}
// get iterator
if b.IsScalar() {
return
}
useIter = b.RequiresIterator() ||
(reuse != nil && reuse.RequiresIterator()) ||
(reuse != nil && !reuse.DataOrder().HasSameOrder(b.DataOrder()))
if useIter {
bit = b.Iterator()
if reuse != nil {
iit = reuse.Iterator()
}
}
return
}
func prepDataUnary(a Tensor, reuse Tensor) (dataA, dataReuse *storage.Header, ait, rit Iterator, useIter bool, err error) {
// get data
dataA = a.hdr()
if reuse != nil {
dataReuse = reuse.hdr()
}
// get iterator
if a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) {
ait = a.Iterator()
if reuse != nil {
rit = reuse.Iterator()
}
useIter = true
}
return
}
tensor-0.9.24/defaultengine_selbyidx.go 0000664 0000000 0000000 00000016613 14265126151 0020176 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
"reflect"
)
// SelectByIndices selects values from `a` along the given axis, at the positions named by the `indices` tensor.
//
// Currently SelectByIndices only supports Dense tensors that do not require the use of iterators.
// Please make a pull request to support tensors that require the use of an iterator to traverse data.
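//
// A minimal illustrative sketch (not part of the original documentation; assumes the package-level
// constructors New, WithShape and WithBacking):
//
//	a := New(WithShape(3, 2), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	idx := New(WithBacking([]int{2, 0}))
//	sel, _ := StdEng{}.SelectByIndices(a, idx, 0) // sel has shape (2, 2) and holds rows 2 and 0 of a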
func (e StdEng) SelectByIndices(a, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if !indices.Shape().IsVectorLike() {
return nil, errors.Errorf("Expected indices to be a vector. Got %v instead", indices.Shape())
}
if indices.Dtype() != Int {
return nil, errors.Errorf("Expected indices to be a vector of ints. Got %v instead", indices.Dtype())
}
// if b is a scalar, then use Slice
if a.Shape().IsScalarEquiv() {
slices := make([]Slice, a.Shape().Dims())
slices[axis] = ss(getInts(indices)[0])
return a.Slice(slices...)
}
expectedShape := a.Shape().Clone()
expectedShape[axis] = indices.Shape().TotalSize()
var reuse DenseTensor
var safe, toReuse, _ bool
if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if safe || !toReuse && reuse == nil && safe {
// create reuse
reuse = New(WithShape(expectedShape...), Of(a.Dtype()))
}
if !safe {
if a.Shape()[axis] != indices.Shape().TotalSize() {
expected := a.Shape().Clone()
expected[axis] = indices.Shape().TotalSize()
return nil, errors.Errorf("Expected a safe resuse to have the same shape as the expected shape of the result: %v. The input a has %v ", expected, a.Shape())
}
reuse = a.(DenseTensor)
}
typ := a.Dtype().Type
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, _, err = prepDataVV(a, indices, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.Add")
}
if useIter {
e.iterSelectByIdx(axis, dataA, dataB, dataReuse, ait, bit, iit)
//TODO
return
}
e.selectByIdx(axis, dataB.Ints(), typ, dataA, dataReuse, a.(*Dense).AP, reuse.(*Dense).AP)
return reuse, nil
}
func (e StdEng) iterSelectByIdx(axis int, dataA, dataB, dataReuse *storage.Header, ait, bit, iit Iterator) {
panic("iterSelectByIdx is not yet implemented")
}
func (e StdEng) selectByIdx(axis int, indices []int, typ reflect.Type, dataA, dataRetVal *storage.Header, apA, apRet AP) {
isInnermost := axis == apA.shape.Dims()-1
outer := ProdInts(apA.shape[:axis])
axStride := apA.strides[axis]
retStride := apRet.strides[axis]
var outerRetStride int
if axis == 0 {
// then it's the outermost
outerRetStride = apRet.strides[axis] * 2
} else {
outerRetStride = apRet.strides[axis-1]
}
srcCoord := make([]int, apA.shape.Dims())
dstCoord := make([]int, apRet.shape.Dims())
if isInnermost {
prevAxis := axis - 1
if prevAxis < 0 {
// this may be the case if input is a vector
prevAxis = 0
}
prevStride := apA.strides[prevAxis]
retPrevStride := apRet.strides[prevAxis]
for i, idx := range indices {
srcCoord[axis] = idx
dstCoord[axis] = i
start, _ := Ltoi(apA.shape, apA.strides, srcCoord...)
dstStart, _ := Ltoi(apRet.shape, apRet.strides, dstCoord...)
for o := 0; o < outer; o++ {
end := start + axStride
dstEnd := dstStart + retStride
storage.CopySliced(typ, dataRetVal, dstStart, dstEnd, dataA, start, end)
start += prevStride
dstStart += retPrevStride
}
}
return
}
for i, idx := range indices {
srcCoord[axis] = idx
dstCoord[axis] = i
start, _ := Ltoi(apA.shape, apA.strides, srcCoord...)
dstStart, _ := Ltoi(apRet.shape, apRet.strides, dstCoord...)
for o := 0; o < outer; o++ {
end := start + axStride
dstEnd := dstStart + retStride
storage.CopySliced(typ, dataRetVal, dstStart, dstEnd, dataA, start, end)
start = end + axStride
dstStart = dstEnd + (outerRetStride - retStride)
}
}
}
// SelectByIndicesB computes the gradient of the result of `SelectByIndices`.
//
// Currently SelectByIndicesB only supports Dense tensors that do not require the use of iterators.
// Please make a pull request to support tensors that require the use of an iterator to traverse data.
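//
// Conceptually it scatter-adds slices of the incoming gradient back into a tensor shaped like
// `input`, along `axis`, at the positions named by `indices` (the non-iterator path below uses
// AddSliced to accumulate).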
func (e StdEng) SelectByIndicesB(input, outGrad, indices Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if !indices.Shape().IsVectorLike() {
return nil, errors.Errorf("Expected indices to be a vector. Got %v instead", outGrad.Shape())
}
if indices.Dtype() != Int {
return nil, errors.Errorf("Expected indices to be a vector of ints. Got %v instead", outGrad.Dtype())
}
// if input is scalar-equivalent, then use Slice
if input.Shape().IsScalarEquiv() {
slices := make([]Slice, input.Shape().Dims())
slices[axis] = ss(outGrad.Data().([]int)[0])
return input.Slice(slices...)
}
expectedShape := input.Shape().Clone()
var reuse DenseTensor
var _, toReuse, _ bool
if reuse, _, toReuse, _, _, err = handleFuncOpts(input.Shape(), input.Dtype(), input.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if !toReuse && reuse == nil {
// create reuse
reuse = New(WithShape(expectedShape...), Of(input.Dtype()))
}
typ := input.Dtype().Type
var _, dataB, dataReuse *storage.Header
var _, bit, iit Iterator
var useIter bool
if _, dataB, dataReuse, _, bit, iit, useIter, _, err = prepDataVV(input, outGrad, reuse); err != nil {
return nil, errors.Wrapf(err, "StdEng.SelectByIndicesB")
}
if useIter {
e.iterSelectByIndicesB(axis, dataB, dataReuse, bit, iit)
//TODO
return
}
e.selectByIndicesB(axis, getInts(indices), typ, dataB, dataReuse, outGrad.(*Dense).AP, reuse.(*Dense).AP)
return reuse, nil
}
func (e StdEng) iterSelectByIndicesB(axis int, dataB, dataGradA *storage.Header, bit, iit Iterator) {
panic("iterSelectByIndicesB not implemented yet")
}
func (e StdEng) selectByIndicesB(axis int, indices []int, typ reflect.Type, dataB, dataGradA *storage.Header, apB, apRet AP) {
isInnermost := axis == apB.shape.Dims()-1
outer := ProdInts(apB.shape[:axis])
axStride := apB.strides[axis]
retStride := apRet.strides[axis]
var outerRetStride int
if axis == 0 {
outerRetStride = apRet.strides[axis] * 2
} else {
outerRetStride = apRet.strides[axis-1]
}
dstCoord := make([]int, apB.shape.Dims())
srcCoord := make([]int, apRet.shape.Dims())
if isInnermost {
prevAxis := axis - 1
if prevAxis < 0 {
// this may be the case if input is a vector
prevAxis = 0
}
retPrevStride := apB.strides[prevAxis]
prevStride := apRet.strides[prevAxis]
for i, idx := range indices {
dstCoord[axis] = idx
srcCoord[axis] = i
dstStart, _ := Ltoi(apB.shape, apB.strides, dstCoord...)
start, _ := Ltoi(apRet.shape, apRet.strides, srcCoord...)
for o := 0; o < outer; o++ {
dstEnd := dstStart + axStride
end := start + retStride
e.E.AddSliced(typ, dataGradA, dstStart, dstEnd, dataB, start, end)
dstStart += prevStride
start += retPrevStride
}
}
return
}
for i, idx := range indices {
dstCoord[axis] = idx
srcCoord[axis] = i
dstStart, _ := Ltoi(apRet.shape, apRet.strides, dstCoord...)
start, _ := Ltoi(apB.shape, apB.strides, srcCoord...)
for o := 0; o < outer; o++ {
dstEnd := dstStart + axStride
end := start + retStride
e.E.AddSliced(typ, dataGradA, dstStart, dstEnd, dataB, start, end)
dstStart = dstEnd + axStride
start = end + (outerRetStride - retStride)
}
}
}
tensor-0.9.24/defaultengine_softmax.go 0000664 0000000 0000000 00000036656 14265126151 0020045 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"math"
"sync"
"github.com/chewxy/math32"
"github.com/pkg/errors"
)
// resolveAxis normalises a possibly-negative axis into the range [0, dims). For instance, with
// dims = 2 and axis = -1 it returns 1, the last dimension.
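//
// For example (illustrative):
//
//	resolveAxis(-1, 2) // returns 1
//	resolveAxis(1, 4)  // returns 1
//	resolveAxis(-2, 3) // returns 1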
func resolveAxis(axis int, dims int) int {
res := axis % dims
if (res < 0 && dims > 0) || (res > 0 && dims < 0) {
return res + dims
}
return res
}
// SoftMax performs the softmax operation on the given tensor. Currently it expects the tensor to be a Dense tensor.
// Please make a pull request to support sparse tensors.
//
// The softmax function is defined as:
//
//	σ(x)_i = exp(x_i) / Σ_j exp(x_j)
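//
// A minimal usage sketch (illustrative, not part of the original documentation; assumes a float64
// Dense tensor built with New, WithShape and WithBacking):
//
//	x := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	sm, _ := StdEng{}.SoftMax(x, -1) // each row of sm sums to 1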
func (e StdEng) SoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
axis = resolveAxis(axis, x.Dims())
expectedShape := x.Shape()
var reuse DenseTensor
var safe, toReuse, _ bool
if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, x.Dtype(), x.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if safe || !toReuse && reuse == nil && safe {
// create reuse
reuse = New(WithShape(expectedShape...), Of(x.Dtype()))
}
switch x.Dtype() {
case Float32:
if expectedShape.Dims()-1 == axis {
e.softMaxLastDimF32(reuse, x, axis, false)
} else {
e.softMaxInnerDimF32(reuse, x, axis, false)
}
case Float64:
if expectedShape.Dims()-1 == axis {
e.softMaxLastDimF64(reuse, x, axis, false)
} else {
e.softMaxInnerDimF64(reuse, x, axis, false)
}
default:
return nil, fmt.Errorf("type %v not supported", x.Dtype())
}
return reuse, nil
}
// SoftMaxB computes the gradient of the input `x`, given `output = SoftMax(x)` and its associated gradient. Currently it expects the tensor to be a Dense tensor.
// Please make a pull request to support sparse tensors.
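//
// For a softmax output y and upstream gradient g, the gradient along the softmax axis is
//
//	dx_i = y_i * (g_i - Σ_j g_j*y_j)
//
// which is what the float32/float64 kernels below compute.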
func (e StdEng) SoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if !output.Shape().Eq(grad.Shape()) {
return nil, fmt.Errorf("output and grad shapes don't match")
}
if !output.Dtype().Eq(grad.Dtype()) {
return nil, fmt.Errorf("output and grad types don't match")
}
axis = resolveAxis(axis, output.Dims())
expectedShape := output.Shape()
var reuse DenseTensor
var safe, toReuse, _ bool
if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, output.Dtype(), output.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if safe || !toReuse && reuse == nil && safe {
// create reuse
reuse = New(WithShape(expectedShape...), Of(output.Dtype()))
}
switch output.Dtype() {
case Float32:
if expectedShape.Dims()-1 == axis {
e.softMaxBLastDimF32(reuse, output, grad, axis, false)
} else {
e.softMaxBInnerDimF32(reuse, output, grad, axis, false)
}
case Float64:
if expectedShape.Dims()-1 == axis {
e.softMaxBLastDimF64(reuse, output, grad, axis, false)
} else {
e.softMaxBInnerDimF64(reuse, output, grad, axis, false)
}
default:
return nil, fmt.Errorf("type %v not supported", output.Dtype())
}
return reuse, nil
}
// LogSoftMax performs softmax but in log space. This provides some amount of numerical stabilization.
// Conceptually it is the same as performing a logarithm after applying the softmax function.
// Currently it expects the tensor to be a Dense tensor.
// Please make a pull request to support sparse tensors.
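//
// Numerically it computes
//
//	logsoftmax(x)_i = (x_i - max(x)) - log Σ_j exp(x_j - max(x))
//
// subtracting the maximum before exponentiating to avoid overflow.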
func (e StdEng) LogSoftMax(x Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
axis = resolveAxis(axis, x.Dims())
expectedShape := x.Shape()
var reuse DenseTensor
var safe, toReuse, _ bool
if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, x.Dtype(), x.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if safe || !toReuse && reuse == nil && safe {
// create reuse
reuse = New(WithShape(expectedShape...), Of(x.Dtype()))
}
switch x.Dtype() {
case Float32:
if expectedShape.Dims()-1 == axis {
e.softMaxLastDimF32(reuse, x, axis, true)
} else {
e.softMaxInnerDimF32(reuse, x, axis, true)
}
case Float64:
if expectedShape.Dims()-1 == axis {
e.softMaxLastDimF64(reuse, x, axis, true)
} else {
e.softMaxInnerDimF64(reuse, x, axis, true)
}
default:
return nil, fmt.Errorf("type %v not supported", x.Dtype())
}
return reuse, nil
}
// LogSoftMaxB computes the gradient of the input `x`, given the `output = LogSoftmax(x)` and its associated gradient.
// Currently it expects the tensor to be a Dense tensor.
// Please make a pull request to support sparse tensors.
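//
// For a log-softmax output y and upstream gradient g, the gradient along the axis is
//
//	dx_i = g_i - exp(y_i) * Σ_j g_j
//
// which is what the float32/float64 kernels below compute.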
func (e StdEng) LogSoftMaxB(output, grad Tensor, axis int, opts ...FuncOpt) (retVal Tensor, err error) {
if !output.Shape().Eq(grad.Shape()) {
return nil, fmt.Errorf("output and grad shapes don't match")
}
if !output.Dtype().Eq(grad.Dtype()) {
return nil, fmt.Errorf("output and grad types don't match")
}
axis = resolveAxis(axis, output.Dims())
expectedShape := output.Shape()
var reuse DenseTensor
var safe, toReuse, _ bool
if reuse, safe, toReuse, _, _, err = handleFuncOpts(expectedShape, output.Dtype(), output.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if safe || !toReuse && reuse == nil && safe {
// create reuse
reuse = New(WithShape(expectedShape...), Of(output.Dtype()))
}
switch output.Dtype() {
case Float32:
if expectedShape.Dims()-1 == axis {
e.softMaxBLastDimF32(reuse, output, grad, axis, true)
} else {
e.softMaxBInnerDimF32(reuse, output, grad, axis, true)
}
case Float64:
if expectedShape.Dims()-1 == axis {
e.softMaxBLastDimF64(reuse, output, grad, axis, true)
} else {
e.softMaxBInnerDimF64(reuse, output, grad, axis, true)
}
default:
return nil, fmt.Errorf("type %v not supported", output.Dtype())
}
return reuse, nil
}
func (e StdEng) softMaxLastDimF64(output Tensor, x Tensor, axis int, logSoftMax bool) {
outputArr := getFloat64s(output)
xArr := getFloat64s(x)
xShape := x.Shape()
outerSize := 1
dimSize := xShape[axis]
for i := 0; i < axis; i++ {
outerSize *= xShape[i]
}
var wg sync.WaitGroup
for ii := 0; ii < outerSize; ii++ {
wg.Add(1)
go func(ii int, wg *sync.WaitGroup) {
maxInput := xArr[0]
for j := 1; j < dimSize; j++ {
i := ii*dimSize + j
if xArr[i] > maxInput {
maxInput = xArr[i]
}
}
sumExp := float64(0.0)
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
z := xArr[i] - maxInput
exp := math.Exp(z)
if logSoftMax {
outputArr[i] = z
} else {
outputArr[i] = exp
}
sumExp += exp
}
if !logSoftMax {
sumExp = 1 / sumExp
}
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
if logSoftMax {
outputArr[i] -= math.Log(sumExp)
} else {
outputArr[i] *= sumExp
}
}
wg.Done()
}(ii, &wg)
}
wg.Wait()
}
func (e StdEng) softMaxBLastDimF64(inputGrad, output, grad Tensor, axis int, logSoftMax bool) {
dx := getFloat64s(inputGrad)
outputArr := getFloat64s(output)
gradArr := getFloat64s(grad)
outputShape := output.Shape()
outerSize := 1
dimSize := outputShape[axis]
for i := 0; i < axis; i++ {
outerSize *= outputShape[i]
}
var wg sync.WaitGroup
for ii := 0; ii < outerSize; ii++ {
wg.Add(1)
if logSoftMax {
go func(gradArr, dx []float64, ii int, wg *sync.WaitGroup) {
sum := gradArr[ii*dimSize]
for j := 1; j < dimSize; j++ {
i := ii*dimSize + j
sum += gradArr[i]
}
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
dx[i] = gradArr[i] - (math.Exp(outputArr[i]) * sum)
}
wg.Done()
}(gradArr, dx, ii, &wg)
} else {
go func(outputArr, gradArr, dx []float64, ii int, wg *sync.WaitGroup) {
//mul := make([]float64, dimSize)
var sum float64
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
//mul[j] = outputArr[i] * gradArr[i]
sum += outputArr[i] * gradArr[i]
}
// sum := mul[0]
// for j := 1; j < dimSize; j++ {
// sum += mul[j]
// }
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
dx[i] = (gradArr[i] - sum) * outputArr[i]
}
wg.Done()
}(outputArr, gradArr, dx, ii, &wg)
}
}
wg.Wait()
}
func (e StdEng) softMaxInnerDimF64(output Tensor, x Tensor, axis int, logSoftmax bool) {
xShape := x.Shape()
innerSize, outerSize := 1, 1
for i := 0; i < axis; i++ {
outerSize *= xShape[i]
}
for i := axis + 1; i < xShape.Dims(); i++ {
innerSize *= xShape[i]
}
dimSize := xShape[axis]
dimStride := innerSize
outerStride := dimSize * dimStride
outputArr := getFloat64s(output)
xArr := getFloat64s(x)
var wg sync.WaitGroup
for ii := 0; ii < innerSize*outerSize; ii++ {
wg.Add(1)
go func(ii int, wg *sync.WaitGroup) {
outerIndex, innerIndex := divmod(ii, innerSize)
inputPart := xArr[outerIndex*outerStride+innerIndex:]
outputPart := outputArr[outerIndex*outerStride+innerIndex:]
maxInput := inputPart[0]
for j := 1; j < dimSize; j++ {
i := j * dimStride
if inputPart[i] > maxInput {
maxInput = inputPart[i]
}
}
sumExp := 0.0
for j := 0; j < dimSize; j++ {
i := j * dimStride
exp := math.Exp(inputPart[i] - maxInput)
if !logSoftmax {
outputPart[i] = exp
}
sumExp += exp
}
if logSoftmax {
sumExp = math.Log(sumExp)
} else {
sumExp = 1 / sumExp
}
for j := 0; j < dimSize; j++ {
i := j * dimStride
if logSoftmax {
outputPart[i] = inputPart[i] - maxInput - sumExp
} else {
outputPart[i] *= sumExp
}
}
wg.Done()
}(ii, &wg)
}
wg.Wait()
}
func (e StdEng) softMaxBInnerDimF64(inputGrad, output, grad Tensor, axis int, logSoftmax bool) {
dxShape := inputGrad.Shape()
innerSize, outerSize := 1, 1
for i := 0; i < axis; i++ {
outerSize *= dxShape[i]
}
for i := axis + 1; i < dxShape.Dims(); i++ {
innerSize *= dxShape[i]
}
dimSize := dxShape[axis]
dimStride := innerSize
outerStride := dimSize * dimStride
dxArr := getFloat64s(inputGrad)
outputArr := getFloat64s(output)
gradArr := getFloat64s(grad)
var wg sync.WaitGroup
for ii := 0; ii < innerSize*outerSize; ii++ {
wg.Add(1)
go func(ii int, wg *sync.WaitGroup) {
outerIndex, innerIndex := divmod(ii, innerSize)
gradPart := gradArr[outerIndex*outerStride+innerIndex:]
dxPart := dxArr[outerIndex*outerStride+innerIndex:]
outputPart := outputArr[outerIndex*outerStride+innerIndex:]
sum := 0.0
for j := 0; j < dimSize; j++ {
i := j * dimStride
if logSoftmax {
sum += gradPart[i]
} else {
sum += gradPart[i] * outputPart[i]
}
}
for j := 0; j < dimSize; j++ {
i := j * dimStride
if logSoftmax {
dxPart[i] = gradPart[i] - math.Exp(outputPart[i])*sum
} else {
dxPart[i] = outputPart[i] * (gradPart[i] - sum)
}
}
wg.Done()
}(ii, &wg)
}
wg.Wait()
}
func (e StdEng) softMaxLastDimF32(output Tensor, x Tensor, axis int, logSoftMax bool) {
outputArr := getFloat32s(output)
xArr := getFloat32s(x)
xShape := x.Shape()
outerSize := 1
dimSize := xShape[axis]
for i := 0; i < axis; i++ {
outerSize *= xShape[i]
}
var wg sync.WaitGroup
for ii := 0; ii < outerSize; ii++ {
wg.Add(1)
go func(ii int, wg *sync.WaitGroup) {
maxInput := xArr[0]
for j := 1; j < dimSize; j++ {
i := ii*dimSize + j
if xArr[i] > maxInput {
maxInput = xArr[i]
}
}
sumExp := float32(0.0)
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
z := xArr[i] - maxInput
exp := math32.Exp(z)
if logSoftMax {
outputArr[i] = z
} else {
outputArr[i] = exp
}
sumExp += exp
}
if !logSoftMax {
sumExp = 1 / sumExp
}
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
if logSoftMax {
outputArr[i] -= math32.Log(sumExp)
} else {
outputArr[i] *= sumExp
}
}
wg.Done()
}(ii, &wg)
}
wg.Wait()
}
func (e StdEng) softMaxBLastDimF32(inputGrad, output, grad Tensor, axis int, logSoftMax bool) {
dx := getFloat32s(inputGrad)
outputArr := getFloat32s(output)
gradArr := getFloat32s(grad)
outputShape := output.Shape()
outerSize := 1
dimSize := outputShape[axis]
for i := 0; i < axis; i++ {
outerSize *= outputShape[i]
}
var wg sync.WaitGroup
for ii := 0; ii < outerSize; ii++ {
wg.Add(1)
if logSoftMax {
go func(ii int, wg *sync.WaitGroup) {
sum := gradArr[ii*dimSize]
for j := 1; j < dimSize; j++ {
i := ii*dimSize + j
sum += gradArr[i]
}
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
dx[i] = gradArr[i] - (math32.Exp(outputArr[i]) * sum)
}
wg.Done()
}(ii, &wg)
} else {
go func(ii int, wg *sync.WaitGroup) {
//mul := make([]float32, dimSize)
var sum float32
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
//mul[j] = outputArr[i] * gradArr[i]
sum += outputArr[i] * gradArr[i]
}
// sum := mul[0]
// for j := 1; j < dimSize; j++ {
// sum += mul[j]
// }
for j := 0; j < dimSize; j++ {
i := ii*dimSize + j
dx[i] = (gradArr[i] - sum) * outputArr[i]
}
wg.Done()
}(ii, &wg)
}
}
wg.Wait()
}
func (e StdEng) softMaxInnerDimF32(output Tensor, x Tensor, axis int, logSoftmax bool) {
xShape := x.Shape()
innerSize, outerSize := 1, 1
for i := 0; i < axis; i++ {
outerSize *= xShape[i]
}
for i := axis + 1; i < xShape.Dims(); i++ {
innerSize *= xShape[i]
}
dimSize := xShape[axis]
dimStride := innerSize
outerStride := dimSize * dimStride
outputArr := getFloat32s(output)
xArr := getFloat32s(x)
var wg sync.WaitGroup
for ii := 0; ii < innerSize*outerSize; ii++ {
wg.Add(1)
go func(ii int, wg *sync.WaitGroup) {
outerIndex, innerIndex := divmod(ii, innerSize)
inputPart := xArr[outerIndex*outerStride+innerIndex:]
outputPart := outputArr[outerIndex*outerStride+innerIndex:]
maxInput := inputPart[0]
for j := 1; j < dimSize; j++ {
i := j * dimStride
if inputPart[i] > maxInput {
maxInput = inputPart[i]
}
}
sumExp := float32(0.0)
for j := 0; j < dimSize; j++ {
i := j * dimStride
exp := math32.Exp(inputPart[i] - maxInput)
if !logSoftmax {
outputPart[i] = exp
}
sumExp += exp
}
if logSoftmax {
sumExp = math32.Log(sumExp)
} else {
sumExp = 1 / sumExp
}
for j := 0; j < dimSize; j++ {
i := j * dimStride
if logSoftmax {
outputPart[i] = inputPart[i] - maxInput - sumExp
} else {
outputPart[i] *= sumExp
}
}
wg.Done()
}(ii, &wg)
}
wg.Wait()
}
func (e StdEng) softMaxBInnerDimF32(inputGrad, output, grad Tensor, axis int, logSoftmax bool) {
dxShape := inputGrad.Shape()
innerSize, outerSize := 1, 1
for i := 0; i < axis; i++ {
outerSize *= dxShape[i]
}
for i := axis + 1; i < dxShape.Dims(); i++ {
innerSize *= dxShape[i]
}
dimSize := dxShape[axis]
dimStride := innerSize
outerStride := dimSize * dimStride
dxArr := getFloat32s(inputGrad)
outputArr := getFloat32s(output)
gradArr := getFloat32s(grad)
var wg sync.WaitGroup
for ii := 0; ii < innerSize*outerSize; ii++ {
wg.Add(1)
go func(ii int, wg *sync.WaitGroup) {
outerIndex, innerIndex := divmod(ii, innerSize)
gradPart := gradArr[outerIndex*outerStride+innerIndex:]
dxPart := dxArr[outerIndex*outerStride+innerIndex:]
outputPart := outputArr[outerIndex*outerStride+innerIndex:]
sum := float32(0.0)
for j := 0; j < dimSize; j++ {
i := j * dimStride
if logSoftmax {
sum += gradPart[i]
} else {
sum += gradPart[i] * outputPart[i]
}
}
for j := 0; j < dimSize; j++ {
i := j * dimStride
if logSoftmax {
dxPart[i] = gradPart[i] - math32.Exp(outputPart[i])*sum
} else {
dxPart[i] = outputPart[i] * (gradPart[i] - sum)
}
}
wg.Done()
}(ii, &wg)
}
wg.Wait()
}
tensor-0.9.24/defaultengine_unary.go 0000664 0000000 0000000 00000062273 14265126151 0017514 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
func (e StdEng) Neg(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, numberTypes); err != nil {
err = errors.Wrapf(err, "Neg failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Neg")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.NegIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Neg")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.NegIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.NegIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.NegIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Neg(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Neg")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Neg(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Neg(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Neg(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Inv(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, numberTypes); err != nil {
err = errors.Wrapf(err, "Inv failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Inv")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.InvIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Inv")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.InvIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.InvIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.InvIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Inv(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Inv")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Inv(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Inv(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Inv(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Square(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, numberTypes); err != nil {
err = errors.Wrapf(err, "Square failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Square")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.SquareIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Square")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.SquareIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.SquareIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.SquareIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Square(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Square")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Square(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Square(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Square(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Cube(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, numberTypes); err != nil {
err = errors.Wrapf(err, "Cube failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Cube")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.CubeIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Cube")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.CubeIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.CubeIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.CubeIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Cube(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Cube")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Cube(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Cube(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Cube(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Exp(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatcmplxTypes); err != nil {
err = errors.Wrapf(err, "Exp failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Exp")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.ExpIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Exp")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.ExpIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.ExpIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.ExpIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Exp(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Exp")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Exp(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Exp(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Exp(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Tanh(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatcmplxTypes); err != nil {
err = errors.Wrapf(err, "Tanh failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Tanh")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.TanhIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Tanh")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.TanhIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.TanhIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.TanhIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Tanh(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Tanh")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Tanh(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Tanh(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Tanh(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Log(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatcmplxTypes); err != nil {
err = errors.Wrapf(err, "Log failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Log")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.LogIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Log")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.LogIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.LogIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.LogIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Log(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Log")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Log(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Log(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Log(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Log2(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatTypes); err != nil {
err = errors.Wrapf(err, "Log2 failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Log2")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Log2Iter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Log2")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.Log2Iter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.Log2Iter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Log2Iter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Log2(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Log2")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Log2(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Log2(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Log2(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Log10(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatcmplxTypes); err != nil {
err = errors.Wrapf(err, "Log10 failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Log10")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Log10Iter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Log10")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.Log10Iter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.Log10Iter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Log10Iter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Log10(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Log10")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Log10(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Log10(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Log10(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Sqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatcmplxTypes); err != nil {
err = errors.Wrapf(err, "Sqrt failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Sqrt")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.SqrtIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Sqrt")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.SqrtIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.SqrtIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.SqrtIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Sqrt(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Sqrt")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Sqrt(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Sqrt(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Sqrt(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Cbrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatTypes); err != nil {
err = errors.Wrapf(err, "Cbrt failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Cbrt")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.CbrtIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Cbrt")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.CbrtIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.CbrtIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.CbrtIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Cbrt(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Cbrt")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Cbrt(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Cbrt(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Cbrt(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) InvSqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, floatTypes); err != nil {
err = errors.Wrapf(err, "InvSqrt failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.InvSqrt")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.InvSqrtIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform InvSqrt")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.InvSqrtIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.InvSqrtIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.InvSqrtIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.InvSqrt(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform InvSqrt")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.InvSqrt(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.InvSqrt(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.InvSqrt(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Abs(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, signedTypes); err != nil {
err = errors.Wrapf(err, "Abs failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Abs")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.AbsIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Abs")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.AbsIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.AbsIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.AbsIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Abs(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Abs")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Abs(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Abs(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Abs(typ, cloned.hdr())
retVal = cloned
}
return
}
func (e StdEng) Sign(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if err = unaryCheck(a, signedTypes); err != nil {
err = errors.Wrapf(err, "Sign failed")
return
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
typ := a.Dtype().Type
var ait, rit Iterator
var dataA, dataReuse *storage.Header
var useIter bool
if dataA, dataReuse, ait, rit, useIter, err = prepDataUnary(a, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "StdEng.Sign")
}
if useIter {
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.SignIter(typ, cloned.hdr(), ait); err != nil {
return nil, errors.Wrap(err, "Unable to perform Sign")
}
ait.Reset()
err = e.E.AddIter(typ, dataReuse, cloned.hdr(), rit, ait)
retVal = reuse
case toReuse:
storage.CopyIter(typ, dataReuse, dataA, rit, ait)
rit.Reset()
err = e.E.SignIter(typ, dataReuse, rit)
retVal = reuse
case !safe:
err = e.E.SignIter(typ, dataA, ait)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.SignIter(typ, cloned.hdr(), ait)
retVal = cloned
}
return
}
switch {
case incr:
cloned := a.Clone().(Tensor)
if err = e.E.Sign(typ, cloned.hdr()); err != nil {
return nil, errors.Wrap(err, "Unable to perform Sign")
}
err = e.E.Add(typ, dataReuse, cloned.hdr())
retVal = reuse
case toReuse:
storage.Copy(typ, dataReuse, dataA)
err = e.E.Sign(typ, dataReuse)
retVal = reuse
case !safe:
err = e.E.Sign(typ, dataA)
retVal = a
default: // safe by default
cloned := a.Clone().(Tensor)
err = e.E.Sign(typ, cloned.hdr())
retVal = cloned
}
return
}
tensor-0.9.24/defaultenginefloat32.go 0000664 0000000 0000000 00000014457 14265126151 0017472 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/execution"
"gorgonia.org/tensor/internal/storage"
"gorgonia.org/vecf32"
)
func handleFuncOptsF32(expShape Shape, o DataOrder, opts ...FuncOpt) (reuse DenseTensor, safe, toReuse, incr bool, err error) {
fo := ParseFuncOpts(opts...)
reuseT, incr := fo.IncrReuse()
safe = fo.Safe()
toReuse = reuseT != nil
if toReuse {
var ok bool
if reuse, ok = reuseT.(DenseTensor); !ok {
returnOpOpt(fo)
err = errors.Errorf("Cannot reuse a different type of Tensor in a *Dense-Scalar operation. Reuse is of %T", reuseT)
return
}
if reuse.len() != expShape.TotalSize() && !expShape.IsScalar() {
returnOpOpt(fo)
err = errors.Errorf(shapeMismatch, reuse.Shape(), expShape)
err = errors.Wrapf(err, "Cannot use reuse: shape mismatch")
return
}
if !incr && reuse != nil {
reuse.setDataOrder(o)
// err = reuse.reshape(expShape...)
}
}
returnOpOpt(fo)
return
}
func prepDataVSF32(a Tensor, b interface{}, reuse Tensor) (dataA *storage.Header, dataB float32, dataReuse *storage.Header, ait, iit Iterator, useIter bool, err error) {
// get data
dataA = a.hdr()
switch bt := b.(type) {
case float32:
dataB = bt
case *float32:
dataB = *bt
default:
err = errors.Errorf("b is not a float32: %T", b)
return
}
if reuse != nil {
dataReuse = reuse.hdr()
}
if a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) {
ait = a.Iterator()
if reuse != nil {
iit = reuse.Iterator()
}
useIter = true
}
return
}
func (e Float32Engine) checkThree(a, b Tensor, reuse Tensor) error {
if !a.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, a)
}
if !b.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, b)
}
if reuse != nil && !reuse.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, reuse)
}
if a.Dtype() != Float32 {
return errors.Errorf("Expected a to be of Float32. Got %v instead", a.Dtype())
}
if a.Dtype() != b.Dtype() || (reuse != nil && b.Dtype() != reuse.Dtype()) {
return errors.Errorf("Expected a, b and reuse to have the same Dtype. Got %v, %v and %v instead", a.Dtype(), b.Dtype(), reuse.Dtype())
}
return nil
}
func (e Float32Engine) checkTwo(a Tensor, reuse Tensor) error {
if !a.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, a)
}
if reuse != nil && !reuse.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, reuse)
}
if a.Dtype() != Float32 {
return errors.Errorf("Expected a to be of Float32. Got %v instead", a.Dtype())
}
if reuse != nil && reuse.Dtype() != a.Dtype() {
return errors.Errorf("Expected reuse to be the same as a. Got %v instead", reuse.Dtype())
}
return nil
}
// Float32Engine is an execution engine that is optimized to only work with float32s. It assumes all data are float32s.
//
// Use this engine only as a form of optimization. You should probably be using the basic default engine for most cases.
type Float32Engine struct {
StdEng
}
// makeArray allocates a slice for the array
func (e Float32Engine) makeArray(arr *array, t Dtype, size int) {
if t != Float32 {
panic("Float32Engine only creates float32s")
}
if size < 0 {
panic("Cannot have negative sizes when making array")
}
arr.Header.Raw = make([]byte, size*4)
arr.t = t
}
func (e Float32Engine) FMA(a, x, y Tensor) (retVal Tensor, err error) {
reuse := y
if err = e.checkThree(a, x, reuse); err != nil {
return nil, errors.Wrap(err, "Failed checks")
}
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, _, err = prepDataVV(a, x, reuse); err != nil {
return nil, errors.Wrap(err, "Float32Engine.FMA")
}
if useIter {
err = execution.MulIterIncrF32(dataA.Float32s(), dataB.Float32s(), dataReuse.Float32s(), ait, bit, iit)
retVal = reuse
return
}
vecf32.IncrMul(dataA.Float32s(), dataB.Float32s(), dataReuse.Float32s())
retVal = reuse
return
}
func (e Float32Engine) FMAScalar(a Tensor, x interface{}, y Tensor) (retVal Tensor, err error) {
reuse := y
if err = e.checkTwo(a, reuse); err != nil {
return nil, errors.Wrap(err, "Failed checks")
}
var ait, iit Iterator
var dataTensor, dataReuse *storage.Header
var scalar float32
var useIter bool
if dataTensor, scalar, dataReuse, ait, iit, useIter, err = prepDataVSF32(a, x, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "Float32Engine.FMAScalar")
}
if useIter {
err = execution.MulIterIncrVSF32(dataTensor.Float32s(), scalar, dataReuse.Float32s(), ait, iit)
retVal = reuse
return
}
execution.MulIncrVSF32(dataTensor.Float32s(), scalar, dataReuse.Float32s())
retVal = reuse
return
}
// Add performs a + b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
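//
// A minimal sketch of the intended call pattern (the tensors and values below are
// illustrative assumptions, not fixtures from this package):
//
//	e := Float32Engine{StdEng{}}
//	a := New(WithShape(2), WithBacking([]float32{1, 2}))
//	b := New(WithShape(2), WithBacking([]float32{3, 4}))
//	sum, err := e.Add(a, b) // safe by default: a and b are untouched, sum holds []float32{4, 6}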
func (e Float32Engine) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if a.RequiresIterator() || b.RequiresIterator() {
return e.StdEng.Add(a, b, opts...)
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, err = handleFuncOptsF32(a.Shape(), a.DataOrder(), opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if err = e.checkThree(a, b, reuse); err != nil {
return nil, errors.Wrap(err, "Failed checks")
}
var hdrA, hdrB, hdrReuse *storage.Header
var dataA, dataB, dataReuse []float32
if hdrA, hdrB, hdrReuse, _, _, _, _, _, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "Float32Engine.Add")
}
dataA = hdrA.Float32s()
dataB = hdrB.Float32s()
if hdrReuse != nil {
dataReuse = hdrReuse.Float32s()
}
switch {
case incr:
vecf32.IncrAdd(dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
copy(dataReuse, dataA)
vecf32.Add(dataReuse, dataB)
retVal = reuse
case !safe:
vecf32.Add(dataA, dataB)
retVal = a
default:
ret := a.Clone().(headerer)
vecf32.Add(ret.hdr().Float32s(), dataB)
retVal = ret.(Tensor)
}
return
}
func (e Float32Engine) Inner(a, b Tensor) (retVal float32, err error) {
var A, B []float32
var AD, BD *Dense
var ok bool
if AD, ok = a.(*Dense); !ok {
return 0, errors.Errorf("a is not a *Dense")
}
if BD, ok = b.(*Dense); !ok {
return 0, errors.Errorf("b is not a *Dense")
}
A = AD.Float32s()
B = BD.Float32s()
retVal = whichblas.Sdot(len(A), A, 1, B, 1)
return
}
tensor-0.9.24/defaultenginefloat32_test.go 0000664 0000000 0000000 00000001635 14265126151 0020523 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"testing/quick"
)
func TestFloat32Engine_makeArray(t *testing.T) {
// the uint16 is just to make sure that tests are correctly run.
// we don't want the quicktest to randomly generate a size that is so large
// that Go takes a long time just to allocate. We'll test the other sizes (like negative numbers)
// after the quick test.
f := func(sz uint16) bool {
size := int(sz)
e := Float32Engine{StdEng{}}
dt := Float32
arr := array{}
e.makeArray(&arr, dt, size)
if len(arr.Raw) != size*4 {
t.Errorf("Expected raw to be size*4. Got %v instead", len(arr.Raw))
return false
}
v, ok := arr.Data().([]float32)
if !ok {
t.Errorf("Expected v to be []float32. Got %T instead", arr.Data())
return false
}
if len(v) != size {
return false
}
return true
}
if err := quick.Check(f, nil); err != nil {
t.Errorf("Quick test failed %v", err)
}
}
tensor-0.9.24/defaultenginefloat64.go 0000664 0000000 0000000 00000014345 14265126151 0017473 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/execution"
"gorgonia.org/tensor/internal/storage"
"gorgonia.org/vecf64"
)
func handleFuncOptsF64(expShape Shape, o DataOrder, opts ...FuncOpt) (reuse DenseTensor, safe, toReuse, incr bool, err error) {
fo := ParseFuncOpts(opts...)
reuseT, incr := fo.IncrReuse()
safe = fo.Safe()
toReuse = reuseT != nil
if toReuse {
var ok bool
if reuse, ok = reuseT.(DenseTensor); !ok {
returnOpOpt(fo)
err = errors.Errorf("Cannot reuse a different type of Tensor in a *Dense-Scalar operation. Reuse is of %T", reuseT)
return
}
if reuse.len() != expShape.TotalSize() && !expShape.IsScalar() {
returnOpOpt(fo)
err = errors.Errorf(shapeMismatch, reuse.Shape(), expShape)
err = errors.Wrapf(err, "Cannot use reuse: shape mismatch")
return
}
if !incr && reuse != nil {
reuse.setDataOrder(o)
// err = reuse.reshape(expShape...)
}
}
returnOpOpt(fo)
return
}
func prepDataVSF64(a Tensor, b interface{}, reuse Tensor) (dataA *storage.Header, dataB float64, dataReuse *storage.Header, ait, iit Iterator, useIter bool, err error) {
// get data
dataA = a.hdr()
switch bt := b.(type) {
case float64:
dataB = bt
case *float64:
dataB = *bt
default:
err = errors.Errorf("b is not a float64: %T", b)
return
}
if reuse != nil {
dataReuse = reuse.hdr()
}
if a.RequiresIterator() || (reuse != nil && reuse.RequiresIterator()) {
ait = a.Iterator()
if reuse != nil {
iit = reuse.Iterator()
}
useIter = true
}
return
}
func (e Float64Engine) checkThree(a, b Tensor, reuse Tensor) error {
if !a.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, a)
}
if !b.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, b)
}
if reuse != nil && !reuse.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, reuse)
}
if a.Dtype() != Float64 {
return errors.Errorf("Expected a to be of Float64. Got %v instead", a.Dtype())
}
if a.Dtype() != b.Dtype() || (reuse != nil && b.Dtype() != reuse.Dtype()) {
return errors.Errorf("Expected a, b and reuse to have the same Dtype. Got %v, %v and %v instead", a.Dtype(), b.Dtype(), reuse.Dtype())
}
return nil
}
func (e Float64Engine) checkTwo(a Tensor, reuse Tensor) error {
if !a.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, a)
}
if reuse != nil && !reuse.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, reuse)
}
if a.Dtype() != Float64 {
return errors.Errorf("Expected a to be of Float64. Got %v instead", a.Dtype())
}
if reuse != nil && reuse.Dtype() != a.Dtype() {
return errors.Errorf("Expected reuse to be the same as a. Got %v instead", reuse.Dtype())
}
return nil
}
// Float64Engine is an execution engine that is optimized to only work with float64s. It assumes all data are float64s.
//
// Use this engine only as a form of optimization. You should probably be using the basic default engine for most cases.
type Float64Engine struct {
StdEng
}
// makeArray allocates a slice for the array
func (e Float64Engine) makeArray(arr *array, t Dtype, size int) {
if t != Float64 {
panic("Float64Engine only creates float64s")
}
if size < 0 {
panic("Cannot have negative sizes when making array")
}
arr.Header.Raw = make([]byte, size*8)
arr.t = t
}
func (e Float64Engine) FMA(a, x, y Tensor) (retVal Tensor, err error) {
reuse := y
if err = e.checkThree(a, x, reuse); err != nil {
return nil, errors.Wrap(err, "Failed checks")
}
var dataA, dataB, dataReuse *storage.Header
var ait, bit, iit Iterator
var useIter bool
if dataA, dataB, dataReuse, ait, bit, iit, useIter, _, err = prepDataVV(a, x, reuse); err != nil {
return nil, errors.Wrap(err, "Float64Engine.FMA")
}
if useIter {
err = execution.MulIterIncrF64(dataA.Float64s(), dataB.Float64s(), dataReuse.Float64s(), ait, bit, iit)
retVal = reuse
return
}
vecf64.IncrMul(dataA.Float64s(), dataB.Float64s(), dataReuse.Float64s())
retVal = reuse
return
}
func (e Float64Engine) FMAScalar(a Tensor, x interface{}, y Tensor) (retVal Tensor, err error) {
reuse := y
if err = e.checkTwo(a, reuse); err != nil {
return nil, errors.Wrap(err, "Failed checks")
}
var ait, iit Iterator
var dataTensor, dataReuse *storage.Header
var scalar float64
var useIter bool
if dataTensor, scalar, dataReuse, ait, iit, useIter, err = prepDataVSF64(a, x, reuse); err != nil {
return nil, errors.Wrapf(err, opFail, "Float64Engine.FMAScalar")
}
if useIter {
err = execution.MulIterIncrVSF64(dataTensor.Float64s(), scalar, dataReuse.Float64s(), ait, iit)
retVal = reuse
return
}
execution.MulIncrVSF64(dataTensor.Float64s(), scalar, dataReuse.Float64s())
retVal = reuse
return
}
// Add performs a + b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
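//
// A minimal sketch of the WithIncr path (illustrative values, not fixtures from this package):
//
//	e := Float64Engine{StdEng{}}
//	a := New(WithShape(2), WithBacking([]float64{1, 2}))
//	b := New(WithShape(2), WithBacking([]float64{3, 4}))
//	incr := New(WithShape(2), WithBacking([]float64{100, 100}))
//	out, err := e.Add(a, b, WithIncr(incr)) // out is incr, which now holds []float64{104, 106}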
func (e Float64Engine) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
if a.RequiresIterator() || b.RequiresIterator() {
return e.StdEng.Add(a, b, opts...)
}
var reuse DenseTensor
var safe, toReuse, incr bool
if reuse, safe, toReuse, incr, err = handleFuncOptsF64(a.Shape(), a.DataOrder(), opts...); err != nil {
return nil, errors.Wrap(err, "Unable to handle funcOpts")
}
if err = e.checkThree(a, b, reuse); err != nil {
return nil, errors.Wrap(err, "Failed checks")
}
var hdrA, hdrB, hdrReuse *storage.Header
var dataA, dataB, dataReuse []float64
if hdrA, hdrB, hdrReuse, _, _, _, _, _, err = prepDataVV(a, b, reuse); err != nil {
return nil, errors.Wrapf(err, "Float64Engine.Add")
}
dataA = hdrA.Float64s()
dataB = hdrB.Float64s()
if hdrReuse != nil {
dataReuse = hdrReuse.Float64s()
}
switch {
case incr:
vecf64.IncrAdd(dataA, dataB, dataReuse)
retVal = reuse
case toReuse:
copy(dataReuse, dataA)
vecf64.Add(dataReuse, dataB)
retVal = reuse
case !safe:
vecf64.Add(dataA, dataB)
retVal = a
default:
ret := a.Clone().(headerer)
vecf64.Add(ret.hdr().Float64s(), dataB)
retVal = ret.(Tensor)
}
return
}
func (e Float64Engine) Inner(a, b Tensor) (retVal float64, err error) {
var A, B []float64
var AD, BD *Dense
var ok bool
if AD, ok = a.(*Dense); !ok {
return 0, errors.Errorf("a is not a *Dense")
}
if BD, ok = b.(*Dense); !ok {
return 0, errors.Errorf("b is not a *Dense")
}
A = AD.Float64s()
B = BD.Float64s()
retVal = whichblas.Ddot(len(A), A, 1, B, 1)
return
}
tensor-0.9.24/defaultenginefloat64_test.go 0000664 0000000 0000000 00000001635 14265126151 0020530 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"testing/quick"
)
func TestFloat64Engine_makeArray(t *testing.T) {
// the uint16 is just to make sure that tests are correctly run.
// we don't want the quicktest to randomly generate a size that is so large
// that Go takes a long time just to allocate. We'll test the other sizes (like negative numbers)
// after the quick test.
f := func(sz uint16) bool {
size := int(sz)
e := Float64Engine{StdEng{}}
dt := Float64
arr := array{}
e.makeArray(&arr, dt, size)
if len(arr.Raw) != size*8 {
t.Errorf("Expected raw to be size*8. Got %v instead", len(arr.Raw))
return false
}
v, ok := arr.Data().([]float64)
if !ok {
t.Errorf("Expected v to be []float64. Got %T instead", arr.Data())
return false
}
if len(v) != size {
return false
}
return true
}
if err := quick.Check(f, nil); err != nil {
t.Errorf("Quick test failed %v", err)
}
}
tensor-0.9.24/dense.go 0000664 0000000 0000000 00000033232 14265126151 0014553 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"reflect"
"unsafe"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
const (
maskCompEvery int = 8
)
// Dense represents a dense tensor - this is the most common form of tensors. It can be used to represent scalars, vectors, matrices, and higher-dimensional arrays.
type Dense struct {
AP
array
flag MemoryFlag
e Engine // execution engine for the *Dense
oe standardEngine // optimized engine
// backup AP. When a transpose is done, the old *AP is backed up here, for easy untransposes
old AP
transposeWith []int
// if viewOf != nil, then this *Dense is a view.
viewOf uintptr
mask []bool // mask slice can be used to identify missing or invalid values. len(mask)<=len(v)
maskIsSoft bool
}
// NewDense creates a new *Dense. It tries its best to get from the tensor pool.
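//
// A minimal sketch (illustrative values):
//
//	d := NewDense(Float64, Shape{2, 3}, WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	// d is a 2×3 matrix of float64s backed by the given slice.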
func NewDense(dt Dtype, shape Shape, opts ...ConsOpt) *Dense {
return recycledDense(dt, shape, opts...)
}
func recycledDense(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) {
retVal = recycledDenseNoFix(dt, shape, opts...)
retVal.fix()
if err := retVal.sanity(); err != nil {
panic(err)
}
return
}
func recycledDenseNoFix(dt Dtype, shape Shape, opts ...ConsOpt) (retVal *Dense) {
// size := shape.TotalSize()
//if shape.IsScalar() {
// size = 1
//}
retVal = borrowDense()
retVal.array.t = dt
retVal.AP.zeroWithDims(shape.Dims())
for _, opt := range opts {
opt(retVal)
}
retVal.setShape(shape...)
return
}
func (t *Dense) fromSlice(x interface{}) {
t.array.Header.Raw = nil // GC anything else
t.array.fromSlice(x)
}
func (t *Dense) addMask(mask []bool) {
l := len(mask)
if l > 0 && l != t.len() {
panic("Mask is not same length as data")
}
t.mask = mask
}
func (t *Dense) makeArray(size int) {
switch te := t.e.(type) {
case NonStdEngine:
t.flag = MakeMemoryFlag(t.flag, ManuallyManaged)
case arrayMaker:
te.makeArray(&t.array, t.t, size)
return
default:
}
memsize := calcMemSize(t.t, size)
mem, err := t.e.Alloc(memsize)
if err != nil {
panic(err)
}
t.array.Raw = storage.FromMemory(mem.Uintptr(), uintptr(memsize))
return
}
// Info returns the access pattern which explains how the data in the underlying array is accessed. This is mostly used for debugging.
func (t *Dense) Info() *AP { return &t.AP }
// Dtype returns the data type of the *Dense tensor.
func (t *Dense) Dtype() Dtype { return t.t }
// Data returns the underlying array. If the *Dense represents a scalar value, the scalar value is returned instead
func (t *Dense) Data() interface{} {
if t.IsScalar() {
return t.Get(0)
}
// build a type of []T
shdr := reflect.SliceHeader{
Data: t.array.Uintptr(),
Len: t.array.Len(),
Cap: t.array.Cap(),
}
sliceT := reflect.SliceOf(t.t.Type)
ptr := unsafe.Pointer(&shdr)
val := reflect.Indirect(reflect.NewAt(sliceT, ptr))
return val.Interface()
}
// DataSize returns the size of the underlying array. Typically t.DataSize() == t.Shape().TotalSize()
func (t *Dense) DataSize() int {
if t.IsScalar() {
return 0 // DOUBLE CHECK
}
return t.array.Len()
}
// Engine returns the execution engine associated with this Tensor
func (t *Dense) Engine() Engine { return t.e }
// Reshape reshapes a *Dense. If the tensors need to be materialized (either it's a view or transpose), it will be materialized before the reshape happens
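//
// A minimal sketch (illustrative values):
//
//	d := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	err := d.Reshape(3, 2) // d now has shape (3, 2); the total size (6) must not change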
func (t *Dense) Reshape(dims ...int) error {
if t.Shape().TotalSize() != Shape(dims).TotalSize() {
return errors.Errorf("Cannot reshape %v into %v", t.Shape(), dims)
}
if t.viewOf != 0 && t.o.IsNotContiguous() {
return errors.Errorf(methodNYI, "Reshape", "non-contiguous views")
}
if !t.old.IsZero() {
t.Transpose()
}
return t.reshape(dims...)
}
func (t *Dense) reshape(dims ...int) error {
t.setShape(dims...)
return t.sanity()
}
func (t *Dense) unsqueeze(axis int) error {
if axis > t.shape.Dims()+1 {
return errors.Errorf("Cannot unsqueeze on axis %d when the tensor has shape %v", axis, t.shape)
}
t.shape = append(t.shape, 1)
copy(t.shape[axis+1:], t.shape[axis:])
t.shape[axis] = 1
t.strides = append(t.strides, 1)
copy(t.strides[axis+1:], t.strides[axis:])
return nil
}
// ScalarValue returns the scalar value of a *Tensor,
// IF and ONLY IF it's a Tensor representation of a scalar value.
// This is required because operations like a vector dot product (vec · vec) return a scalar value.
// I didn't want to return interface{} for all the API methods, so the next best solution is to
// wrap the scalar value in a *Tensor
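//
// A minimal sketch (illustrative value, assuming the package's FromScalar construction option):
//
//	s := New(FromScalar(3.14))
//	v := s.ScalarValue() // v is an interface{} holding float64(3.14)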
func (t *Dense) ScalarValue() interface{} {
if !t.IsScalar() {
panic(fmt.Sprintf("ScalarValue only works when the Tensor is a representation of a scalar value. The value of the tensor is %v", t))
}
return t.Get(0)
}
// IsView indicates if the Tensor is a view of another (typically from slicing)
func (t *Dense) IsView() bool {
return t.viewOf != 0
}
// IsMaterializable indicates if the Tensor is materializable - if it has either gone through some transforms or slicing
func (t *Dense) IsMaterializable() bool {
return t.viewOf != 0 || !t.old.IsZero()
}
// IsManuallyManaged returns true if the memory associated with this *Dense is manually managed (by the user)
func (t *Dense) IsManuallyManaged() bool { return t.flag.manuallyManaged() }
// IsNativelyAccessible checks if the pointers are accessible by Go
func (t *Dense) IsNativelyAccessible() bool { return t.flag.nativelyAccessible() }
// Clone clones a *Dense. It creates a copy of the data, and the underlying array will be allocated
func (t *Dense) Clone() interface{} {
if t.e != nil {
retVal := new(Dense)
t.AP.CloneTo(&retVal.AP)
retVal.t = t.t
retVal.e = t.e
retVal.oe = t.oe
retVal.flag = t.flag
retVal.makeArray(t.Len())
if !t.old.IsZero() {
retVal.old = t.old.Clone()
t.old.CloneTo(&retVal.old)
}
copyDense(retVal, t)
retVal.lock()
return retVal
}
panic("Unreachable: No engine")
}
// IsMasked indicates whether tensor is masked
func (t *Dense) IsMasked() bool { return len(t.mask) == t.len() }
// MaskFromDense adds a mask slice to the tensor by OR-ing the masks of the given dense arguments
func (t *Dense) MaskFromDense(tts ...*Dense) {
hasMask := BorrowBools(len(tts))
defer ReturnBools(hasMask)
numMasked := 0
var masked = false
for i, tt := range tts {
if tt != nil {
hasMask[i] = tt.IsMasked()
masked = masked || hasMask[i]
if hasMask[i] {
numMasked++
}
}
}
if numMasked < 1 {
return
}
//Only make mask if none already. This way one of the tts can be t itself
if len(t.mask) < t.DataSize() {
t.makeMask()
}
for i, tt := range tts {
if tt != nil {
n := len(tt.mask)
if hasMask[i] {
for j := range t.mask {
t.mask[j] = t.mask[j] || tt.mask[j%n]
}
}
}
}
}
// Private methods
func (t *Dense) cap() int { return t.array.Cap() }
func (t *Dense) len() int { return t.array.Len() } // exactly the same as DataSize
func (t *Dense) arr() array { return t.array }
func (t *Dense) arrPtr() *array { return &t.array }
func (t *Dense) setShape(s ...int) {
t.unlock()
t.SetShape(s...)
t.lock()
return
}
func (t *Dense) setAP(ap *AP) { t.AP = *ap }
func (t *Dense) fix() {
if t.e == nil {
t.e = StdEng{}
}
if oe, ok := t.e.(standardEngine); ok {
t.oe = oe
}
switch {
case t.IsScalar() && t.array.Header.Raw == nil:
t.makeArray(1)
case t.Shape() == nil && t.array.Header.Raw != nil:
size := t.Len()
if size == 1 {
t.SetShape() // scalar
} else {
t.SetShape(size) // vector
}
case t.array.Header.Raw == nil && t.t != Dtype{}:
size := t.Shape().TotalSize()
t.makeArray(size)
}
if len(t.mask) != t.len() {
t.mask = t.mask[:0]
}
t.lock() // don't put this in a defer - if t.array.Ptr == nil and t.Shape() == nil, then leave it unlocked
}
// makeMask adds a mask slice to tensor if required
func (t *Dense) makeMask() {
var size int
size = t.shape.TotalSize()
if len(t.mask) >= size {
t.mask = t.mask[:size]
}
if cap(t.mask) < size {
t.mask = make([]bool, size)
}
t.mask = t.mask[:size]
memsetBools(t.mask, false)
}
// sanity is a function that sanity checks that a tensor is correct.
func (t *Dense) sanity() error {
if !t.AP.IsZero() && t.Shape() == nil && t.array.Header.Raw == nil {
return errors.New(emptyTensor)
}
size := t.Len()
expected := t.Size()
if t.viewOf == 0 && size != expected && !t.IsScalar() {
return errors.Wrap(errors.Errorf(shapeMismatch, t.Shape(), size), "sanity check failed")
}
// TODO: sanity check for views
return nil
}
// isTransposed returns true if the *Dense holds a transposed array.
func (t *Dense) isTransposed() bool { return !t.old.IsZero() }
// oshape returns the original shape
func (t *Dense) oshape() Shape {
if !t.old.IsZero() {
return t.old.Shape()
}
return t.Shape()
}
// ostrides returns the original strides
func (t *Dense) ostrides() []int {
if !t.old.IsZero() {
return t.old.Strides()
}
return t.Strides()
}
// ShallowClone clones the *Dense without making a copy of the underlying array
func (t *Dense) ShallowClone() *Dense {
retVal := borrowDense()
retVal.e = t.e
retVal.oe = t.oe
t.AP.CloneTo(&retVal.AP)
retVal.flag = t.flag
retVal.array = t.array
retVal.old = t.old
retVal.transposeWith = t.transposeWith
retVal.viewOf = t.viewOf
retVal.mask = t.mask
retVal.maskIsSoft = t.maskIsSoft
return retVal
}
func (t *Dense) oldAP() *AP { return &t.old }
func (t *Dense) setOldAP(ap *AP) { t.old = *ap }
func (t *Dense) transposeAxes() []int { return t.transposeWith }
//go:nocheckptr
func (t *Dense) parentTensor() *Dense {
if t.viewOf != 0 {
return (*Dense)(unsafe.Pointer(t.viewOf))
}
return nil
}
func (t *Dense) setParentTensor(d *Dense) {
if d == nil {
t.viewOf = 0
return
}
t.viewOf = uintptr(unsafe.Pointer(d))
}
/* ------ Mask operations */
//ResetMask fills the mask with either false, or the provided boolean value
func (t *Dense) ResetMask(val ...bool) error {
if !t.IsMasked() {
t.makeMask()
}
var fillValue = false
if len(val) > 0 {
fillValue = val[0]
}
memsetBools(t.mask, fillValue)
return nil
}
// HardenMask forces the mask to hard. If mask is hard, then true mask values can not be unset
func (t *Dense) HardenMask() bool {
t.maskIsSoft = false
return t.maskIsSoft
}
// SoftenMask forces the mask to soft
func (t *Dense) SoftenMask() bool {
t.maskIsSoft = true
return t.maskIsSoft
}
// MaskFromSlice makes mask from supplied slice
func (t *Dense) MaskFromSlice(x interface{}) {
t.makeMask()
n := len(t.mask)
switch m := x.(type) {
case []bool:
copy(t.mask, m)
return
case []int:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []int8:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []int16:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []int32:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []int64:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []uint:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []byte:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []uint16:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []uint32:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []uint64:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []float32:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []float64:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []complex64:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []complex128:
for i, v := range m {
if v != 0 {
t.mask[i] = true
}
if i >= n {
return
}
}
case []string:
for i, v := range m {
if v != "" {
t.mask[i] = true
}
if i >= n {
return
}
}
default:
return
}
}
// Memset sets all the values in the *Dense tensor.
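//
// A minimal sketch (illustrative values):
//
//	d := New(WithShape(2, 2), WithBacking(make([]float64, 4)))
//	err := d.Memset(3.14) // every element of d is now 3.14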
func (t *Dense) Memset(x interface{}) error {
if !t.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, t)
}
if t.IsMaterializable() {
it := newFlatIterator(&t.AP)
return t.array.memsetIter(x, it)
}
return t.array.Memset(x)
}
// Eq checks that any two things are equal. If the shapes are the same but the strides are not, they will still be considered equal.
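//
// A minimal sketch (illustrative values):
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	a.Eq(b) // true: same shape and same data, even though a and b are distinct tensors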
func (t *Dense) Eq(other interface{}) bool {
if ot, ok := other.(*Dense); ok {
if ot == t {
return true
}
if !t.Shape().Eq(ot.Shape()) {
return false
}
return t.array.Eq(&ot.array)
}
return false
}
func (t *Dense) Zero() {
if t.IsMaterializable() {
it := newFlatIterator(&t.AP)
if err := t.zeroIter(it); err != nil {
panic(err)
}
}
if t.IsMasked() {
t.ResetMask()
}
t.array.Zero()
}
func (t *Dense) Mask() []bool { return t.mask }
func (t *Dense) SetMask(mask []bool) {
// if len(mask) != t.len() {
// panic("Cannot set mask")
// }
t.mask = mask
}
func (t *Dense) slice(start, end int) {
t.array = t.array.slice(start, end)
}
// RequiresIterator indicates if an iterator is required to read the data in *Dense in the correct fashion
func (t *Dense) RequiresIterator() bool {
if t.len() == 1 {
return false
}
// non-contiguous slice, transpose, or masked. If it's a slice and contiguous, then an iterator is not required
if !t.o.IsContiguous() || !t.old.IsZero() || t.IsMasked() {
return true
}
return false
}
func (t *Dense) Iterator() Iterator { return IteratorFromDense(t) }
func (t *Dense) standardEngine() standardEngine { return t.oe }
tensor-0.9.24/dense_apply_test.go 0000664 0000000 0000000 00000012016 14265126151 0017014 0 ustar 00root root 0000000 0000000 package tensor
import (
"math/rand"
"testing"
"testing/quick"
"time"
"unsafe"
)
func getMutateVal(dt Dtype) interface{} {
switch dt {
case Int:
return int(1)
case Int8:
return int8(1)
case Int16:
return int16(1)
case Int32:
return int32(1)
case Int64:
return int64(1)
case Uint:
return uint(1)
case Uint8:
return uint8(1)
case Uint16:
return uint16(1)
case Uint32:
return uint32(1)
case Uint64:
return uint64(1)
case Float32:
return float32(1)
case Float64:
return float64(1)
case Complex64:
var c complex64 = 1
return c
case Complex128:
var c complex128 = 1
return c
case Bool:
return true
case String:
return "Hello World"
case Uintptr:
return uintptr(0xdeadbeef)
case UnsafePointer:
return unsafe.Pointer(uintptr(0xdeadbeef))
}
return nil
}
func getMutateFn(dt Dtype) interface{} {
switch dt {
case Int:
return mutateI
case Int8:
return mutateI8
case Int16:
return mutateI16
case Int32:
return mutateI32
case Int64:
return mutateI64
case Uint:
return mutateU
case Uint8:
return mutateU8
case Uint16:
return mutateU16
case Uint32:
return mutateU32
case Uint64:
return mutateU64
case Float32:
return mutateF32
case Float64:
return mutateF64
case Complex64:
return mutateC64
case Complex128:
return mutateC128
case Bool:
return mutateB
case String:
return mutateStr
case Uintptr:
return mutateUintptr
case UnsafePointer:
return mutateUnsafePointer
}
return nil
}
func TestDense_Apply(t *testing.T) {
var r *rand.Rand
mut := func(q *Dense) bool {
var mutVal interface{}
if mutVal = getMutateVal(q.Dtype()); mutVal == nil {
return true // we'll temporarily skip those we cannot mutate/get a mutation value
}
var fn interface{}
if fn = getMutateFn(q.Dtype()); fn == nil {
return true // we'll skip those that we cannot mutate
}
we, eqFail := willerr(q, nil, nil)
_, ok := q.Engine().(Mapper)
we = we || !ok
a := q.Clone().(*Dense)
correct := q.Clone().(*Dense)
correct.Memset(mutVal)
ret, err := a.Apply(fn)
if err, retEarly := qcErrCheck(t, "Apply", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), eqFail, correct.Data(), ret.Data()) {
return false
}
// wrong fn type/illogical values
if _, err = a.Apply(getMutateFn); err == nil {
t.Error("Expected an error")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(mut, &quick.Config{Rand: r}); err != nil {
t.Errorf("Applying mutation function failed %v", err)
}
}
func TestDense_Apply_unsafe(t *testing.T) {
var r *rand.Rand
mut := func(q *Dense) bool {
var mutVal interface{}
if mutVal = getMutateVal(q.Dtype()); mutVal == nil {
return true // we'll temporarily skip those we cannot mutate/get a mutation value
}
var fn interface{}
if fn = getMutateFn(q.Dtype()); fn == nil {
return true // we'll skip those that we cannot mutate
}
we, eqFail := willerr(q, nil, nil)
_, ok := q.Engine().(Mapper)
we = we || !ok
a := q.Clone().(*Dense)
correct := q.Clone().(*Dense)
correct.Memset(mutVal)
ret, err := a.Apply(fn, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Apply", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), eqFail, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Error("Expected ret == a (Unsafe option was used)")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(mut, &quick.Config{Rand: r}); err != nil {
t.Errorf("Applying mutation function failed %v", err)
}
}
func TestDense_Apply_reuse(t *testing.T) {
var r *rand.Rand
mut := func(q *Dense) bool {
var mutVal interface{}
if mutVal = getMutateVal(q.Dtype()); mutVal == nil {
return true // we'll temporarily skip those we cannot mutate/get a mutation value
}
var fn interface{}
if fn = getMutateFn(q.Dtype()); fn == nil {
return true // we'll skip those that we cannot mutate
}
we, eqFail := willerr(q, nil, nil)
_, ok := q.Engine().(Mapper)
we = we || !ok
a := q.Clone().(*Dense)
reuse := q.Clone().(*Dense)
reuse.Zero()
correct := q.Clone().(*Dense)
correct.Memset(mutVal)
ret, err := a.Apply(fn, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Apply", a, nil, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), eqFail, correct.Data(), ret.Data()) {
return false
}
if ret != reuse {
t.Error("Expected ret == reuse (WithReuse option was used)")
return false
}
return true
}
r = rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(mut, &quick.Config{Rand: r}); err != nil {
t.Errorf("Applying mutation function failed %v", err)
}
}
tensor-0.9.24/dense_argmethods.go 0000664 0000000 0000000 00000002225 14265126151 0016766 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
/* Argmax */
// Argmax finds the index of the max value along the axis provided
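//
// A minimal sketch (illustrative values):
//
//	d := New(WithShape(2, 3), WithBacking([]float64{1, 5, 2, 9, 0, 4}))
//	am, err := d.Argmax(1) // am has shape (2) and holds the column index of each row's max: [1, 0]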
func (t *Dense) Argmax(axis int) (retVal *Dense, err error) {
e := t.e
switch am := e.(type) {
case denseArgmaxer:
return am.argmaxDenseTensor(t, axis)
case Argmaxer:
var ret Tensor
var ok bool
if ret, err = am.Argmax(t, axis); err != nil {
return nil, errors.Wrapf(err, opFail, "Argmax")
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(extractionFail, "*Dense", ret)
}
return
}
return nil, errors.New("Engine does not support Argmax")
}
/* Argmin */
// Argmin finds the index of the min value along the axis provided
func (t *Dense) Argmin(axis int) (retVal *Dense, err error) {
e := t.e
switch am := e.(type) {
case denseArgminer:
return am.argminDenseTensor(t, axis)
case Argminer:
var ret Tensor
var ok bool
if ret, err = am.Argmin(t, axis); err != nil {
return nil, errors.Wrapf(err, opFail, "Argmin")
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(extractionFail, "*Dense", ret)
}
return
}
return nil, errors.New("Engine does not suport Argmax")
}
tensor-0.9.24/dense_argmethods_test.go 0000664 0000000 0000000 00000132427 14265126151 0020035 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"math"
"testing"
"github.com/chewxy/math32"
"github.com/stretchr/testify/assert"
)
/* Test data */
var basicDenseI = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseI8 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int8{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseI16 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int16{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseI32 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int32{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseI64 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]int64{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseU = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseU8 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint8{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseU16 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint16{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseU32 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint32{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseU64 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]uint64{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseF32 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]float32{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var basicDenseF64 = New(WithShape(2, 3, 4, 5, 2), WithBacking([]float64{3, 4, 2, 4, 3, 8, 3, 9, 7, 4, 3, 0, 3, 9, 9, 0, 6, 7, 3, 9, 4, 8, 5, 1, 1, 9, 4, 0, 4, 1, 6, 6, 4, 9, 3, 8, 1, 7, 0, 7, 4, 0, 6, 8, 2, 8, 0, 6, 1, 6, 2, 3, 7, 5, 7, 3, 0, 8, 6, 5, 6, 9, 7, 5, 6, 8, 7, 9, 5, 0, 8, 1, 4, 0, 6, 6, 3, 3, 8, 1, 1, 3, 2, 5, 9, 0, 4, 5, 3, 1, 9, 1, 9, 3, 9, 3, 3, 4, 5, 9, 4, 2, 2, 7, 9, 8, 1, 6, 9, 4, 4, 1, 8, 9, 8, 0, 9, 9, 4, 6, 7, 5, 9, 9, 4, 8, 5, 8, 2, 4, 8, 2, 7, 2, 8, 7, 2, 3, 7, 0, 9, 9, 8, 9, 2, 1, 7, 0, 7, 9, 0, 2, 4, 8, 7, 9, 6, 8, 3, 3, 7, 2, 9, 2, 8, 2, 3, 6, 0, 8, 7, 7, 0, 9, 0, 9, 3, 2, 6, 9, 5, 8, 6, 9, 5, 6, 1, 8, 7, 8, 1, 9, 9, 3, 7, 7, 6, 8, 2, 1, 1, 5, 1, 4, 0, 5, 1, 7, 9, 5, 6, 6, 8, 7, 5, 1, 3, 4, 0, 1, 8, 0, 2, 6, 9, 1, 4, 8, 0, 5, 6, 2, 9, 4, 4, 2, 4, 4, 4, 3}))
var argmaxCorrect = []struct {
shape Shape
data []int
}{
{Shape{3, 4, 5, 2}, []int{
1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1,
1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0,
1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1,
0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,
1, 0, 0, 0, 0,
}},
{Shape{2, 4, 5, 2}, []int{
1, 0, 1, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1,
2, 2, 0, 1, 1, 2, 2, 1, 0, 2, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 0, 1, 0,
0, 0, 2, 1, 0, 1, 2, 1, 0, 1, 1, 2, 0, 1, 0, 0, 0, 0, 2, 1, 0, 1, 0,
0, 2, 1, 1, 0, 0, 0, 0, 0, 2, 0,
}},
{Shape{2, 3, 5, 2}, []int{
3, 2, 2, 1, 1, 2, 1, 0, 0, 1, 3, 2, 1, 0, 1, 0, 2, 2, 3, 0, 1, 0, 1,
3, 0, 2, 3, 3, 2, 1, 2, 2, 0, 0, 1, 3, 2, 0, 1, 2, 0, 3, 0, 1, 0, 1,
3, 2, 2, 1, 2, 1, 3, 1, 2, 0, 2, 2, 0, 0,
}},
{Shape{2, 3, 4, 2}, []int{
4, 3, 2, 1, 1, 2, 0, 1, 1, 1, 1, 3, 1, 0, 0, 2, 2, 1, 0, 4, 2, 2, 3,
1, 1, 1, 0, 2, 0, 0, 2, 2, 1, 4, 0, 1, 4, 1, 1, 0, 4, 3, 1, 1, 2, 3,
1, 1,
}},
{Shape{2, 3, 4, 5}, []int{
1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,
1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0,
0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1,
0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1,
0, 0, 0, 0, 0,
}},
}
var argminCorrect = []struct {
shape Shape
data []int
}{
{Shape{3, 4, 5, 2}, []int{
0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1,
0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0,
1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1,
0, 1, 1, 0, 1,
}},
{Shape{2, 4, 5, 2}, []int{
2, 1, 0, 0, 1, 2, 1, 2, 1, 2, 1, 0, 0, 2, 1, 0, 1, 2, 0, 1, 0, 2, 2,
0, 0, 1, 2, 0, 0, 1, 2, 1, 0, 1, 0, 2, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1,
2, 1, 1, 0, 2, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 2, 2, 0, 0, 1, 0, 2,
2, 0, 0, 0, 1, 2, 2, 2, 2, 1, 1,
}},
{Shape{2, 3, 5, 2}, []int{
0, 1, 0, 2, 2, 1, 3, 2, 3, 2, 1, 0, 3, 3, 0, 1, 0, 3, 0, 2, 0, 1, 0,
1, 3, 0, 2, 1, 0, 0, 3, 1, 3, 1, 2, 2, 1, 2, 0, 1, 3, 0, 1, 0, 1, 0,
2, 1, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 1,
}},
{Shape{2, 3, 4, 2}, []int{
1, 0, 0, 0, 2, 3, 4, 0, 3, 0, 3, 0, 4, 4, 3, 1, 0, 2, 3, 0, 3, 0, 0,
2, 4, 4, 3, 4, 2, 3, 0, 0, 4, 0, 1, 3, 3, 2, 0, 4, 2, 1, 4, 2, 4, 0,
2, 0,
}},
{Shape{2, 3, 4, 5}, []int{
0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1,
1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0,
1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0,
1, 1, 1, 0, 1,
}},
}
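// The fixtures above drive the Argmax/Argmin tests below: for the (2, 3, 4, 5, 2)
// input, argmaxCorrect[i] and argminCorrect[i] hold the expected result of reducing
// along axis i. The result shape is the input shape with axis i removed, and each
// entry is the index along axis i of the largest (or smallest) element. A minimal
// hand-written sketch of the same semantics, not part of the generated fixtures:
//
//	T := New(WithShape(2, 2), WithBacking([]int{3, 9, 7, 1}))
//	am, _ := T.Argmax(1) // reduce along axis 1 (the columns of each row)
//	// am has shape (2) and data []int{1, 0}: 9 beats 3 in row 0, 7 beats 1 in row 1.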
func TestDense_Argmax_I(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseI.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseI.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_I(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseI.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseI.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_I8(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseI8.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseI8.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_I8(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseI8.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseI8.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_I16(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseI16.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseI16.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_I16(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseI16.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseI16.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_I32(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseI32.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseI32.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_I32(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseI32.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseI32.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_I64(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseI64.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseI64.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_I64(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseI64.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseI64.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_U(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseU.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseU.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_U(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseU.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseU.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_U8(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseU8.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseU8.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_U8(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseU8.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseU8.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_U16(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseU16.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseU16.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_U16(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseU16.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseU16.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_U32(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseU32.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseU32.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_U32(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseU32.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseU32.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_U64(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseU64.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// with different engine
T = basicDenseU64.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_U64(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseU64.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// with different engine
T = basicDenseU64.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_F32(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseF32.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// test with NaN
T = New(WithShape(4), WithBacking([]float32{1, 2, math32.NaN(), 4}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(2, argmax.ScalarValue(), "NaN test")
// test with Mask and NaN
T = New(WithShape(4), WithBacking([]float32{1, 9, math32.NaN(), 4}, []bool{false, true, true, false}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "Masked NaN test")
// test with +Inf
T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(1), 4}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with +Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(2, argmax.ScalarValue(), "+Inf test")
// test with Mask and +Inf
T = New(WithShape(4), WithBacking([]float32{1, 9, math32.Inf(1), 4}, []bool{false, true, true, false}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with masked +Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "Masked +Inf test")
// test with -Inf
T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(-1), 4}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with -Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "-Inf test")
// test with Mask and -Inf
T = New(WithShape(4), WithBacking([]float32{1, 9, math32.Inf(-1), 4}, []bool{false, true, true, false}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with masked -Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "Masked -Inf test")
// with different engine
T = basicDenseF32.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
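// The special-value cases above pin down the float behaviour: an unmasked NaN wins
// Argmax (its index is returned), +Inf wins as expected, -Inf loses to finite values,
// and masked entries are skipped entirely. A minimal sketch of the unmasked-NaN case,
// mirroring the assertions above rather than adding new behaviour:
//
//	T := New(WithShape(4), WithBacking([]float32{1, 2, math32.NaN(), 4}))
//	am, _ := T.Argmax(AllAxes)
//	// am.ScalarValue() == 2: the NaN at index 2 is reported as the argmax.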
func TestDense_Argmin_F32(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseF32.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// test with NaN
T = New(WithShape(4), WithBacking([]float32{1, 2, math32.NaN(), 4}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(2, argmin.ScalarValue(), "NaN test")
// test with Mask and NaN
T = New(WithShape(4), WithBacking([]float32{1, -9, math32.NaN(), 4}, []bool{false, true, true, false}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "Masked NaN test")
// test with +Inf
T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(1), 4}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with +Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "+Inf test")
// test with Mask and +Inf
T = New(WithShape(4), WithBacking([]float32{1, -9, math32.Inf(1), 4}, []bool{false, true, true, false}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with masked +Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "Masked +Inf test")
// test with -Inf
T = New(WithShape(4), WithBacking([]float32{1, 2, math32.Inf(-1), 4}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with -Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(2, argmin.ScalarValue(), "-Inf test")
// test with Mask and -Inf
T = New(WithShape(4), WithBacking([]float32{1, -9, math32.Inf(-1), 4}, []bool{false, true, true, false}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with masked -Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "Masked -Inf test")
// with different engine
T = basicDenseF32.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
func TestDense_Argmax_F64(t *testing.T) {
assert := assert.New(t)
var T, argmax *Dense
var err error
T = basicDenseF64.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// test all axes
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmax.IsScalar())
assert.Equal(7, argmax.ScalarValue())
// test with NaN
T = New(WithShape(4), WithBacking([]float64{1, 2, math.NaN(), 4}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(2, argmax.ScalarValue(), "NaN test")
// test with Mask and NaN
T = New(WithShape(4), WithBacking([]float64{1, 9, math.NaN(), 4}, []bool{false, true, true, false}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "Masked NaN test")
// test with +Inf
T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(1), 4}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with +Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(2, argmax.ScalarValue(), "+Inf test")
// test with Mask and +Inf
T = New(WithShape(4), WithBacking([]float64{1, 9, math.Inf(1), 4}, []bool{false, true, true, false}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with masked +Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "Masked +Inf test")
// test with -Inf
T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(-1), 4}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with -Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "-Inf test")
// test with Mask and -Inf
T = New(WithShape(4), WithBacking([]float64{1, 9, math.Inf(-1), 4}, []bool{false, true, true, false}))
if argmax, err = T.Argmax(AllAxes); err != nil {
t.Errorf("Failed test with masked -Inf: %v", err)
}
assert.True(argmax.IsScalar())
assert.Equal(3, argmax.ScalarValue(), "Masked -Inf test")
// with different engine
T = basicDenseF64.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmax, err = T.Argmax(i); err != nil {
t.Error(err)
continue
}
assert.True(argmaxCorrect[i].shape.Eq(argmax.Shape()), "Argmax(%d) error. Want shape %v. Got %v", i, argmaxCorrect[i].shape, argmax.Shape())
assert.Equal(argmaxCorrect[i].data, argmax.Data(), "Argmax(%d) error. ", i)
}
// idiotsville
_, err = T.Argmax(10000)
assert.NotNil(err)
}
func TestDense_Argmin_F64(t *testing.T) {
assert := assert.New(t)
var T, argmin *Dense
var err error
T = basicDenseF64.Clone().(*Dense)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// test all axes
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Error(err)
return
}
assert.True(argmin.IsScalar())
assert.Equal(11, argmin.ScalarValue())
// test with NaN
T = New(WithShape(4), WithBacking([]float64{1, 2, math.NaN(), 4}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(2, argmin.ScalarValue(), "NaN test")
// test with Mask and NaN
T = New(WithShape(4), WithBacking([]float64{1, -9, math.NaN(), 4}, []bool{false, true, true, false}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with NaN: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "Masked NaN test")
// test with +Inf
T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(1), 4}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with +Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "+Inf test")
// test with Mask and +Inf
T = New(WithShape(4), WithBacking([]float64{1, -9, math.Inf(1), 4}, []bool{false, true, true, false}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with masked +Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "Masked +Inf test")
// test with -Inf
T = New(WithShape(4), WithBacking([]float64{1, 2, math.Inf(-1), 4}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with -Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(2, argmin.ScalarValue(), "-Inf test")
// test with Mask and -Inf
T = New(WithShape(4), WithBacking([]float64{1, -9, math.Inf(-1), 4}, []bool{false, true, true, false}))
if argmin, err = T.Argmin(AllAxes); err != nil {
t.Errorf("Failed test with masked -Inf: %v", err)
}
assert.True(argmin.IsScalar())
assert.Equal(0, argmin.ScalarValue(), "Masked -Inf test")
// with different engine
T = basicDenseF64.Clone().(*Dense)
WithEngine(dummyEngine2{})(T)
for i := 0; i < T.Dims(); i++ {
if argmin, err = T.Argmin(i); err != nil {
t.Error(err)
continue
}
assert.True(argminCorrect[i].shape.Eq(argmin.Shape()), "Argmin(%d) error. Want shape %v. Got %v", i, argminCorrect[i].shape, argmin.Shape())
assert.Equal(argminCorrect[i].data, argmin.Data(), "Argmin(%d) error. ", i)
}
// idiotsville
_, err = T.Argmin(10000)
assert.NotNil(err)
}
tensor-0.9.24/dense_arith.go 0000664 0000000 0000000 00000025401 14265126151 0015741 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import "github.com/pkg/errors"
// Add performs t + other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Add(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Add(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Add()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Add")
}
return
}
if adder, ok := t.e.(Adder); ok {
if ret, err = adder.Add(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Add()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Add")
}
return
}
return nil, errors.Errorf("Engine does not support Add()")
}
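// A minimal usage sketch for the elementwise methods in this file (illustrative
// only, not part of the generated API): two same-shaped *Dense values are added
// and the error is returned rather than panicking on shape or type mismatch.
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b := New(WithShape(2, 2), WithBacking([]float64{10, 20, 30, 40}))
//	sum, err := a.Add(b)
//	if err != nil {
//		// handle shape/type/engine errors here
//	}
//	_ = sum // sum is backed by []float64{11, 22, 33, 44}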
// Sub performs t - other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Sub(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Sub(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Sub()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Sub")
}
return
}
if suber, ok := t.e.(Suber); ok {
if ret, err = suber.Sub(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Sub()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Sub")
}
return
}
return nil, errors.Errorf("Engine does not support Sub()")
}
// Mul performs t × other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Mul(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Mul(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Mul()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Mul")
}
return
}
if muler, ok := t.e.(Muler); ok {
if ret, err = muler.Mul(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Mul()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Mul")
}
return
}
return nil, errors.Errorf("Engine does not support Mul()")
}
// Div performs t ÷ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Div(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Div(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Div()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Div")
}
return
}
if diver, ok := t.e.(Diver); ok {
if ret, err = diver.Div(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Div()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Div")
}
return
}
return nil, errors.Errorf("Engine does not support Div()")
}
// Pow performs t ^ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Pow(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Pow(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Pow()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Pow")
}
return
}
if power, ok := t.e.(Power); ok {
if ret, err = power.Pow(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Pow()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Pow")
}
return
}
return nil, errors.Errorf("Engine does not support Pow()")
}
// Mod performs t % other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) Mod(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Mod(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Mod()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Mod")
}
return
}
if moder, ok := t.e.(Moder); ok {
if ret, err = moder.Mod(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Mod()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Mod")
}
return
}
return nil, errors.Errorf("Engine does not support Mod()")
}
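// The FuncOpts named in the doc comments above compose with any of these methods.
// A hedged sketch of the common patterns, assuming the usual semantics of these
// options (UseUnsafe overwrites the receiver, WithReuse writes into a preallocated
// tensor, WithIncr accumulates into one):
//
//	a := New(WithShape(2), WithBacking([]float64{1, 2}))
//	b := New(WithShape(2), WithBacking([]float64{3, 4}))
//	out := New(Of(Float64), WithShape(2))
//
//	sum, _ := a.Add(b, WithReuse(out)) // result written into out; sum == out
//	_, _ = a.Add(b, WithIncr(out))     // out += a + b (accumulate)
//	_, _ = a.Add(b, UseUnsafe())       // a is overwritten with a + b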
// AddScalar performs t + other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) AddScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.AddScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do AddScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "AddScalar")
}
return
}
if adder, ok := t.e.(Adder); ok {
if ret, err = adder.AddScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do AddScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "AddScalar")
}
return
}
return nil, errors.Errorf("Engine does not support AddScalar()")
}
// SubScalar performs t - other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) SubScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.SubScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do SubScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "SubScalar")
}
return
}
if suber, ok := t.e.(Suber); ok {
if ret, err = suber.SubScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do SubScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "SubScalar")
}
return
}
return nil, errors.Errorf("Engine does not support SubScalar()")
}
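// The leftTensor flag matters for the non-commutative scalar variants: it says
// whether the tensor is the left operand of the operation. A minimal sketch using
// SubScalar (illustrative only):
//
//	t := New(WithShape(2), WithBacking([]float64{10, 20}))
//	l, _ := t.SubScalar(1.0, true)  // t - 1 -> []float64{9, 19}
//	r, _ := t.SubScalar(1.0, false) // 1 - t -> []float64{-9, -19}
//	_, _ = l, r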
// MulScalar performs t × other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) MulScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.MulScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do MulScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "MulScalar")
}
return
}
if muler, ok := t.e.(Muler); ok {
if ret, err = muler.MulScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do MulScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "MulScalar")
}
return
}
return nil, errors.Errorf("Engine does not support MulScalar()")
}
// DivScalar performs t ÷ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) DivScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.DivScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do DivScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "DivScalar")
}
return
}
if diver, ok := t.e.(Diver); ok {
if ret, err = diver.DivScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do DivScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "DivScalar")
}
return
}
return nil, errors.Errorf("Engine does not support DivScalar()")
}
// PowScalar performs t ^ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) PowScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.PowScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do PowScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "PowScalar")
}
return
}
if power, ok := t.e.(Power); ok {
if ret, err = power.PowScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do PowScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "PowScalar")
}
return
}
return nil, errors.Errorf("Engine does not support PowScalar()")
}
// ModScalar performs t % other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (t *Dense) ModScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.ModScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do ModScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "ModScalar")
}
return
}
if moder, ok := t.e.(Moder); ok {
if ret, err = moder.ModScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do ModScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "ModScalar")
}
return
}
return nil, errors.Errorf("Engine does not support ModScalar()")
}
tensor-0.9.24/dense_arith_test.go 0000664 0000000 0000000 00000152335 14265126151 0017007 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"testing"
"testing/quick"
)
func TestDense_Add(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := a.Add(b)
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
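// The quick.Check-driven tests in this file assert algebraic properties rather than
// fixed outputs: Add and Mul check the identity element (a zero- or one-filled b),
// while Sub and Div apply the operation and then undo it. A hand-written sketch of
// the same identity property, assuming a concrete float64 tensor:
//
//	a := New(WithShape(3), WithBacking([]float64{1, 2, 3}))
//	zero := New(Of(Float64), WithShape(3)) // freshly allocated, so zero-valued
//	ret, _ := a.Add(zero)
//	// ret.Data() should equal a.Data(): adding the additive identity is a no-op.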
func TestDense_Sub(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := a.Sub(b)
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Add(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestDense_Mul(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := a.Mul(b)
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDense_Div(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := a.Div(b)
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Mul(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestDense_Pow(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := a.Pow(b)
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestDense_Add_unsafe(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := a.Add(b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
func TestDense_Sub_unsafe(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := a.Sub(b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Add(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestDense_Mul_unsafe(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := a.Mul(b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDense_Div_unsafe(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := a.Div(b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Mul(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestDense_Pow_unsafe(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := a.Pow(b, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestDense_Add_reuse(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := a.Add(b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Add(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Add(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Add(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Add failed: %v", err)
}
}
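// WithReuse(reuse) directs the result into a preallocated tensor of the same
// shape and Dtype instead of allocating a fresh one; the returned value is the
// reuse tensor, as the test above checks. A minimal hand-written sketch
// (illustrative values; not part of the generated suite):
func TestDense_Add_reuseSketch(t *testing.T) {
	a := New(WithBacking([]float64{1, 2, 3}))
	b := New(WithBacking([]float64{4, 5, 6}))
	reuse := New(Of(Float64), WithShape(3))
	ret, err := a.Add(b, WithReuse(reuse))
	if err != nil {
		t.Fatal(err)
	}
	if ret != reuse {
		t.Errorf("expected the result to be written into reuse")
	}
	got := ret.Data().([]float64)
	want := []float64{5, 7, 9}
	for i := range want {
		if got[i] != want[i] {
			t.Errorf("ret[%d] = %v, want %v", i, got[i], want[i])
		}
	}
}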
func TestDense_Sub_reuse(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := a.Sub(b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Add(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Sub(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Sub(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Sub(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Sub failed: %v", err)
}
}
func TestDense_Mul_reuse(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := a.Mul(b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Mul(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Mul(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Mul(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Mul failed: %v", err)
}
}
func TestDense_Div_reuse(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := a.Div(b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Mul(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Div(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Div(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Div(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Div failed: %v", err)
}
}
func TestDense_Pow_reuse(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := a.Pow(b, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestDense_Add_incr(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
ret, err := a.Add(b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add failed: %v", err)
}
}
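// WithIncr(incr) accumulates the result into incr, i.e. it computes
// incr += a + b; the generated test above encodes this by adding the increment
// to the expected value first. A minimal hand-written sketch (illustrative
// values; not part of the generated suite):
func TestDense_Add_incrSketch(t *testing.T) {
	a := New(WithBacking([]float64{1, 2, 3}))
	b := New(WithBacking([]float64{4, 5, 6}))
	incr := New(WithBacking([]float64{100, 100, 100}))
	ret, err := a.Add(b, WithIncr(incr))
	if err != nil {
		t.Fatal(err)
	}
	got := ret.Data().([]float64)
	want := []float64{105, 107, 109}
	for i := range want {
		if got[i] != want[i] {
			t.Errorf("ret[%d] = %v, want %v", i, got[i], want[i])
		}
	}
}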
func TestDense_Sub_incr(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Suber)
we = we || !ok
ret, err := a.Sub(b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Add(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub failed: %v", err)
}
}
func TestDense_Mul_incr(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
ret, err := a.Mul(b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul failed: %v", err)
}
}
func TestDense_Div_incr(t *testing.T) {
inv := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
ret, err := a.Div(b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.Mul(b, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div failed: %v", err)
}
}
func TestDense_Pow_incr(t *testing.T) {
iden := func(a *Dense) bool {
b := New(Of(a.t), WithShape(a.Shape().Clone()...), WithEngine(a.Engine()))
b.Memset(identityVal(1, a.t))
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := a.Engine().(Power)
we = we || !ok
ret, err := a.Pow(b, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow failed: %v", err)
}
}
func TestDense_AddScalar(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, true)
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, false)
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Add(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Add (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Add(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Add (tensor as right, scalar as left) failed: %v", err)
}
}
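// AddScalar's second argument reports whether the tensor is the left operand.
// For addition the order is immaterial, but the flag matters for
// non-commutative operations such as Sub and Div. A minimal hand-written
// sketch (illustrative values; not part of the generated suite):
func TestDense_AddScalar_sketch(t *testing.T) {
	a := New(WithBacking([]float64{1, 2, 3}))
	ret, err := a.AddScalar(float64(10), true) // a + 10
	if err != nil {
		t.Fatal(err)
	}
	got := ret.Data().([]float64)
	want := []float64{11, 12, 13}
	for i := range want {
		if got[i] != want[i] {
			t.Errorf("ret[%d] = %v, want %v", i, got[i], want[i])
		}
	}
}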
func TestDense_SubScalar(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, true)
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.AddScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
inv2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, false)
if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.SubScalar(b, false, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Sub(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Sub (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Sub(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Sub (tensor as right, scalar as left) failed: %v", err)
}
}
func TestDense_MulScalar(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, true)
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, false)
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Mul(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Mul (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Mul(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Mul (tensor as right, scalar as left) failed: %v", err)
}
}
func TestDense_DivScalar(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := a.DivScalar(b, true)
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.MulScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Div(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Div (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Div(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Div (tensor as right, scalar as left) failed: %v", err)
}
}
func TestDense_PowScalar(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := a.PowScalar(b, true)
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
type Foo int
wt1 := func(a *Dense) bool {
b := Foo(0)
ret, err := Pow(a, b)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Pow (tensor as left, scalar as right) failed: %v", err)
}
wt2 := func(a *Dense) bool {
b := Foo(0)
ret, err := Pow(b, a)
if err == nil {
return false
}
_ = ret
return true
}
if err := quick.Check(wt2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("WrongType test for Pow (tensor as right, scalar as left) failed: %v", err)
}
}
func TestDense_AddScalar_unsafe(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, true, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, false, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDense_SubScalar_unsafe(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, true, UseUnsafe())
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.AddScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
inv2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, false, UseUnsafe())
if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.SubScalar(b, false, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDense_MulScalar_unsafe(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, true, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, false, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDense_DivScalar_unsafe(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := a.DivScalar(b, true, UseUnsafe())
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.MulScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
}
func TestDense_PowScalar_unsafe(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := a.PowScalar(b, true, UseUnsafe())
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if ret != a {
t.Errorf("Expected ret to be the same as a")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
}
func TestDense_AddScalar_reuse(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, true, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, false, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Add(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Adder)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Add(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Add(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Add failed: %v", err)
}
}
func TestDense_SubScalar_reuse(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, true, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.AddScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
inv2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, false, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "SubSV", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.SubScalar(b, false, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (scalar as left, tensor as right) failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Sub(b)
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := a.Engine().(Suber)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Sub(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Sub(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Sub", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Sub failed: %v", err)
}
}
func TestDense_MulScalar_reuse(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, true, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, false, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Mul(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Muler)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Mul(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Mul(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Mul failed: %v", err)
}
}
func TestDense_DivScalar_reuse(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := a.DivScalar(b, true, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.MulScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
mut := func(a, b *Dense, reuseA bool) bool {
// req because we're only testing on one kind of tensor/engine combo
a.e = StdEng{}
a.oe = StdEng{}
a.flag = 0
b.e = StdEng{}
b.oe = StdEng{}
b.flag = 0
if a.Dtype() != b.Dtype() {
return true
}
if !a.Shape().Eq(b.Shape()) {
return true
}
correct, err := a.Div(b)
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := a.Engine().(Diver)
we = we || !ok
var ret, reuse *Dense
if reuseA {
ret, err = a.Div(b, WithReuse(a))
reuse = a
} else {
ret, err = a.Div(b, WithReuse(b))
reuse = b
}
if err, retEarly := qcErrCheck(t, "Div", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(mut, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Reuse Mutation test for Div failed: %v", err)
}
}
func TestDense_PowScalar_reuse(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
reuse := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := a.PowScalar(b, true, WithReuse(reuse))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
if reuse != ret {
t.Errorf("Expected reuse to be the same as retVal")
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
}
func TestDense_AddScalar_incr(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, true, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Adder)
we = we || !ok
ret, err := a.AddScalar(b, false, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Add", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Add (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDense_SubScalar_incr(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(0, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, unsignedTypes)
_, ok := q.Engine().(Suber)
we = we || !ok
ret, err := a.SubScalar(b, true, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "SubVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.AddScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Sub (tensor as left, scalar as right) failed: %v", err)
}
}
func TestDense_MulScalar_incr(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, true, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (tensor as left, scalar as right) failed: %v", err)
}
iden2 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Muler)
we = we || !ok
ret, err := a.MulScalar(b, false, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Mul", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden2, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Mul (scalar as left, tensor as right) failed: %v", err)
}
}
func TestDense_DivScalar_incr(t *testing.T) {
inv1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, numberTypes, nil)
_, ok := q.Engine().(Diver)
we = we || !ok
ret, err := a.DivScalar(b, true, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "DivVS", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
ret, err = ret.MulScalar(b, true, UseUnsafe())
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(inv1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Inv test for Div (tensor as left, scalar as right) failed: %v", err)
}
}
func TestDense_PowScalar_incr(t *testing.T) {
iden1 := func(q *Dense) bool {
a := q.Clone().(*Dense)
b := identityVal(1, q.t)
incr := New(Of(a.t), WithShape(a.Shape().Clone()...))
correct := a.Clone().(*Dense)
incr.Memset(identityVal(100, a.t))
correct.Add(incr, UseUnsafe())
we, willFailEq := willerr(a, floatcmplxTypes, complexTypes)
_, ok := q.Engine().(Power)
we = we || !ok
ret, err := a.PowScalar(b, true, WithIncr(incr))
if err, retEarly := qcErrCheck(t, "Pow", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !qcEqCheck(t, a.Dtype(), willFailEq, correct.Data(), ret.Data()) {
return false
}
return true
}
if err := quick.Check(iden1, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Identity test for Pow (tensor as left, scalar as right) failed: %v", err)
}
}
tensor-0.9.24/dense_assign.go 0000664 0000000 0000000 00000004074 14265126151 0016121 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
)
func overlaps(a, b DenseTensor) bool {
if a.cap() == 0 || b.cap() == 0 {
return false
}
aarr := a.arr()
barr := b.arr()
if aarr.Uintptr() == barr.Uintptr() {
return true
}
aptr := aarr.Uintptr()
bptr := barr.Uintptr()
capA := aptr + uintptr(cap(aarr.Header.Raw))
capB := bptr + uintptr(cap(barr.Header.Raw))
switch {
case aptr < bptr:
if bptr < capA {
return true
}
case aptr > bptr:
if aptr < capB {
return true
}
}
return false
}
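// To make the interval test above concrete, consider two views over the same
// backing slice (an illustrative sketch using plain slices; overlaps itself
// performs the same check on the tensors' data pointers and capacities):
//
//	backing := make([]float64, 10)
//	a := backing[0:6]  // occupies [p, p+6)
//	b := backing[4:10] // occupies [p+4, p+10)
//
// The two address ranges intersect, so tensors built on a and b overlap and
// assignArray would need to copy through a temporary to stay correct.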
func assignArray(dest, src DenseTensor) (err error) {
// var copiedSrc bool
if src.IsScalar() {
panic("HELP")
}
dd := dest.Dims()
sd := src.Dims()
dstrides := dest.Strides()
sstrides := src.Strides()
var ds, ss int
ds = dstrides[0]
if src.IsVector() {
ss = sstrides[0]
} else {
ss = sstrides[sd-1]
}
// when dd == 1 and the strides point in opposite directions (ds*ss < 0), or when dd > 1,
// we copy to a temporary if there is an overlap of data
if ((dd == 1 && sd >= 1 && ds*ss < 0) || dd > 1) && overlaps(dest, src) {
// create temp
// copiedSrc = true
}
// broadcast src to dest for raw iteration
tmpShape := Shape(BorrowInts(sd))
tmpStrides := BorrowInts(len(src.Strides()))
copy(tmpShape, src.Shape())
copy(tmpStrides, src.Strides())
defer ReturnInts(tmpShape)
defer ReturnInts(tmpStrides)
if sd > dd {
tmpDim := sd
for tmpDim > dd && tmpShape[0] == 1 {
tmpDim--
// this is better than tmpShape = tmpShape[1:]
// because we are going to return these ints later
copy(tmpShape, tmpShape[1:])
copy(tmpStrides, tmpStrides[1:])
}
}
var newStrides []int
if newStrides, err = BroadcastStrides(dest.Shape(), tmpShape, dstrides, tmpStrides); err != nil {
err = errors.Wrapf(err, "BroadcastStrides failed")
return
}
dap := dest.Info()
sap := MakeAP(tmpShape, newStrides, src.Info().o, src.Info().Δ)
diter := newFlatIterator(dap)
siter := newFlatIterator(&sap)
_, err = copyDenseIter(dest, src, diter, siter)
sap.zeroOnly() // cleanup, but not entirely because tmpShape and tmpStrides are separately cleaned up. Don't double free
return
}
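// An illustrative example of the dimension handling above (shapes assumed for
// the sketch): assigning a src of shape (1, 3) into a dest of shape (3) first
// drops src's leading size-1 dimension, since sd > dd and tmpShape[0] == 1;
// both sides then describe the same 3 elements and the flat iterators copy
// them element-for-element.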
tensor-0.9.24/dense_cmp.go 0000664 0000000 0000000 00000030627 14265126151 0015417 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import "github.com/pkg/errors"
// Gt performs t > other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) Gt(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Gt(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Gt()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Gt")
}
return
}
if gter, ok := t.e.(Gter); ok {
if ret, err = gter.Gt(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Gt()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Gt")
}
return
}
return nil, errors.Errorf("Engine does not support Gt()")
}
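// A minimal usage sketch for the comparison methods (values are illustrative,
// not part of the generated code). By default the result holds bools; passing
// AsSameType() yields a tensor of the operands' Dtype instead:
//
//	a := New(WithBacking([]float64{1, 5, 3}))
//	b := New(WithBacking([]float64{2, 2, 3}))
//	gt, _ := a.Gt(b)                 // bool data: [false true false]
//	same, _ := a.Gt(b, AsSameType()) // float64 data: [0 1 0]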
// Gte performs t ≥ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) Gte(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Gte(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Gte()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Gte")
}
return
}
if gteer, ok := t.e.(Gteer); ok {
if ret, err = gteer.Gte(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Gte()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Gte")
}
return
}
return nil, errors.Errorf("Engine does not support Gte()")
}
// Lt performs t < other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) Lt(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Lt(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Lt()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Lt")
}
return
}
if lter, ok := t.e.(Lter); ok {
if ret, err = lter.Lt(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Lt()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Lt")
}
return
}
return nil, errors.Errorf("Engine does not support Lt()")
}
// Lte performs t ≤ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) Lte(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.Lte(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Lte()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Lte")
}
return
}
if lteer, ok := t.e.(Lteer); ok {
if ret, err = lteer.Lte(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Lte()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Lte")
}
return
}
return nil, errors.Errorf("Engine does not support Lte()")
}
// ElEq performs t == other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) ElEq(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.ElEq(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Eq()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Eq")
}
return
}
if eleqer, ok := t.e.(ElEqer); ok {
if ret, err = eleqer.ElEq(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Eq()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Eq")
}
return
}
return nil, errors.Errorf("Engine does not support Eq()")
}
// ElNe performs t ≠ other elementwise. Both t and other must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) ElNe(other *Dense, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.ElNe(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Ne()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Ne")
}
return
}
if eleqer, ok := t.e.(ElEqer); ok {
if ret, err = eleqer.ElNe(t, other, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do Ne()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Ne")
}
return
}
return nil, errors.Errorf("Engine does not support Ne()")
}
// GtScalar performs t > other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) GtScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.GtScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do GtScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "GtScalar")
}
return
}
if gter, ok := t.e.(Gter); ok {
if ret, err = gter.GtScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do GtScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "GtScalar")
}
return
}
return nil, errors.Errorf("Engine does not support GtScalar()")
}
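// A minimal usage sketch for the scalar comparisons (values are illustrative).
// leftTensor selects which side of the operator the tensor occupies, which
// matters because the comparison is not symmetric:
//
//	a := New(WithBacking([]float64{1, 5, 3}))
//	x, _ := a.GtScalar(2.0, true)  // a > 2 -> [false true true]
//	y, _ := a.GtScalar(2.0, false) // 2 > a -> [true false false]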
// GteScalar performs t ≥ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) GteScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.GteScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do GteScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "GteScalar")
}
return
}
if gteer, ok := t.e.(Gteer); ok {
if ret, err = gteer.GteScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do GteScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "GteScalar")
}
return
}
return nil, errors.Errorf("Engine does not support GteScalar()")
}
// LtScalar performs t < other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) LtScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.LtScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do LtScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "LtScalar")
}
return
}
if lter, ok := t.e.(Lter); ok {
if ret, err = lter.LtScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do LtScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "LtScalar")
}
return
}
return nil, errors.Errorf("Engine does not support LtScalar()")
}
// LteScalar performs t ≤ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) LteScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.LteScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do LteScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "LteScalar")
}
return
}
if lteer, ok := t.e.(Lteer); ok {
if ret, err = lteer.LteScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do LteScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "LteScalar")
}
return
}
return nil, errors.Errorf("Engine does not support LteScalar()")
}
// ElEqScalar performs t == other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) ElEqScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.EqScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do EqScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "EqScalar")
}
return
}
if eleqer, ok := t.e.(ElEqer); ok {
if ret, err = eleqer.EqScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do EqScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "EqScalar")
}
return
}
return nil, errors.Errorf("Engine does not support EqScalar()")
}
// ElNeScalar performs t ≠ other elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in other.
// Acceptable FuncOpts are: UseUnsafe(), AsSameType(), WithReuse().
// UseUnsafe() will ensure that the same type is returned.
// Tensors used in WithReuse have to have the same Dtype as the return value's Dtype.
func (t *Dense) ElNeScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {
var ret Tensor
if t.oe != nil {
if ret, err = t.oe.NeScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do NeScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "NeScalar")
}
return
}
if eleqer, ok := t.e.(ElEqer); ok {
if ret, err = eleqer.NeScalar(t, other, leftTensor, opts...); err != nil {
return nil, errors.Wrapf(err, "Unable to do NeScalar()")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "NeScalar")
}
return
}
return nil, errors.Errorf("Engine does not support NeScalar()")
}
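// exampleDenseScalarCmp is an illustrative sketch and not part of the generated API. It shows how
// the *Scalar comparison methods above are typically called: leftTensor=true computes t OP other,
// leftTensor=false computes other OP t, and AsSameType() keeps the operand's Dtype instead of
// returning a bool tensor. The concrete values below are assumptions for illustration only.
func exampleDenseScalarCmp() {
	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	gt, _ := a.GtScalar(2.0, true)                  // a > 2, bool-backed *Dense
	lt, _ := a.GtScalar(2.0, false)                 // 2 > a, bool-backed *Dense
	same, _ := a.LteScalar(2.0, true, AsSameType()) // a ≤ 2, result keeps float64 Dtype (1.0/0.0)
	_, _, _ = gt, lt, same
}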
tensor-0.9.24/dense_cmp_test.go 0000664 0000000 0000000 00000104415 14265126151 0016453 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"reflect"
"testing"
"testing/quick"
)
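// The comparison tests in this file are property-based: each test builds random *Dense values with
// testing/quick and checks an algebraic property of the op, transitivity for the ordering
// comparisons (a > b and b > c implies a > c) and symmetry for ElEq/ElNe (a op b yields the same
// data as b op a). willerr and qcErrCheck are helpers defined elsewhere in this package's tests;
// they decide whether an error is expected for the generated Dtype and engine.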
func TestDense_Gt(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Gt(b)
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Gt(c)
if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gt(c)
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestDense_Gte(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Gte(b)
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Gte(c)
if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gte(c)
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestDense_Lt(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Lt(b)
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Lt(c)
if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lt(c)
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestDense_Lte(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Lte(b)
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Lte(c)
if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lte(c)
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestDense_ElEq(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.ElEq(b)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.ElEq(c)
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.ElEq(c)
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := a.ElEq(b)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := b.ElEq(a)
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
}
func TestDense_ElNe(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := a.ElNe(b)
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := b.ElNe(a)
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElNe failed: %v", err)
}
}
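// The _assame variants below repeat the property checks with AsSameType(): the comparison result
// keeps the operands' Dtype (1/0 values) rather than being a bool tensor, which is why threewayEq
// is applied to the raw data instead of comparing Bools().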
func TestDense_Gt_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Gt(b, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Gt(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gt(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestDense_Gte_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Gte(b, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Gte(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gte(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestDense_Lt_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Lt(b, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Lt(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lt(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestDense_Lte_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.Lte(b, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.Lte(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lte(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestDense_ElEq_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
c := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
cv, _ := quick.Value(c.Dtype().Type, r)
b.Memset(bv.Interface())
c.Memset(cv.Interface())
axb, err := a.ElEq(b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := b.ElEq(c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", b, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.ElEq(c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := a.ElEq(b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := b.ElEq(a, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
}
func TestDense_ElNe_assame(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
b := q.Clone().(*Dense)
bv, _ := quick.Value(b.Dtype().Type, r)
b.Memset(bv.Interface())
axb, err := a.ElNe(b, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := b.ElNe(a, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElNe failed: %v", err)
}
}
func TestDense_GtScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.GtScalar(b, true)
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.GtScalar(b, false)
if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gt(c)
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestDense_GteScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.GteScalar(b, true)
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.GteScalar(b, false)
if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gte(c)
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestDense_LtScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.LtScalar(b, true)
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.LtScalar(b, false)
if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lt(c)
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestDense_LteScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, ordTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.LteScalar(b, true)
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.LteScalar(b, false)
if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lte(c)
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestDense_ElEqScalar(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.ElEqScalar(b, true)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.ElEqScalar(b, false)
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.ElEq(c)
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
ab := axb.Bools()
bc := bxc.Bools()
ac := axc.Bools()
for i, vab := range ab {
if vab && bc[i] {
if !ac[i] {
return false
}
}
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := a.ElEqScalar(b, true)
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := a.ElEqScalar(b, false)
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElEq failed: %v", err)
}
}
func TestDense_ElNeScalar(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, eqTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := a.ElNeScalar(b, true)
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := a.ElNeScalar(b, false)
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElNe failed: %v", err)
}
}
func TestDense_GtScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.GtScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.GtScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gt(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gt failed: %v", err)
}
}
func TestDense_GteScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Gteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.GteScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.GteScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Gte(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Gte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Gte failed: %v", err)
}
}
func TestDense_LtScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lter)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.LtScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.LtScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lt(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lt - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lt failed: %v", err)
}
}
func TestDense_LteScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(Lteer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.LteScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.LteScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.Lte(c, AsSameType())
if err, retEarly := qcErrCheck(t, "Lte - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for Lte failed: %v", err)
}
}
func TestDense_ElEqScalar_assame(t *testing.T) {
transFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
c := q.Clone().(*Dense)
cv, _ := quick.Value(c.Dtype().Type, r)
c.Memset(cv.Interface())
axb, err := a.ElEqScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxc, err := c.ElEqScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙c", c, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
axc, err := a.ElEq(c, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙c", a, c, we, err); retEarly {
if err != nil {
return false
}
return true
}
if !threewayEq(axb.Data(), bxc.Data(), axc.Data()) {
t.Errorf("a: %-v", a)
t.Errorf("b: %-v", b)
t.Errorf("c: %-v", c)
t.Errorf("axb.Data() %v", axb.Data())
t.Errorf("bxc.Data() %v", bxc.Data())
t.Errorf("axc.Data() %v", axc.Data())
return false
}
return true
}
if err := quick.Check(transFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Transitivity test for ElEq failed: %v", err)
}
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := a.ElEqScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := a.ElEqScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "ElEq - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElEq failed: %v", err)
}
}
func TestDense_ElNeScalar_assame(t *testing.T) {
symFn := func(q *Dense) bool {
we, _ := willerr(q, nonComplexNumberTypes, nil)
_, ok := q.Engine().(ElEqer)
we = we || !ok
if err := typeclassCheck(q.Dtype(), nonComplexNumberTypes); err != nil {
return true // we exit early if the generated type is not something we can handle
}
r := newRand()
a := q.Clone().(*Dense)
bv, _ := quick.Value(a.Dtype().Type, r)
b := bv.Interface()
axb, err := a.ElNeScalar(b, true, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - a∙b", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
bxa, err := a.ElNeScalar(b, false, AsSameType())
if err, retEarly := qcErrCheck(t, "ElNe - b∙a", a, b, we, err); retEarly {
if err != nil {
return false
}
return true
}
return reflect.DeepEqual(axb.Data(), bxa.Data())
}
if err := quick.Check(symFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Symmetry test for ElNe failed: %v", err)
}
}
tensor-0.9.24/dense_colmajor_linalg_test.go 0000664 0000000 0000000 00000054067 14265126151 0021037 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
)
var colMajorTraceTests = []struct {
data interface{}
correct interface{}
err bool
}{
{[]int{0, 1, 2, 3, 4, 5}, int(4), false},
{[]int8{0, 1, 2, 3, 4, 5}, int8(4), false},
{[]int16{0, 1, 2, 3, 4, 5}, int16(4), false},
{[]int32{0, 1, 2, 3, 4, 5}, int32(4), false},
{[]int64{0, 1, 2, 3, 4, 5}, int64(4), false},
{[]uint{0, 1, 2, 3, 4, 5}, uint(4), false},
{[]uint8{0, 1, 2, 3, 4, 5}, uint8(4), false},
{[]uint16{0, 1, 2, 3, 4, 5}, uint16(4), false},
{[]uint32{0, 1, 2, 3, 4, 5}, uint32(4), false},
{[]uint64{0, 1, 2, 3, 4, 5}, uint64(4), false},
{[]float32{0, 1, 2, 3, 4, 5}, float32(4), false},
{[]float64{0, 1, 2, 3, 4, 5}, float64(4), false},
{[]complex64{0, 1, 2, 3, 4, 5}, complex64(4), false},
{[]complex128{0, 1, 2, 3, 4, 5}, complex128(4), false},
{[]bool{true, false, true, false, true, false}, nil, true},
}
func TestColMajor_Dense_Trace(t *testing.T) {
assert := assert.New(t)
for i, tts := range colMajorTraceTests {
T := New(WithShape(2, 3), AsFortran(tts.data))
trace, err := T.Trace()
if checkErr(t, tts.err, err, "Trace", i) {
continue
}
assert.Equal(tts.correct, trace)
		// Trace should error when called on a non-matrix tensor
T = New(WithBacking(tts.data))
_, err = T.Trace()
if err == nil {
t.Error("Expected an error when Trace() on non-matrices")
}
}
}
var colMajorInnerTests = []struct {
a, b interface{}
shapeA, shapeB Shape
correct interface{}
err bool
}{
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3, 1}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3, 1}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{1, 3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{1, 3}, float64(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3, 1}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3, 1}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{1, 3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{1, 3}, float32(5), false},
// stupids: type differences
{Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true},
{Range(Float32, 0, 3), Range(Byte, 0, 3), Shape{3}, Shape{3}, nil, true},
{Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, nil, true},
{Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true},
// differing size
{Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{4}, Shape{3}, nil, true},
	// A is not a vector
{Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{2, 2}, Shape{3}, nil, true},
}
func TestColMajor_Dense_Inner(t *testing.T) {
for i, its := range colMajorInnerTests {
a := New(WithShape(its.shapeA...), AsFortran(its.a))
b := New(WithShape(its.shapeB...), AsFortran(its.b))
T, err := a.Inner(b)
if checkErr(t, its.err, err, "Inner", i) {
continue
}
assert.Equal(t, its.correct, T)
}
}
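// exampleAsFortranLayout is an illustrative sketch and not part of the test suite. It shows the
// column-major construction used throughout these tests: with AsFortran the backing slice is laid
// out column by column, so the same backing data produces a different logical matrix than the
// default row-major layout. The values below are assumptions for illustration only.
func exampleAsFortranLayout() {
	rowMaj := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5})) // [[0 1 2] [3 4 5]]
	colMaj := New(WithShape(2, 3), AsFortran([]float64{0, 1, 2, 3, 4, 5}))   // [[0 2 4] [1 3 5]]
	_, _ = rowMaj, colMaj
}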
var colMajorMatVecMulTests = []linalgTest{
// Float64s
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false},
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false},
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false},
// float64s with transposed matrix
{Range(Float64, 0, 6), Range(Float64, 0, 2), Shape{2, 3}, Shape{2}, true, false,
Range(Float64, 52, 55), Range(Float64, 100, 103), Shape{3}, Shape{3},
[]float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{3}, false, false, false},
// Float32s
{Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2},
[]float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false},
{Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false,
Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2},
[]float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false},
{Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false,
Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2},
[]float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false},
	// stupids: impossible shapes (wrong A)
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{6}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
//stupids: bad A shape
{Range(Float64, 0, 8), Range(Float64, 0, 3), Shape{4, 2}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
//stupids: bad B shape
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
//stupids: bad reuse
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 55), Range(Float64, 100, 102), Shape{3}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true},
//stupids: bad incr shape
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 105), Shape{2}, Shape{5},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false},
// stupids: type mismatch A and B
{Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B
{Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B
{Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B
{Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B (non-Float)
{Range(Float64, 0, 6), Range(Int, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch, reuse
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float32, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true},
// stupids: type mismatch, incr
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float32, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false},
// stupids: type mismatch, incr not a Number
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), []bool{true, true, true}, Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false},
}
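// TestColMajor_Dense_MatVecMul runs every case in colMajorMatVecMulTests in four modes: the plain
// (safe) call, WithIncr (accumulate into an existing tensor), WithReuse (write into a preallocated
// tensor), and WithIncr combined with WithReuse. Each result is checked for the expected shape,
// column-major data order, and data.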
func TestColMajor_Dense_MatVecMul(t *testing.T) {
assert := assert.New(t)
for i, mvmt := range colMajorMatVecMulTests {
a := New(WithShape(mvmt.shapeA...), AsFortran(mvmt.a))
b := New(WithShape(mvmt.shapeB...), AsFortran(mvmt.b))
if mvmt.transA {
if err := a.T(); err != nil {
t.Error(err)
continue
}
}
T, err := a.MatVecMul(b)
if checkErr(t, mvmt.err, err, "Safe", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(mvmt.correct, T.Data())
// incr
incr := New(WithShape(mvmt.shapeI...), AsFortran(mvmt.incr))
T, err = a.MatVecMul(b, WithIncr(incr))
if checkErr(t, mvmt.errIncr, err, "WithIncr", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(mvmt.correctIncr, T.Data())
// reuse
reuse := New(WithShape(mvmt.shapeR...), AsFortran(mvmt.reuse))
T, err = a.MatVecMul(b, WithReuse(reuse))
if checkErr(t, mvmt.errReuse, err, "WithReuse", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(mvmt.correct, T.Data())
// reuse AND incr
T, err = a.MatVecMul(b, WithIncr(incr), WithReuse(reuse))
if checkErr(t, mvmt.err, err, "WithReuse and WithIncr", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(mvmt.correctIncrReuse, T.Data())
}
}
var colMajorMatMulTests = []linalgTest{
// Float64s
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, false, false},
// Float32s
{Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float32, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float32{10, 28, 13, 40}, []float32{110, 130, 114, 143}, []float32{120, 158, 127, 183}, Shape{2, 2}, false, false, false},
// Edge cases - Row Vecs (Float64)
{Range(Float64, 0, 2), Range(Float64, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false,
Range(Float64, 10, 16), Range(Float64, 100, 106), Shape{2, 3}, Shape{2, 3},
[]float64{0, 0, 0, 1, 0, 2}, []float64{100, 103, 101, 105, 102, 107}, []float64{100, 103, 101, 106, 102, 109}, Shape{2, 3}, false, false, false},
{Range(Float64, 0, 2), Range(Float64, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false,
Range(Float64, 10, 13), Range(Float64, 100, 103), Shape{1, 3}, Shape{1, 3},
[]float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{1, 3}, false, false, false},
{Range(Float64, 0, 2), Range(Float64, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false,
Range(Float64, 0, 1), Range(Float64, 100, 101), Shape{1, 1}, Shape{1, 1},
[]float64{1}, []float64{101}, []float64{102}, Shape{1, 1}, false, false, false},
// Edge cases - Row Vecs (Float32)
{Range(Float32, 0, 2), Range(Float32, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false,
Range(Float32, 10, 16), Range(Float32, 100, 106), Shape{2, 3}, Shape{2, 3},
[]float32{0, 0, 0, 1, 0, 2}, []float32{100, 103, 101, 105, 102, 107}, []float32{100, 103, 101, 106, 102, 109}, Shape{2, 3}, false, false, false},
{Range(Float32, 0, 2), Range(Float32, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false,
Range(Float32, 10, 13), Range(Float32, 100, 103), Shape{1, 3}, Shape{1, 3},
[]float32{3, 4, 5}, []float32{103, 105, 107}, []float32{106, 109, 112}, Shape{1, 3}, false, false, false},
{Range(Float32, 0, 2), Range(Float32, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false,
Range(Float32, 0, 1), Range(Float32, 100, 101), Shape{1, 1}, Shape{1, 1},
[]float32{1}, []float32{101}, []float32{102}, Shape{1, 1}, false, false, false},
// stupids - bad shape (not matrices):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false},
// stupids - bad shape (incompatible shapes):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6, 1}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false},
// stupids - bad shape (bad reuse shape):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 57), Range(Float64, 100, 104), Shape{5}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, false, true},
// stupids - bad shape (bad incr shape):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{4},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, true, false},
// stupids - type mismatch (a,b)
{Range(Float64, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false},
// stupids - type mismatch (a,b)
{Range(Float32, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false},
// stupids type mismatch (b not float)
{Range(Float64, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false},
// stupids type mismatch (a not float)
{Range(Int, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, true, false, false},
// stupids: type mismatch (incr)
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, true, false},
// stupids: type mismatch (reuse)
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float32, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 28, 13, 40}, []float64{110, 130, 114, 143}, []float64{120, 158, 127, 183}, Shape{2, 2}, false, false, true},
// stupids: type mismatch (reuse)
{Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float32{10, 28, 13, 40}, []float32{110, 130, 114, 143}, []float32{120, 158, 127, 183}, Shape{2, 2}, false, false, true},
}
func TestColMajorDense_MatMul(t *testing.T) {
assert := assert.New(t)
for i, mmt := range colMajorMatMulTests {
a := New(WithShape(mmt.shapeA...), AsFortran(mmt.a))
b := New(WithShape(mmt.shapeB...), AsFortran(mmt.b))
T, err := a.MatMul(b)
if checkErr(t, mmt.err, err, "Safe", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(mmt.correct, T.Data(), "Test %d", i)
// incr
incr := New(WithShape(mmt.shapeI...), AsFortran(mmt.incr))
T, err = a.MatMul(b, WithIncr(incr))
if checkErr(t, mmt.errIncr, err, "WithIncr", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correctIncr, T.Data())
// reuse
reuse := New(WithShape(mmt.shapeR...), AsFortran(mmt.reuse))
T, err = a.MatMul(b, WithReuse(reuse))
if checkErr(t, mmt.errReuse, err, "WithReuse", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correct, T.Data())
// reuse AND incr
T, err = a.MatMul(b, WithIncr(incr), WithReuse(reuse))
if checkErr(t, mmt.err, err, "WithIncr and WithReuse", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correctIncrReuse, T.Data())
}
}
var colMajorOuterTests = []linalgTest{
// Float64s
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
false, false, false},
// Float32s
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float32, 52, 61), Range(Float32, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float32{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float32{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float32{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
false, false, false},
// stupids - a or b not vector
{Range(Float64, 0, 3), Range(Float64, 0, 6), Shape{3}, Shape{3, 2}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
true, false, false},
// stupids - bad incr shape
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 106), Shape{3, 3}, Shape{3, 2},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
false, true, false},
// stupids - bad reuse shape
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 58), Range(Float64, 100, 109), Shape{3, 2}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
false, false, true},
// stupids - b not Float
{Range(Float64, 0, 3), Range(Int, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
true, false, false},
// stupids - a not Float
{Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
true, false, false},
// stupids - a-b type mismatch
{Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
true, false, false},
// stupids a-b type mismatch
{Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 103, 106, 101, 105, 109, 102, 107, 112}, []float64{100, 103, 106, 101, 106, 111, 102, 109, 116}, Shape{3, 3},
true, false, false},
}
func TestColMajor_Dense_Outer(t *testing.T) {
assert := assert.New(t)
for i, ot := range colMajorOuterTests {
a := New(WithShape(ot.shapeA...), AsFortran(ot.a))
b := New(WithShape(ot.shapeB...), AsFortran(ot.b))
T, err := a.Outer(b)
if checkErr(t, ot.err, err, "Safe", i) {
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(ot.correct, T.Data())
// incr
incr := New(WithShape(ot.shapeI...), AsFortran(ot.incr))
T, err = a.Outer(b, WithIncr(incr))
if checkErr(t, ot.errIncr, err, "WithIncr", i) {
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(ot.correctIncr, T.Data())
// reuse
reuse := New(WithShape(ot.shapeR...), AsFortran(ot.reuse))
T, err = a.Outer(b, WithReuse(reuse))
if checkErr(t, ot.errReuse, err, "WithReuse", i) {
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(ot.correct, T.Data())
// reuse AND incr
T, err = a.Outer(b, WithIncr(incr), WithReuse(reuse))
if err != nil {
t.Errorf("Reuse and Incr error'd %+v", err)
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsColMajor())
assert.Equal(ot.correctIncrReuse, T.Data())
}
}
tensor-0.9.24/dense_compat.go 0000664 0000000 0000000 00000036702 14265126151 0016123 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"fmt"
"math"
"math/cmplx"
"reflect"
arrow "github.com/apache/arrow/go/arrow"
arrowArray "github.com/apache/arrow/go/arrow/array"
"github.com/apache/arrow/go/arrow/bitutil"
arrowTensor "github.com/apache/arrow/go/arrow/tensor"
"github.com/chewxy/math32"
"github.com/pkg/errors"
"gonum.org/v1/gonum/mat"
)
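// convFromFloat64s converts data into a freshly allocated slice of the given Dtype. NaN and ±Inf
// inputs collapse to 0 for the integer Dtypes; for Float32 they map to the float32 NaN/Inf values,
// and for the complex Dtypes to complex NaN/Inf. It panics on an unsupported Dtype.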
func convFromFloat64s(to Dtype, data []float64) interface{} {
switch to {
case Int:
retVal := make([]int, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = int(v)
}
}
return retVal
case Int8:
retVal := make([]int8, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = int8(v)
}
}
return retVal
case Int16:
retVal := make([]int16, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = int16(v)
}
}
return retVal
case Int32:
retVal := make([]int32, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = int32(v)
}
}
return retVal
case Int64:
retVal := make([]int64, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = int64(v)
}
}
return retVal
case Uint:
retVal := make([]uint, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = uint(v)
}
}
return retVal
case Uint8:
retVal := make([]uint8, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = uint8(v)
}
}
return retVal
case Uint16:
retVal := make([]uint16, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = uint16(v)
}
}
return retVal
case Uint32:
retVal := make([]uint32, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = uint32(v)
}
}
return retVal
case Uint64:
retVal := make([]uint64, len(data))
for i, v := range data {
switch {
case math.IsNaN(v), math.IsInf(v, 0):
retVal[i] = 0
default:
retVal[i] = uint64(v)
}
}
return retVal
case Float32:
retVal := make([]float32, len(data))
for i, v := range data {
switch {
case math.IsNaN(v):
retVal[i] = math32.NaN()
case math.IsInf(v, 1):
retVal[i] = math32.Inf(1)
case math.IsInf(v, -1):
retVal[i] = math32.Inf(-1)
default:
retVal[i] = float32(v)
}
}
return retVal
case Float64:
retVal := make([]float64, len(data))
copy(retVal, data)
return retVal
case Complex64:
retVal := make([]complex64, len(data))
for i, v := range data {
switch {
case math.IsNaN(v):
retVal[i] = complex64(cmplx.NaN())
case math.IsInf(v, 0):
retVal[i] = complex64(cmplx.Inf())
default:
retVal[i] = complex(float32(v), float32(0))
}
}
return retVal
case Complex128:
retVal := make([]complex128, len(data))
for i, v := range data {
switch {
case math.IsNaN(v):
retVal[i] = cmplx.NaN()
case math.IsInf(v, 0):
retVal[i] = cmplx.Inf()
default:
retVal[i] = complex(v, float64(0))
}
}
return retVal
default:
panic("Unsupported Dtype")
}
}
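// An illustrative sketch (not part of the generated code) of how the conversion
// above treats non-finite values: integer targets drop NaN/Inf to zero, while
// float32 and complex targets preserve them.
//
//	ints := convFromFloat64s(Int, []float64{1, math.NaN(), math.Inf(1)}).([]int)
//	// ints == []int{1, 0, 0}
//	f32s := convFromFloat64s(Float32, []float64{math.Inf(-1)}).([]float32)
//	// math32.IsInf(f32s[0], -1) == true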
func convToFloat64s(t *Dense) (retVal []float64) {
retVal = make([]float64, t.len())
switch t.t {
case Int:
for i, v := range t.Ints() {
retVal[i] = float64(v)
}
return retVal
case Int8:
for i, v := range t.Int8s() {
retVal[i] = float64(v)
}
return retVal
case Int16:
for i, v := range t.Int16s() {
retVal[i] = float64(v)
}
return retVal
case Int32:
for i, v := range t.Int32s() {
retVal[i] = float64(v)
}
return retVal
case Int64:
for i, v := range t.Int64s() {
retVal[i] = float64(v)
}
return retVal
case Uint:
for i, v := range t.Uints() {
retVal[i] = float64(v)
}
return retVal
case Uint8:
for i, v := range t.Uint8s() {
retVal[i] = float64(v)
}
return retVal
case Uint16:
for i, v := range t.Uint16s() {
retVal[i] = float64(v)
}
return retVal
case Uint32:
for i, v := range t.Uint32s() {
retVal[i] = float64(v)
}
return retVal
case Uint64:
for i, v := range t.Uint64s() {
retVal[i] = float64(v)
}
return retVal
case Float32:
for i, v := range t.Float32s() {
switch {
case math32.IsNaN(v):
retVal[i] = math.NaN()
case math32.IsInf(v, 1):
retVal[i] = math.Inf(1)
case math32.IsInf(v, -1):
retVal[i] = math.Inf(-1)
default:
retVal[i] = float64(v)
}
}
return retVal
case Float64:
return t.Float64s()
case Complex64:
for i, v := range t.Complex64s() {
switch {
case cmplx.IsNaN(complex128(v)):
retVal[i] = math.NaN()
case cmplx.IsInf(complex128(v)):
retVal[i] = math.Inf(1)
default:
retVal[i] = float64(real(v))
}
}
return retVal
case Complex128:
for i, v := range t.Complex128s() {
switch {
case cmplx.IsNaN(v):
retVal[i] = math.NaN()
case cmplx.IsInf(v):
retVal[i] = math.Inf(1)
default:
retVal[i] = real(v)
}
}
return retVal
default:
panic(fmt.Sprintf("Cannot convert *Dense of %v to []float64", t.t))
}
}
func convToFloat64(x interface{}) float64 {
switch xt := x.(type) {
case int:
return float64(xt)
case int8:
return float64(xt)
case int16:
return float64(xt)
case int32:
return float64(xt)
case int64:
return float64(xt)
case uint:
return float64(xt)
case uint8:
return float64(xt)
case uint16:
return float64(xt)
case uint32:
return float64(xt)
case uint64:
return float64(xt)
case float32:
return float64(xt)
case float64:
return float64(xt)
case complex64:
return float64(real(xt))
case complex128:
return real(xt)
default:
panic("Cannot convert to float64")
}
}
// FromMat64 converts a *mat.Dense (from gonum.org/v1/gonum/mat) into a *Dense.
func FromMat64(m *mat.Dense, opts ...FuncOpt) *Dense {
r, c := m.Dims()
fo := ParseFuncOpts(opts...)
defer returnOpOpt(fo)
toCopy := fo.Safe()
as := fo.As()
if as.Type == nil {
as = Float64
}
switch as.Kind() {
case reflect.Int:
backing := convFromFloat64s(Int, m.RawMatrix().Data).([]int)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Int8:
backing := convFromFloat64s(Int8, m.RawMatrix().Data).([]int8)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Int16:
backing := convFromFloat64s(Int16, m.RawMatrix().Data).([]int16)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Int32:
backing := convFromFloat64s(Int32, m.RawMatrix().Data).([]int32)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Int64:
backing := convFromFloat64s(Int64, m.RawMatrix().Data).([]int64)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Uint:
backing := convFromFloat64s(Uint, m.RawMatrix().Data).([]uint)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Uint8:
backing := convFromFloat64s(Uint8, m.RawMatrix().Data).([]uint8)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Uint16:
backing := convFromFloat64s(Uint16, m.RawMatrix().Data).([]uint16)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Uint32:
backing := convFromFloat64s(Uint32, m.RawMatrix().Data).([]uint32)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Uint64:
backing := convFromFloat64s(Uint64, m.RawMatrix().Data).([]uint64)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Float32:
backing := convFromFloat64s(Float32, m.RawMatrix().Data).([]float32)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Float64:
var backing []float64
if toCopy {
backing = make([]float64, len(m.RawMatrix().Data))
copy(backing, m.RawMatrix().Data)
} else {
backing = m.RawMatrix().Data
}
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Complex64:
backing := convFromFloat64s(Complex64, m.RawMatrix().Data).([]complex64)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
case reflect.Complex128:
backing := convFromFloat64s(Complex128, m.RawMatrix().Data).([]complex128)
retVal := New(WithBacking(backing), WithShape(r, c))
return retVal
default:
panic(fmt.Sprintf("Unsupported Dtype - cannot convert float64 to %v", as))
}
panic("Unreachable")
}
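// An illustrative sketch (not part of the generated code) of FromMat64, using the
// FuncOpts exercised by this package's tests (As, UseUnsafe):
//
//	m := mat.NewDense(2, 3, []float64{0, 1, 2, 3, 4, 5})
//	T := FromMat64(m)              // *Dense of Float64, shape (2, 3), data copied
//	U := FromMat64(m, As(Int))     // values converted to int
//	V := FromMat64(m, UseUnsafe()) // Float64 only: shares the backing slice with m
//	_, _, _ = T, U, V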
// ToMat64 converts a *Dense to a *mat.Dense. All the values are converted into float64s.
// This function will only convert matrices. Any *Dense with more than 2 dimensions will cause an error.
func ToMat64(t *Dense, opts ...FuncOpt) (retVal *mat.Dense, err error) {
// checks:
if !t.IsNativelyAccessible() {
return nil, errors.Errorf("Cannot convert *Dense to *mat.Dense. Data is inaccessible")
}
if !t.IsMatrix() {
// error
return nil, errors.Errorf("Cannot convert *Dense to *mat.Dense. Expected number of dimensions: <=2, T has got %d dimensions (Shape: %v)", t.Dims(), t.Shape())
}
fo := ParseFuncOpts(opts...)
defer returnOpOpt(fo)
toCopy := fo.Safe()
// fix dims
r := t.Shape()[0]
c := t.Shape()[1]
var data []float64
switch {
case t.t == Float64 && toCopy && !t.IsMaterializable():
data = make([]float64, t.len())
copy(data, t.Float64s())
case !t.IsMaterializable():
data = convToFloat64s(t)
default:
it := newFlatIterator(&t.AP)
var next int
for next, err = it.Next(); err == nil; next, err = it.Next() {
if err = handleNoOp(err); err != nil {
return
}
data = append(data, convToFloat64(t.Get(next)))
}
err = nil
}
retVal = mat.NewDense(r, c, data)
return
}
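// An illustrative sketch (not part of the generated code) of ToMat64. Only
// matrices convert; tensors with more than 2 dimensions return an error, and
// non-Float64 data is converted to float64 on the way out.
//
//	T := New(WithShape(2, 3), WithBacking(Range(Float64, 0, 6)))
//	m, err := ToMat64(T) // *mat.Dense holding the same values
//	if err != nil {
//		// handle error
//	}
//	_ = m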
// FromArrowArray converts an "arrow/array".Interface into a Tensor of matching DataType.
func FromArrowArray(a arrowArray.Interface) *Dense {
a.Retain()
defer a.Release()
r := a.Len()
// TODO(poopoothegorilla): instead of creating bool ValidMask maybe
// bitmapBytes can be used from arrow API
mask := make([]bool, r)
for i := 0; i < r; i++ {
mask[i] = a.IsNull(i)
}
switch a.DataType() {
case arrow.BinaryTypes.String:
backing := make([]string, r)
for i := 0; i < r; i++ {
backing[i] = a.(*arrowArray.String).Value(i)
}
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.FixedWidthTypes.Boolean:
backing := make([]bool, r)
for i := 0; i < r; i++ {
backing[i] = a.(*arrowArray.Boolean).Value(i)
}
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Int8:
backing := a.(*arrowArray.Int8).Int8Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Int16:
backing := a.(*arrowArray.Int16).Int16Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Int32:
backing := a.(*arrowArray.Int32).Int32Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Int64:
backing := a.(*arrowArray.Int64).Int64Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Uint8:
backing := a.(*arrowArray.Uint8).Uint8Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Uint16:
backing := a.(*arrowArray.Uint16).Uint16Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Uint32:
backing := a.(*arrowArray.Uint32).Uint32Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Uint64:
backing := a.(*arrowArray.Uint64).Uint64Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Float32:
backing := a.(*arrowArray.Float32).Float32Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
case arrow.PrimitiveTypes.Float64:
backing := a.(*arrowArray.Float64).Float64Values()
retVal := New(WithBacking(backing, mask), WithShape(r, 1))
return retVal
default:
panic(fmt.Sprintf("Unsupported Arrow DataType - %v", a.DataType()))
}
panic("Unreachable")
}
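// An illustrative sketch (not part of the generated code) of FromArrowArray,
// mirroring the package tests: a one-dimensional arrow array becomes an (n, 1)
// *Dense, and arrow nulls show up as masked elements.
//
//	pool := memory.NewGoAllocator() // memory is "github.com/apache/arrow/go/arrow/memory"
//	b := arrowArray.NewFloat64Builder(pool)
//	defer b.Release()
//	b.AppendValues([]float64{0, 1, 2}, []bool{true, false, true})
//	arr := b.NewArray()
//	defer arr.Release()
//	T := FromArrowArray(arr) // shape (3, 1); T.Mask()[1] == true
//	_ = T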
// FromArrowTensor converts an "arrow/tensor".Interface into a Tensor of matching DataType.
func FromArrowTensor(a arrowTensor.Interface) *Dense {
a.Retain()
defer a.Release()
if !a.IsContiguous() {
panic("Non-contiguous data is Unsupported")
}
var shape []int
for _, val := range a.Shape() {
shape = append(shape, int(val))
}
l := a.Len()
validMask := a.Data().Buffers()[0].Bytes()
dataOffset := a.Data().Offset()
mask := make([]bool, l)
for i := 0; i < l; i++ {
mask[i] = len(validMask) != 0 && bitutil.BitIsNotSet(validMask, dataOffset+i)
}
switch a.DataType() {
case arrow.PrimitiveTypes.Int8:
backing := a.(*arrowTensor.Int8).Int8Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Int16:
backing := a.(*arrowTensor.Int16).Int16Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Int32:
backing := a.(*arrowTensor.Int32).Int32Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Int64:
backing := a.(*arrowTensor.Int64).Int64Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Uint8:
backing := a.(*arrowTensor.Uint8).Uint8Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Uint16:
backing := a.(*arrowTensor.Uint16).Uint16Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Uint32:
backing := a.(*arrowTensor.Uint32).Uint32Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Uint64:
backing := a.(*arrowTensor.Uint64).Uint64Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Float32:
backing := a.(*arrowTensor.Float32).Float32Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
case arrow.PrimitiveTypes.Float64:
backing := a.(*arrowTensor.Float64).Float64Values()
if a.IsColMajor() {
return New(WithShape(shape...), AsFortran(backing, mask))
}
return New(WithShape(shape...), WithBacking(backing, mask))
default:
panic(fmt.Sprintf("Unsupported Arrow DataType - %v", a.DataType()))
}
panic("Unreachable")
}
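// An illustrative sketch (not part of the generated code) of FromArrowTensor.
// Column-major arrow tensors (detected via IsColMajor) are loaded with AsFortran,
// so the resulting *Dense keeps the column-major data order; arrow nulls become
// masked elements. Here arr is assumed to be a previously built Float64 arrow array.
//
//	rowMajor := arrowTensor.NewFloat64(arr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
//	defer rowMajor.Release()
//	T := FromArrowTensor(rowMajor) // shape (2, 5), row-major
//	_ = T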
tensor-0.9.24/dense_compat_test.go 0000664 0000000 0000000 00000053504 14265126151 0017161 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"testing"
arrow "github.com/apache/arrow/go/arrow"
arrowArray "github.com/apache/arrow/go/arrow/array"
"github.com/apache/arrow/go/arrow/memory"
arrowTensor "github.com/apache/arrow/go/arrow/tensor"
"github.com/stretchr/testify/assert"
"gonum.org/v1/gonum/mat"
)
var toMat64Tests = []struct {
data interface{}
sliced interface{}
shape Shape
dt Dtype
}{
{Range(Int, 0, 6), []int{0, 1, 3, 4}, Shape{2, 3}, Int},
{Range(Int8, 0, 6), []int8{0, 1, 3, 4}, Shape{2, 3}, Int8},
{Range(Int16, 0, 6), []int16{0, 1, 3, 4}, Shape{2, 3}, Int16},
{Range(Int32, 0, 6), []int32{0, 1, 3, 4}, Shape{2, 3}, Int32},
{Range(Int64, 0, 6), []int64{0, 1, 3, 4}, Shape{2, 3}, Int64},
{Range(Uint, 0, 6), []uint{0, 1, 3, 4}, Shape{2, 3}, Uint},
{Range(Uint8, 0, 6), []uint8{0, 1, 3, 4}, Shape{2, 3}, Uint8},
{Range(Uint16, 0, 6), []uint16{0, 1, 3, 4}, Shape{2, 3}, Uint16},
{Range(Uint32, 0, 6), []uint32{0, 1, 3, 4}, Shape{2, 3}, Uint32},
{Range(Uint64, 0, 6), []uint64{0, 1, 3, 4}, Shape{2, 3}, Uint64},
{Range(Float32, 0, 6), []float32{0, 1, 3, 4}, Shape{2, 3}, Float32},
{Range(Float64, 0, 6), []float64{0, 1, 3, 4}, Shape{2, 3}, Float64},
{Range(Complex64, 0, 6), []complex64{0, 1, 3, 4}, Shape{2, 3}, Complex64},
{Range(Complex128, 0, 6), []complex128{0, 1, 3, 4}, Shape{2, 3}, Complex128},
}
func TestToMat64(t *testing.T) {
assert := assert.New(t)
for i, tmt := range toMat64Tests {
T := New(WithBacking(tmt.data), WithShape(tmt.shape...))
var m *mat.Dense
var err error
if m, err = ToMat64(T); err != nil {
t.Errorf("ToMat basic test %d failed : %v", i, err)
continue
}
conv := anyToFloat64s(tmt.data)
assert.Equal(conv, m.RawMatrix().Data, "i %d from %v", i, tmt.dt)
if T, err = sliceDense(T, nil, makeRS(0, 2)); err != nil {
t.Errorf("Slice failed %v", err)
continue
}
if m, err = ToMat64(T); err != nil {
t.Errorf("ToMat of slice test %d failed : %v", i, err)
continue
}
conv = anyToFloat64s(tmt.sliced)
assert.Equal(conv, m.RawMatrix().Data, "sliced test %d from %v", i, tmt.dt)
t.Logf("Done")
if tmt.dt == Float64 {
T = New(WithBacking(tmt.data), WithShape(tmt.shape...))
if m, err = ToMat64(T, UseUnsafe()); err != nil {
t.Errorf("ToMat64 unsafe test %d failed: %v", i, err)
}
conv = anyToFloat64s(tmt.data)
assert.Equal(conv, m.RawMatrix().Data, "float64 unsafe i %d from %v", i, tmt.dt)
conv[0] = 1000
assert.Equal(conv, m.RawMatrix().Data, "float64 unsafe i %d from %v", i, tmt.dt)
conv[0] = 0 // reset for future tests that use the same backing
}
}
// idiocy test
T := New(Of(Float64), WithShape(2, 3, 4))
_, err := ToMat64(T)
if err == nil {
t.Error("Expected an error when trying to convert a 3-T to *mat.Dense")
}
}
func TestFromMat64(t *testing.T) {
assert := assert.New(t)
var m *mat.Dense
var T *Dense
var backing []float64
for i, tmt := range toMat64Tests {
backing = Range(Float64, 0, 6).([]float64)
m = mat.NewDense(2, 3, backing)
T = FromMat64(m)
conv := anyToFloat64s(tmt.data)
assert.Equal(conv, T.Float64s(), "test %d: []float64 from %v", i, tmt.dt)
assert.True(T.Shape().Eq(tmt.shape))
T = FromMat64(m, As(tmt.dt))
assert.Equal(tmt.data, T.Data())
assert.True(T.Shape().Eq(tmt.shape))
if tmt.dt == Float64 {
backing = Range(Float64, 0, 6).([]float64)
m = mat.NewDense(2, 3, backing)
T = FromMat64(m, UseUnsafe())
assert.Equal(backing, T.Float64s())
assert.True(T.Shape().Eq(tmt.shape))
backing[0] = 1000
assert.Equal(backing, T.Float64s(), "test %d - unsafe float64", i)
}
}
}
var toArrowArrayTests = []struct {
data interface{}
valid []bool
dt arrow.DataType
shape Shape
}{
{
data: Range(Int8, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Int8,
shape: Shape{6, 1},
},
{
data: Range(Int16, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Int16,
shape: Shape{6, 1},
},
{
data: Range(Int32, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Int32,
shape: Shape{6, 1},
},
{
data: Range(Int64, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Int64,
shape: Shape{6, 1},
},
{
data: Range(Uint8, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Uint8,
shape: Shape{6, 1},
},
{
data: Range(Uint16, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Uint16,
shape: Shape{6, 1},
},
{
data: Range(Uint32, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Uint32,
shape: Shape{6, 1},
},
{
data: Range(Uint64, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Uint64,
shape: Shape{6, 1},
},
{
data: Range(Float32, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Float32,
shape: Shape{6, 1},
},
{
data: Range(Float64, 0, 6),
valid: []bool{true, true, true, false, true, true},
dt: arrow.PrimitiveTypes.Float64,
shape: Shape{6, 1},
},
}
func TestFromArrowArray(t *testing.T) {
assert := assert.New(t)
var T *Dense
pool := memory.NewGoAllocator()
for i, taat := range toArrowArrayTests {
var m arrowArray.Interface
switch taat.dt {
case arrow.BinaryTypes.String:
b := arrowArray.NewStringBuilder(pool)
defer b.Release()
b.AppendValues(
[]string{"0", "1", "2", "3", "4", "5"},
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.FixedWidthTypes.Boolean:
b := arrowArray.NewBooleanBuilder(pool)
defer b.Release()
b.AppendValues(
[]bool{true, false, true, false, true, false},
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Int8:
b := arrowArray.NewInt8Builder(pool)
defer b.Release()
b.AppendValues(
Range(Int8, 0, 6).([]int8),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Int16:
b := arrowArray.NewInt16Builder(pool)
defer b.Release()
b.AppendValues(
Range(Int16, 0, 6).([]int16),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Int32:
b := arrowArray.NewInt32Builder(pool)
defer b.Release()
b.AppendValues(
Range(Int32, 0, 6).([]int32),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Int64:
b := arrowArray.NewInt64Builder(pool)
defer b.Release()
b.AppendValues(
Range(Int64, 0, 6).([]int64),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Uint8:
b := arrowArray.NewUint8Builder(pool)
defer b.Release()
b.AppendValues(
Range(Uint8, 0, 6).([]uint8),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Uint16:
b := arrowArray.NewUint16Builder(pool)
defer b.Release()
b.AppendValues(
Range(Uint16, 0, 6).([]uint16),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Uint32:
b := arrowArray.NewUint32Builder(pool)
defer b.Release()
b.AppendValues(
Range(Uint32, 0, 6).([]uint32),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Uint64:
b := arrowArray.NewUint64Builder(pool)
defer b.Release()
b.AppendValues(
Range(Uint64, 0, 6).([]uint64),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Float32:
b := arrowArray.NewFloat32Builder(pool)
defer b.Release()
b.AppendValues(
Range(Float32, 0, 6).([]float32),
taat.valid,
)
m = b.NewArray()
defer m.Release()
case arrow.PrimitiveTypes.Float64:
b := arrowArray.NewFloat64Builder(pool)
defer b.Release()
b.AppendValues(
Range(Float64, 0, 6).([]float64),
taat.valid,
)
m = b.NewArray()
defer m.Release()
default:
t.Errorf("DataType not supported in tests: %v", taat.dt)
}
T = FromArrowArray(m)
switch taat.dt {
case arrow.PrimitiveTypes.Int8:
conv := taat.data.([]int8)
assert.Equal(conv, T.Int8s(), "test %d: []int8 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Int16:
conv := taat.data.([]int16)
assert.Equal(conv, T.Int16s(), "test %d: []int16 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Int32:
conv := taat.data.([]int32)
assert.Equal(conv, T.Int32s(), "test %d: []int32 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Int64:
conv := taat.data.([]int64)
assert.Equal(conv, T.Int64s(), "test %d: []int64 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Uint8:
conv := taat.data.([]uint8)
assert.Equal(conv, T.Uint8s(), "test %d: []uint8 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Uint16:
conv := taat.data.([]uint16)
assert.Equal(conv, T.Uint16s(), "test %d: []uint16 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Uint32:
conv := taat.data.([]uint32)
assert.Equal(conv, T.Uint32s(), "test %d: []uint32 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Uint64:
conv := taat.data.([]uint64)
assert.Equal(conv, T.Uint64s(), "test %d: []uint64 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Float32:
conv := taat.data.([]float32)
assert.Equal(conv, T.Float32s(), "test %d: []float32 from %v", i, taat.dt)
case arrow.PrimitiveTypes.Float64:
conv := taat.data.([]float64)
assert.Equal(conv, T.Float64s(), "test %d: []float64 from %v", i, taat.dt)
default:
t.Errorf("DataType not supported in tests: %v", taat.dt)
}
for i, invalid := range T.Mask() {
assert.Equal(taat.valid[i], !invalid)
}
assert.True(T.Shape().Eq(taat.shape))
}
}
var toArrowTensorTests = []struct {
rowMajorData interface{}
colMajorData interface{}
rowMajorValid []bool
colMajorValid []bool
dt arrow.DataType
shape Shape
}{
{
rowMajorData: []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []int8{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Int8,
shape: Shape{2, 5},
},
{
rowMajorData: []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []int16{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Int16,
shape: Shape{2, 5},
},
{
rowMajorData: []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []int32{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Int32,
shape: Shape{2, 5},
},
{
rowMajorData: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []int64{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Int64,
shape: Shape{2, 5},
},
{
rowMajorData: []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []uint8{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Uint8,
shape: Shape{2, 5},
},
{
rowMajorData: []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []uint16{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Uint16,
shape: Shape{2, 5},
},
{
rowMajorData: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []uint32{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Uint32,
shape: Shape{2, 5},
},
{
rowMajorData: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []uint64{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Uint64,
shape: Shape{2, 5},
},
{
rowMajorData: []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []float32{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Float32,
shape: Shape{2, 5},
},
{
rowMajorData: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
colMajorData: []float64{1, 6, 2, 7, 3, 8, 4, 9, 5, 10},
rowMajorValid: []bool{true, false, true, false, true, false, true, false, true, false},
colMajorValid: []bool{true, false, false, true, true, false, false, true, true, false},
dt: arrow.PrimitiveTypes.Float64,
shape: Shape{2, 5},
},
}
func TestFromArrowTensor(t *testing.T) {
assert := assert.New(t)
var rowMajorT *Dense
var colMajorT *Dense
pool := memory.NewGoAllocator()
for i, taat := range toArrowTensorTests {
var rowMajorArr arrowArray.Interface
var colMajorArr arrowArray.Interface
var rowMajor arrowTensor.Interface
var colMajor arrowTensor.Interface
switch taat.dt {
case arrow.PrimitiveTypes.Int8:
b := arrowArray.NewInt8Builder(pool)
defer b.Release()
b.AppendValues(
[]int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewInt8(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewInt8(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int8SizeBytes), int64(arrow.Int8SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Int16:
b := arrowArray.NewInt16Builder(pool)
defer b.Release()
b.AppendValues(
[]int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewInt16(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewInt16(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int16SizeBytes), int64(arrow.Int16SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Int32:
b := arrowArray.NewInt32Builder(pool)
defer b.Release()
b.AppendValues(
[]int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewInt32(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewInt32(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int32SizeBytes), int64(arrow.Int32SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Int64:
b := arrowArray.NewInt64Builder(pool)
defer b.Release()
b.AppendValues(
[]int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewInt64(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewInt64(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Int64SizeBytes), int64(arrow.Int64SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Uint8:
b := arrowArray.NewUint8Builder(pool)
defer b.Release()
b.AppendValues(
[]uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewUint8(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewUint8(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint8SizeBytes), int64(arrow.Uint8SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Uint16:
b := arrowArray.NewUint16Builder(pool)
defer b.Release()
b.AppendValues(
[]uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewUint16(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewUint16(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint16SizeBytes), int64(arrow.Uint16SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Uint32:
b := arrowArray.NewUint32Builder(pool)
defer b.Release()
b.AppendValues(
[]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewUint32(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewUint32(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint32SizeBytes), int64(arrow.Uint32SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Uint64:
b := arrowArray.NewUint64Builder(pool)
defer b.Release()
b.AppendValues(
[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewUint64(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewUint64(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Uint64SizeBytes), int64(arrow.Uint64SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Float32:
b := arrowArray.NewFloat32Builder(pool)
defer b.Release()
b.AppendValues(
[]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewFloat32(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewFloat32(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Float32SizeBytes), int64(arrow.Float32SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
case arrow.PrimitiveTypes.Float64:
b := arrowArray.NewFloat64Builder(pool)
defer b.Release()
b.AppendValues(
[]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
rowMajorArr = b.NewArray()
defer rowMajorArr.Release()
b.AppendValues(
[]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
taat.rowMajorValid,
)
colMajorArr = b.NewArray()
defer colMajorArr.Release()
rowMajor = arrowTensor.NewFloat64(rowMajorArr.Data(), []int64{2, 5}, nil, []string{"x", "y"})
defer rowMajor.Release()
colMajor = arrowTensor.NewFloat64(colMajorArr.Data(), []int64{2, 5}, []int64{int64(arrow.Float64SizeBytes), int64(arrow.Float64SizeBytes * 2)}, []string{"x", "y"})
defer colMajor.Release()
default:
t.Errorf("DataType not supported in tests: %v", taat.dt)
}
rowMajorT = FromArrowTensor(rowMajor)
colMajorT = FromArrowTensor(colMajor)
assert.Equal(taat.rowMajorData, rowMajorT.Data(), "test %d: row major %v", i, taat.dt)
assert.Equal(len(taat.rowMajorValid), len(rowMajorT.Mask()), "test %d: row major %v mask length incorrect", i, taat.dt)
for i, invalid := range rowMajorT.Mask() {
assert.Equal(taat.rowMajorValid[i], !invalid, "test %d: row major %v mask value incorrect", i, taat.dt)
}
assert.True(colMajorT.Shape().Eq(taat.shape))
assert.Equal(taat.colMajorData, colMajorT.Data(), "test %d: column major %v", i, taat.dt)
assert.Equal(len(taat.colMajorValid), len(colMajorT.Mask()), "test %d: column major %v mask length incorrect", i, taat.dt)
for i, invalid := range colMajorT.Mask() {
assert.Equal(taat.colMajorValid[i], !invalid, "test %d: column major %v mask value incorrect", i, taat.dt)
}
assert.True(rowMajorT.Shape().Eq(taat.shape))
}
}
tensor-0.9.24/dense_format.go 0000664 0000000 0000000 00000023336 14265126151 0016127 0 ustar 00root root 0000000 0000000 package tensor
import (
"bytes"
"fmt"
"reflect"
"strconv"
)
var fmtFlags = [...]rune{'+', '-', '#', ' ', '0'}
var fmtByte = []byte("%")
var precByte = []byte(".")
var newline = []byte("\n")
var (
matFirstStart = []byte("⎡")
matFirstEnd = []byte("⎤\n")
matLastStart = []byte("⎣")
matLastEnd = []byte("⎦\n")
rowStart = []byte("⎢")
rowEnd = []byte("⎥\n")
vecStart = []byte("[")
vecEnd = []byte("]")
colVecStart = []byte("C[")
rowVecStart = []byte("R[")
hElisionCompact = []byte("⋯ ")
hElision = []byte("... ")
vElisionCompact = []byte(" ⋮ \n")
vElision = []byte(".\n.\n.\n")
ufVec = []byte("Vector")
ufMat = []byte("Matrix")
ufTensor = []byte("Tensor-")
hInvalid = []byte("--")
)
type fmtState struct {
fmt.State
buf *bytes.Buffer
pad []byte
hElision, vElision []byte
meta bool
flat bool
ext bool // extended (i.e no elision)
comp bool // compact
c rune // c is here mainly for struct packing reasons
w, p int // width and precision
base int // used only for int/byte arrays
rows, cols int
pr, pc int // printed row, printed col
}
func newFmtState(f fmt.State, c rune) *fmtState {
retVal := &fmtState{
State: f,
buf: bytes.NewBuffer(make([]byte, 10)),
c: c,
meta: f.Flag('+'),
flat: f.Flag('-'),
ext: f.Flag('#'),
comp: c == 's',
hElision: hElision,
vElision: vElision,
}
w, _ := f.Width()
p, _ := f.Precision()
retVal.w = w
retVal.p = p
return retVal
}
func (f *fmtState) originalFmt() string {
buf := bytes.NewBuffer(fmtByte)
for _, flag := range fmtFlags {
if f.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
// width
if w, ok := f.Width(); ok {
buf.WriteString(strconv.Itoa(w))
}
// precision
if p, ok := f.Precision(); ok {
buf.Write(precByte)
buf.WriteString(strconv.Itoa(p))
}
buf.WriteRune(f.c)
return buf.String()
}
func (f *fmtState) cleanFmt() string {
buf := bytes.NewBuffer(fmtByte)
// width
if w, ok := f.Width(); ok {
buf.WriteString(strconv.Itoa(w))
}
// precision
if p, ok := f.Precision(); ok {
buf.Write(precByte)
buf.WriteString(strconv.Itoa(p))
}
buf.WriteRune(f.c)
return buf.String()
}
// does the calculation for metadata
func (f *fmtState) populate(t *Dense) {
switch {
case t.IsVector():
f.rows = 1
f.cols = t.Size()
case t.IsScalarEquiv():
f.rows = 1
f.cols = 1
default:
f.rows = t.Shape()[t.Dims()-2]
f.cols = t.Shape()[t.Dims()-1]
}
switch {
case f.flat && f.ext:
f.pc = t.len()
case f.flat && f.comp:
f.pc = 5
f.hElision = hElisionCompact
case f.flat:
f.pc = 10
case f.ext:
f.pc = f.cols
f.pr = f.rows
case f.comp:
f.pc = MinInt(f.cols, 4)
f.pr = MinInt(f.rows, 4)
f.hElision = hElisionCompact
f.vElision = vElisionCompact
default:
f.pc = MinInt(f.cols, 8)
f.pr = MinInt(f.rows, 8)
}
}
func (f *fmtState) acceptableRune(d *Dense) {
if f.c == 'H' {
f.meta = true
return // accept H as header only
}
switch d.t.Kind() {
case reflect.Float64:
switch f.c {
case 'f', 'e', 'E', 'G', 'b':
default:
f.c = 'g'
}
case reflect.Float32:
switch f.c {
case 'f', 'e', 'E', 'G', 'b':
default:
f.c = 'g'
}
case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
switch f.c {
case 'b':
f.base = 2
case 'd':
f.base = 10
case 'o':
f.base = 8
case 'x', 'X':
f.base = 16
default:
f.base = 10
f.c = 'd'
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch f.c {
case 'b':
f.base = 2
case 'd':
f.base = 10
case 'o':
f.base = 8
case 'x', 'X':
f.base = 16
default:
f.base = 10
f.c = 'd'
}
case reflect.Bool:
f.c = 't'
default:
f.c = 'v'
}
}
func (f *fmtState) calcWidth(d *Dense) {
format := f.cleanFmt()
f.w = 0
masked := false
if d.IsMasked() {
if d.MaskedAny().(bool) {
masked = true
}
}
for i := 0; i < d.len(); i++ {
w, _ := fmt.Fprintf(f.buf, format, d.Get(i))
if masked {
if d.mask[i] {
w, _ = fmt.Fprintf(f.buf, "%s", hInvalid)
}
}
if w > f.w {
f.w = w
}
f.buf.Reset()
}
}
func (f *fmtState) makePad() {
f.pad = make([]byte, MaxInt(f.w, 2))
for i := range f.pad {
f.pad[i] = ' '
}
}
func (f *fmtState) writeHElision() {
f.Write(f.hElision)
}
func (f *fmtState) writeVElision() {
f.Write(f.vElision)
}
// Format implements fmt.Formatter. Formatting can be controlled with verbs and flags. All default Go verbs are supported and work as expected.
// By default, only 8 columns and rows are printed (the first and last 4 columns and rows); the middle columns and rows are elided.
// Special flags are:
// '-' for printing a flat array of values
// '+' for printing extra metadata before printing the tensor (it prints shape, stride and type, which are useful for debugging)
// '#' for printing the full tensor - there are no elisions. Overrides the 's' verb
//
// Special care also needs to be taken with the verb 's' - it prints a super compressed version of the tensor, printing only 4 cols and 4 rows.
func (t *Dense) Format(s fmt.State, c rune) {
if c == 'i' {
fmt.Fprintf(s, "INFO:\n\tAP: %v\n\tOLD: %v\n\tTRANS %v\n\tENGINE: %T\n", t.AP, t.old, t.transposeWith, t.e)
return
}
f := newFmtState(s, c)
if t.IsScalar() {
o := f.originalFmt()
fmt.Fprintf(f, o, t.Get(0))
return
}
f.acceptableRune(t)
f.calcWidth(t)
f.makePad()
f.populate(t)
if f.meta {
switch {
case t.IsVector():
f.Write(ufVec)
case t.Dims() == 2:
f.Write(ufMat)
default:
f.Write(ufTensor)
fmt.Fprintf(f, "%d", t.Dims())
}
fmt.Fprintf(f, " %v %v\n", t.Shape(), t.Strides())
}
if f.c == 'H' {
return
}
if !t.IsNativelyAccessible() {
fmt.Fprintf(f, "Inaccesible data")
return
}
format := f.cleanFmt()
if f.flat {
f.Write(vecStart)
switch {
case f.ext:
for i := 0; i < t.len(); i++ {
if !t.IsMasked() {
fmt.Fprintf(f, format, t.Get(i))
} else {
if t.mask[i] {
fmt.Fprintf(f, "%s", hInvalid)
} else {
fmt.Fprintf(f, format, t.Get(i))
}
}
if i < t.len()-1 {
f.Write(f.pad[:1])
}
}
case t.viewOf != 0:
it := IteratorFromDense(t)
var c, i int
var err error
for i, err = it.Next(); err == nil; i, err = it.Next() {
if !t.IsMasked() {
fmt.Fprintf(f, format, t.Get(i))
} else {
if t.mask[i] {
fmt.Fprintf(f, "%s", hInvalid)
} else {
fmt.Fprintf(f, format, t.Get(i))
}
}
f.Write(f.pad[:1])
c++
if c >= f.pc {
f.writeHElision()
break
}
}
if err != nil {
if _, noop := err.(NoOpError); !noop {
fmt.Fprintf(f, "ERROR ITERATING: %v", err)
}
}
default:
for i := 0; i < f.pc; i++ {
if !t.IsMasked() {
fmt.Fprintf(f, format, t.Get(i))
} else {
if t.mask[i] {
fmt.Fprintf(f, "%s", hInvalid)
} else {
fmt.Fprintf(f, format, t.Get(i))
}
}
f.Write(f.pad[:1])
}
if f.pc < t.len() {
f.writeHElision()
}
}
f.Write(vecEnd)
return
}
// standard stuff
it := NewIterator(&t.AP)
coord := it.Coord()
firstRow := true
firstVal := true
var lastRow, lastCol int
var expected int
for next, err := it.Next(); err == nil; next, err = it.Next() {
if next < expected {
continue
}
var col, row int
row = lastRow
col = lastCol
if f.rows > f.pr && row > f.pr/2 && row < f.rows-f.pr/2 {
continue
}
if firstVal {
if firstRow {
switch {
case t.IsColVec():
f.Write(colVecStart)
case t.IsRowVec():
f.Write(rowVecStart)
case t.IsVector():
f.Write(vecStart)
case t.IsScalarEquiv():
for i := 0; i < t.Dims(); i++ {
f.Write(vecStart)
}
default:
f.Write(matFirstStart)
}
} else {
var matLastRow bool
if !t.IsVector() {
matLastRow = coord[len(coord)-2] == f.rows-1
}
if matLastRow {
f.Write(matLastStart)
} else {
f.Write(rowStart)
}
}
firstVal = false
}
// actual printing of the value
if f.cols <= f.pc || (col < f.pc/2 || (col >= f.cols-f.pc/2)) {
var w int
if t.IsMasked() {
if t.mask[next] {
w, _ = fmt.Fprintf(f.buf, "%s", hInvalid)
} else {
w, _ = fmt.Fprintf(f.buf, format, t.Get(next))
}
} else {
w, _ = fmt.Fprintf(f.buf, format, t.Get(next))
}
f.Write(f.pad[:f.w-w]) // prepad
f.Write(f.buf.Bytes()) // write
if col < f.cols-1 { // pad with a space
f.Write(f.pad[:2])
}
f.buf.Reset()
} else if col == f.pc/2 {
f.writeHElision()
}
// done printing
// check for end of rows
if col == f.cols-1 {
eom := row == f.rows-1
switch {
case t.IsVector():
f.Write(vecEnd)
return
case t.IsScalarEquiv():
for i := 0; i < t.Dims(); i++ {
f.Write(vecEnd)
}
return
case firstRow:
f.Write(matFirstEnd)
case eom:
f.Write(matLastEnd)
if t.IsMatrix() {
return
}
// one newline for every dimension above 2
for i := t.Dims(); i > 2; i-- {
f.Write(newline)
}
default:
f.Write(rowEnd)
}
if firstRow {
firstRow = false
}
if eom {
firstRow = true
}
firstVal = true
// figure out elision
if f.rows > f.pr && row+1 == f.pr/2 {
expectedCoord := BorrowInts(len(coord))
copy(expectedCoord, coord)
expectedCoord[len(expectedCoord)-2] = f.rows - (f.pr / 2)
expected, _ = Ltoi(t.Shape(), t.Strides(), expectedCoord...)
ReturnInts(expectedCoord)
f.writeVElision()
}
}
// cleanup
switch {
case t.IsRowVec():
lastRow = coord[len(coord)-2]
lastCol = coord[len(coord)-1]
case t.IsColVec():
lastRow = coord[len(coord)-1]
lastCol = coord[len(coord)-2]
case t.IsVector():
lastCol = coord[len(coord)-1]
default:
lastRow = coord[len(coord)-2]
lastCol = coord[len(coord)-1]
}
}
}
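// An illustrative sketch (not part of the original source) of the verbs and flags
// described in the Format doc comment above:
//
//	T := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	fmt.Printf("%v\n", T)  // default view; large tensors have rows/columns elided
//	fmt.Printf("%+v\n", T) // metadata (shape, strides) printed first
//	fmt.Printf("%#v\n", T) // full tensor, no elision
//	fmt.Printf("%-v\n", T) // flat list of values
//	fmt.Printf("%s\n", T)  // compact: at most 4 rows and 4 columns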
tensor-0.9.24/dense_format_test.go 0000664 0000000 0000000 00000017534 14265126151 0017171 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestDense_Format(t *testing.T) {
// if os.Getenv("TRAVISTEST") == "true" {
// t.Skip("skipping format test; This is being run on TravisCI")
// }
assert := assert.New(t)
var T *Dense
var res, expected string
// Scalar
T = New(Of(Float64), FromScalar(3.14))
res = fmt.Sprintf("%3.3f", T)
assert.Equal("3.140", res)
// Scalar-equiv (vector)
T = New(WithBacking([]float64{3.14}), WithShape(1))
res = fmt.Sprintf("%3.3f", T)
assert.Equal("[3.140]", res)
// Scalar-equiv (n-dimensional)
T = New(WithBacking([]float64{3.14}), WithShape(1, 1, 1, 1))
res = fmt.Sprintf("%3.3f", T)
assert.Equal("[[[[3.140]]]]", res)
// short vector
T = New(Of(Float64), WithShape(4))
res = fmt.Sprintf("%v", T)
expected = "[0 0 0 0]"
assert.Equal(expected, res)
T = New(WithShape(2, 2), WithBacking([]float64{3.141515163242, 20, 5.15, 6.28}))
res = fmt.Sprintf("\n%v", T)
expected = `
⎡3.141515163242 20⎤
⎣ 5.15 6.28⎦
`
assert.Equal(expected, res, res)
// precision
res = fmt.Sprintf("\n%0.2v", T)
expected = `
⎡3.1 20⎤
⎣5.2 6.3⎦
`
assert.Equal(expected, res, res)
// with metadata
res = fmt.Sprintf("\n%+0.2v", T)
expected = `
Matrix (2, 2) [2 1]
⎡3.1 20⎤
⎣5.2 6.3⎦
`
assert.Equal(expected, res, res)
// many columns
T = New(WithShape(16, 14), WithBacking(Range(Float32, 0, 16*14)))
res = fmt.Sprintf("\n%v", T)
expected = `
⎡ 0 1 2 3 ... 10 11 12 13⎤
⎢ 14 15 16 17 ... 24 25 26 27⎥
⎢ 28 29 30 31 ... 38 39 40 41⎥
⎢ 42 43 44 45 ... 52 53 54 55⎥
.
.
.
⎢168 169 170 171 ... 178 179 180 181⎥
⎢182 183 184 185 ... 192 193 194 195⎥
⎢196 197 198 199 ... 206 207 208 209⎥
⎣210 211 212 213 ... 220 221 222 223⎦
`
assert.Equal(expected, res, "expected %v. Got %v", expected, res)
// many cols, rows, compressed
T = New(WithShape(16, 14), WithBacking(Range(Float64, 0, 16*14)))
res = fmt.Sprintf("\n%s", T)
// this clunky string addition thing is because some editors like to trim whitespace.
// There should be two spaces after ` ⋮` .
expected = `
⎡ 0 1 ⋯ 12 13⎤
⎢ 14 15 ⋯ 26 27⎥
` + ` ⋮ ` + `
` + `⎢196 197 ⋯ 208 209⎥
⎣210 211 ⋯ 222 223⎦
`
assert.Equal(expected, res, "expected %v. Got %v", expected, res)
// many cols, full
T = New(WithShape(8, 9), WithBacking(Range(Float64, 0, 8*9)))
res = fmt.Sprintf("\n%#v", T)
expected = `
⎡ 0 1 2 3 4 5 6 7 8⎤
⎢ 9 10 11 12 13 14 15 16 17⎥
⎢18 19 20 21 22 23 24 25 26⎥
⎢27 28 29 30 31 32 33 34 35⎥
⎢36 37 38 39 40 41 42 43 44⎥
⎢45 46 47 48 49 50 51 52 53⎥
⎢54 55 56 57 58 59 60 61 62⎥
⎣63 64 65 66 67 68 69 70 71⎦
`
assert.Equal(expected, res, res)
// vectors
T = New(Of(Int), WithShape(3, 1))
res = fmt.Sprintf("%v", T)
expected = `C[0 0 0]`
assert.Equal(expected, res)
T = New(Of(Int32), WithShape(1, 3))
res = fmt.Sprintf("%v", T)
expected = `R[0 0 0]`
assert.Equal(expected, res)
// 3+ Dimensional Tensors - super janky for now
T = New(WithShape(2, 3, 2), WithBacking(Range(Float64, 0, 2*3*2)))
res = fmt.Sprintf("\n%v", T)
expected = `
⎡ 0 1⎤
⎢ 2 3⎥
⎣ 4 5⎦
⎡ 6 7⎤
⎢ 8 9⎥
⎣10 11⎦
`
assert.Equal(expected, res, res)
// checking metadata + compression
res = fmt.Sprintf("\n%+s", T)
expected = `
Tensor-3 (2, 3, 2) [6 2 1]
⎡ 0 1⎤
⎢ 2 3⎥
⎣ 4 5⎦
⎡ 6 7⎤
⎢ 8 9⎥
⎣10 11⎦
`
assert.Equal(expected, res, res)
// check flat + compress
res = fmt.Sprintf("%-s", T)
expected = `[0 1 2 3 4 ⋯ ]`
assert.Equal(expected, res, res)
// check flat
res = fmt.Sprintf("%-3.3f", T)
expected = `[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000 8.000 9.000 ... ]`
assert.Equal(expected, res, res)
// check flat + extended
res = fmt.Sprintf("%-#v", T)
expected = `[0 1 2 3 4 5 6 7 8 9 10 11]`
assert.Equal(expected, res, res)
/* Test Views and Sliced Tensors */
var V Tensor
var err error
V, err = T.Slice(makeRS(1, 2))
if err != nil {
t.Error(err)
}
// flat mode for view
res = fmt.Sprintf("\n%-s", V)
expected = "\n[6 7 8 9 10 ⋯ ]"
assert.Equal(expected, res, res)
// standard
res = fmt.Sprintf("\n%+s", V)
expected = `
Matrix (3, 2) [2 1]
⎡ 6 7⎤
⎢ 8 9⎥
⎣10 11⎦
`
assert.Equal(expected, res, res)
// T[:, 1]
V, err = T.Slice(nil, ss(1))
res = fmt.Sprintf("\n%+s", V)
expected = `
Matrix (2, 2) [6 1]
⎡2 3⎤
⎣8 9⎦
`
assert.Equal(expected, res, res)
// transpose a view
V.T()
expected = `
Matrix (2, 2) [1 6]
⎡2 8⎤
⎣3 9⎦
`
res = fmt.Sprintf("\n%+s", V)
assert.Equal(expected, res, res)
// T[1, :, 1]
V, err = T.Slice(ss(1), nil, ss(1))
if err != nil {
t.Error(err)
}
expected = `Vector (3) [2]
[7881299347898368p-50 5066549580791808p-49 6192449487634432p-49]`
res = fmt.Sprintf("%+b", V)
assert.Equal(expected, res)
// T[1, 1, 1] - will result in a scalar
V, err = T.Slice(ss(1), ss(1), ss(1))
if err != nil {
t.Error(err)
}
res = fmt.Sprintf("%#3.3E", V)
expected = `9.000E+00`
assert.Equal(expected, res)
// on regular matrices
T = New(WithShape(3, 5), WithBacking(Range(Float64, 0, 3*5)))
V, err = T.Slice(ss(1))
if err != nil {
t.Error(err)
}
expected = `[5 6 7 8 9]`
res = fmt.Sprintf("%v", V)
assert.Equal(expected, res)
}
var basicFmtTests = []struct {
a interface{}
format string
correct string
}{
{Range(Float64, 0, 4), "%1.1f", "[0.0 1.0 2.0 3.0]"},
{Range(Float32, 0, 4), "%1.1f", "[0.0 1.0 2.0 3.0]"},
{Range(Int, 0, 4), "%b", "[ 0 1 10 11]"},
{Range(Int, 0, 4), "%d", "[0 1 2 3]"},
{Range(Int, 6, 10), "%o", "[ 6 7 10 11]"},
{Range(Int, 14, 18), "%x", "[ e f 10 11]"},
{Range(Int, 0, 4), "%f", "[0 1 2 3]"},
{Range(Int32, 0, 4), "%b", "[ 0 1 10 11]"},
{Range(Int32, 0, 4), "%d", "[0 1 2 3]"},
{Range(Int32, 6, 10), "%o", "[ 6 7 10 11]"},
{Range(Int32, 14, 18), "%x", "[ e f 10 11]"},
{Range(Int32, 0, 4), "%f", "[0 1 2 3]"},
{Range(Int64, 0, 4), "%b", "[ 0 1 10 11]"},
{Range(Int64, 0, 4), "%d", "[0 1 2 3]"},
{Range(Int64, 6, 10), "%o", "[ 6 7 10 11]"},
{Range(Int64, 14, 18), "%x", "[ e f 10 11]"},
{Range(Int64, 0, 4), "%f", "[0 1 2 3]"},
{Range(Byte, 0, 4), "%b", "[ 0 1 10 11]"},
{Range(Byte, 0, 4), "%d", "[0 1 2 3]"},
{Range(Byte, 6, 10), "%o", "[ 6 7 10 11]"},
{Range(Byte, 14, 18), "%x", "[ e f 10 11]"},
{Range(Byte, 0, 4), "%f", "[0 1 2 3]"},
{[]bool{true, false, true, false}, "%f", "[ true false true false]"},
{[]bool{true, false, true, false}, "%s", "[ true false true false]"},
}
func TestDense_Format_basics(t *testing.T) {
for _, v := range basicFmtTests {
T := New(WithBacking(v.a))
s := fmt.Sprintf(v.format, T)
if s != v.correct {
t.Errorf("Expected %q. Got %q", v.correct, s)
}
}
}
func TestDense_Format_Masked(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(1, 12))
data := T.Ints()
for i := 0; i < len(data); i++ {
data[i] = i
}
T.ResetMask(false)
for i := 0; i < 12; i += 2 {
T.mask[i] = true
}
s := fmt.Sprintf("%d", T)
assert.Equal(`R[-- 1 -- 3 ... -- 9 -- 11]`, s)
T = New(Of(Int), WithShape(2, 4, 16))
data = T.Ints()
for i := 0; i < len(data); i++ {
data[i] = i
}
T.ResetMask(false)
for i := 0; i < len(data); i += 2 {
T.mask[i] = true
}
s = fmt.Sprintf("%d", T)
assert.Equal(`⎡ -- 1 -- 3 ... -- 13 -- 15⎤
⎢ -- 17 -- 19 ... -- 29 -- 31⎥
⎢ -- 33 -- 35 ... -- 45 -- 47⎥
⎣ -- 49 -- 51 ... -- 61 -- 63⎦
⎡ -- 65 -- 67 ... -- 77 -- 79⎤
⎢ -- 81 -- 83 ... -- 93 -- 95⎥
⎢ -- 97 -- 99 ... -- 109 -- 111⎥
⎣ -- 113 -- 115 ... -- 125 -- 127⎦
`, s)
}
tensor-0.9.24/dense_generated.go 0000664 0000000 0000000 00000006563 14265126151 0016600 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import "reflect"
// Ones creates a *Dense with the provided shape and type
func Ones(dt Dtype, shape ...int) *Dense {
d := recycledDense(dt, shape)
switch d.t.Kind() {
case reflect.Int:
d.Memset(int(1))
case reflect.Int8:
d.Memset(int8(1))
case reflect.Int16:
d.Memset(int16(1))
case reflect.Int32:
d.Memset(int32(1))
case reflect.Int64:
d.Memset(int64(1))
case reflect.Uint:
d.Memset(uint(1))
case reflect.Uint8:
d.Memset(uint8(1))
case reflect.Uint16:
d.Memset(uint16(1))
case reflect.Uint32:
d.Memset(uint32(1))
case reflect.Uint64:
d.Memset(uint64(1))
case reflect.Float32:
d.Memset(float32(1))
case reflect.Float64:
d.Memset(float64(1))
case reflect.Complex64:
d.Memset(complex64(1))
case reflect.Complex128:
d.Memset(complex128(1))
case reflect.Bool:
d.Memset(true)
default:
// TODO: add a Oner interface
}
return d
}
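// An illustrative sketch (not part of the generated code) of Ones:
//
//	T := Ones(Float64, 2, 2) // 2x2 matrix of 1s
//	S := Ones(Bool)          // scalar-shaped tensor holding true
//	_, _ = T, S
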
// I creates the identity matrix (usually a square matrix) with 1s along the diagonal and zeroes elsewhere, like so:
// Matrix(4,4)
// ⎡1 0 0 0⎤
// ⎢0 1 0 0⎥
// ⎢0 0 1 0⎥
// ⎣0 0 0 1⎦
// While technically an identity matrix is a square matrix, in an attempt to keep feature parity with Numpy,
// the I() function allows you to create non-square matrices, as well as specify an index at which the diagonal starts.
//
// For example:
// T = I(Float64, 4, 4, 1)
// Yields:
// ⎡0 1 0 0⎤
// ⎢0 0 1 0⎥
// ⎢0 0 0 1⎥
// ⎣0 0 0 0⎦
//
// The index k can also be a negative number:
// T = I(Float64, 4, 4, -1)
// Yields:
// ⎡0 0 0 0⎤
// ⎢1 0 0 0⎥
// ⎢0 1 0 0⎥
// ⎣0 0 1 0⎦
func I(dt Dtype, r, c, k int) *Dense {
ret := New(Of(dt), WithShape(r, c))
i := k
if k < 0 {
i = (-k) * c
}
var s *Dense
var err error
end := c - k
if end > r {
s, err = sliceDense(ret, nil)
} else {
s, err = sliceDense(ret, rs{0, end, 1})
}
if err != nil {
panic(err)
}
var nexts []int
iter := newFlatIterator(&s.AP)
nexts, err = iter.Slice(rs{i, s.Size(), c + 1})
switch s.t.Kind() {
case reflect.Int:
data := s.Ints()
for _, v := range nexts {
data[v] = 1
}
case reflect.Int8:
data := s.Int8s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Int16:
data := s.Int16s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Int32:
data := s.Int32s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Int64:
data := s.Int64s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Uint:
data := s.Uints()
for _, v := range nexts {
data[v] = 1
}
case reflect.Uint8:
data := s.Uint8s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Uint16:
data := s.Uint16s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Uint32:
data := s.Uint32s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Uint64:
data := s.Uint64s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Float32:
data := s.Float32s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Float64:
data := s.Float64s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Complex64:
data := s.Complex64s()
for _, v := range nexts {
data[v] = 1
}
case reflect.Complex128:
data := s.Complex128s()
for _, v := range nexts {
data[v] = 1
}
}
// TODO: create Oner interface for custom types
return ret
}
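// An illustrative sketch (not part of the generated code) of I, matching the
// doc comment above:
//
//	eye := I(Float64, 4, 4, 0)   // 4x4 identity
//	up := I(Float64, 4, 5, 1)    // rectangular, diagonal shifted right by one
//	down := I(Float64, 4, 4, -1) // diagonal shifted down by one
//	_, _, _ = eye, up, down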
tensor-0.9.24/dense_generated_test.go 0000664 0000000 0000000 00000036524 14265126151 0017637 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
)
var onesTests = []struct {
of Dtype
shape Shape
correct interface{}
}{
{Int, ScalarShape(), int(1)},
{Int, Shape{2, 2}, []int{1, 1, 1, 1}},
{Int8, ScalarShape(), int8(1)},
{Int8, Shape{2, 2}, []int8{1, 1, 1, 1}},
{Int16, ScalarShape(), int16(1)},
{Int16, Shape{2, 2}, []int16{1, 1, 1, 1}},
{Int32, ScalarShape(), int32(1)},
{Int32, Shape{2, 2}, []int32{1, 1, 1, 1}},
{Int64, ScalarShape(), int64(1)},
{Int64, Shape{2, 2}, []int64{1, 1, 1, 1}},
{Uint, ScalarShape(), uint(1)},
{Uint, Shape{2, 2}, []uint{1, 1, 1, 1}},
{Uint8, ScalarShape(), uint8(1)},
{Uint8, Shape{2, 2}, []uint8{1, 1, 1, 1}},
{Uint16, ScalarShape(), uint16(1)},
{Uint16, Shape{2, 2}, []uint16{1, 1, 1, 1}},
{Uint32, ScalarShape(), uint32(1)},
{Uint32, Shape{2, 2}, []uint32{1, 1, 1, 1}},
{Uint64, ScalarShape(), uint64(1)},
{Uint64, Shape{2, 2}, []uint64{1, 1, 1, 1}},
{Float32, ScalarShape(), float32(1)},
{Float32, Shape{2, 2}, []float32{1, 1, 1, 1}},
{Float64, ScalarShape(), float64(1)},
{Float64, Shape{2, 2}, []float64{1, 1, 1, 1}},
{Complex64, ScalarShape(), complex64(1)},
{Complex64, Shape{2, 2}, []complex64{1, 1, 1, 1}},
{Complex128, ScalarShape(), complex128(1)},
{Complex128, Shape{2, 2}, []complex128{1, 1, 1, 1}},
{Bool, ScalarShape(), true},
{Bool, Shape{2, 2}, []bool{true, true, true, true}},
}
func TestOnes(t *testing.T) {
assert := assert.New(t)
for _, ot := range onesTests {
T := Ones(ot.of, ot.shape...)
assert.True(ot.shape.Eq(T.Shape()))
assert.Equal(ot.correct, T.Data())
}
}
// yes, it's a pun on eye tests, stop asking and go see your optometrist
var eyeTests = []struct {
E Dtype
R, C, K int
correct interface{}
}{
{Int, 4, 4, 0, []int{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Int, 4, 4, 1, []int{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Int, 4, 4, 2, []int{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int, 4, 4, 3, []int{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int, 4, 4, 4, []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int, 4, 4, -1, []int{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Int, 4, 4, -2, []int{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Int, 4, 4, -3, []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Int, 4, 4, -4, []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int, 4, 5, 0, []int{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Int, 4, 5, 1, []int{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Int, 4, 5, -1, []int{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Int8, 4, 4, 0, []int8{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Int8, 4, 4, 1, []int8{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Int8, 4, 4, 2, []int8{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int8, 4, 4, 3, []int8{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int8, 4, 4, 4, []int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int8, 4, 4, -1, []int8{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Int8, 4, 4, -2, []int8{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Int8, 4, 4, -3, []int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Int8, 4, 4, -4, []int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int8, 4, 5, 0, []int8{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Int8, 4, 5, 1, []int8{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Int8, 4, 5, -1, []int8{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Int16, 4, 4, 0, []int16{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Int16, 4, 4, 1, []int16{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Int16, 4, 4, 2, []int16{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int16, 4, 4, 3, []int16{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int16, 4, 4, 4, []int16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int16, 4, 4, -1, []int16{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Int16, 4, 4, -2, []int16{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Int16, 4, 4, -3, []int16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Int16, 4, 4, -4, []int16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int16, 4, 5, 0, []int16{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Int16, 4, 5, 1, []int16{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Int16, 4, 5, -1, []int16{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Int32, 4, 4, 0, []int32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Int32, 4, 4, 1, []int32{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Int32, 4, 4, 2, []int32{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int32, 4, 4, 3, []int32{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int32, 4, 4, 4, []int32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int32, 4, 4, -1, []int32{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Int32, 4, 4, -2, []int32{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Int32, 4, 4, -3, []int32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Int32, 4, 4, -4, []int32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int32, 4, 5, 0, []int32{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Int32, 4, 5, 1, []int32{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Int32, 4, 5, -1, []int32{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Int64, 4, 4, 0, []int64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Int64, 4, 4, 1, []int64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Int64, 4, 4, 2, []int64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int64, 4, 4, 3, []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int64, 4, 4, 4, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int64, 4, 4, -1, []int64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Int64, 4, 4, -2, []int64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Int64, 4, 4, -3, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Int64, 4, 4, -4, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Int64, 4, 5, 0, []int64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Int64, 4, 5, 1, []int64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Int64, 4, 5, -1, []int64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Uint, 4, 4, 0, []uint{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Uint, 4, 4, 1, []uint{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Uint, 4, 4, 2, []uint{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint, 4, 4, 3, []uint{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint, 4, 4, 4, []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint, 4, 4, -1, []uint{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Uint, 4, 4, -2, []uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Uint, 4, 4, -3, []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Uint, 4, 4, -4, []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint, 4, 5, 0, []uint{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Uint, 4, 5, 1, []uint{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Uint, 4, 5, -1, []uint{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Uint8, 4, 4, 0, []uint8{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Uint8, 4, 4, 1, []uint8{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Uint8, 4, 4, 2, []uint8{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint8, 4, 4, 3, []uint8{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint8, 4, 4, 4, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint8, 4, 4, -1, []uint8{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Uint8, 4, 4, -2, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Uint8, 4, 4, -3, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Uint8, 4, 4, -4, []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint8, 4, 5, 0, []uint8{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Uint8, 4, 5, 1, []uint8{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Uint8, 4, 5, -1, []uint8{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Uint16, 4, 4, 0, []uint16{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Uint16, 4, 4, 1, []uint16{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Uint16, 4, 4, 2, []uint16{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint16, 4, 4, 3, []uint16{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint16, 4, 4, 4, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint16, 4, 4, -1, []uint16{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Uint16, 4, 4, -2, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Uint16, 4, 4, -3, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Uint16, 4, 4, -4, []uint16{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint16, 4, 5, 0, []uint16{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Uint16, 4, 5, 1, []uint16{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Uint16, 4, 5, -1, []uint16{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Uint32, 4, 4, 0, []uint32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Uint32, 4, 4, 1, []uint32{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Uint32, 4, 4, 2, []uint32{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint32, 4, 4, 3, []uint32{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint32, 4, 4, 4, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint32, 4, 4, -1, []uint32{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Uint32, 4, 4, -2, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Uint32, 4, 4, -3, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Uint32, 4, 4, -4, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint32, 4, 5, 0, []uint32{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Uint32, 4, 5, 1, []uint32{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Uint32, 4, 5, -1, []uint32{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Uint64, 4, 4, 0, []uint64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Uint64, 4, 4, 1, []uint64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Uint64, 4, 4, 2, []uint64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint64, 4, 4, 3, []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint64, 4, 4, 4, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint64, 4, 4, -1, []uint64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Uint64, 4, 4, -2, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Uint64, 4, 4, -3, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Uint64, 4, 4, -4, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Uint64, 4, 5, 0, []uint64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Uint64, 4, 5, 1, []uint64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Uint64, 4, 5, -1, []uint64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Float32, 4, 4, 0, []float32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Float32, 4, 4, 1, []float32{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Float32, 4, 4, 2, []float32{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float32, 4, 4, 3, []float32{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float32, 4, 4, 4, []float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float32, 4, 4, -1, []float32{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Float32, 4, 4, -2, []float32{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Float32, 4, 4, -3, []float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Float32, 4, 4, -4, []float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float32, 4, 5, 0, []float32{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Float32, 4, 5, 1, []float32{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Float32, 4, 5, -1, []float32{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Float64, 4, 4, 0, []float64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Float64, 4, 4, 1, []float64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Float64, 4, 4, 2, []float64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float64, 4, 4, 3, []float64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float64, 4, 4, 4, []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float64, 4, 4, -1, []float64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Float64, 4, 4, -2, []float64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Float64, 4, 4, -3, []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Float64, 4, 4, -4, []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Float64, 4, 5, 0, []float64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Float64, 4, 5, 1, []float64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Float64, 4, 5, -1, []float64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Complex64, 4, 4, 0, []complex64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Complex64, 4, 4, 1, []complex64{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Complex64, 4, 4, 2, []complex64{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex64, 4, 4, 3, []complex64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex64, 4, 4, 4, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex64, 4, 4, -1, []complex64{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Complex64, 4, 4, -2, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Complex64, 4, 4, -3, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Complex64, 4, 4, -4, []complex64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex64, 4, 5, 0, []complex64{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Complex64, 4, 5, 1, []complex64{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Complex64, 4, 5, -1, []complex64{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
{Complex128, 4, 4, 0, []complex128{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}},
{Complex128, 4, 4, 1, []complex128{0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0}},
{Complex128, 4, 4, 2, []complex128{0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex128, 4, 4, 3, []complex128{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex128, 4, 4, 4, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex128, 4, 4, -1, []complex128{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}},
{Complex128, 4, 4, -2, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0}},
{Complex128, 4, 4, -3, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}},
{Complex128, 4, 4, -4, []complex128{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{Complex128, 4, 5, 0, []complex128{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0}},
{Complex128, 4, 5, 1, []complex128{0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1}},
{Complex128, 4, 5, -1, []complex128{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0}},
}
func TestI(t *testing.T) {
assert := assert.New(t)
var T Tensor
for i, it := range eyeTests {
T = I(it.E, it.R, it.C, it.K)
assert.True(Shape{it.R, it.C}.Eq(T.Shape()))
assert.Equal(it.correct, T.Data(), "Test %d-R: %d, C: %d K: %d", i, it.R, it.C, it.K)
}
}
tensor-0.9.24/dense_getset_test.go 0000664 0000000 0000000 00000015720 14265126151 0017167 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"reflect"
"testing"
"testing/quick"
"github.com/stretchr/testify/assert"
)
var denseSetGetTests = []struct {
of Dtype
data interface{}
set interface{}
correct []interface{}
}{
{Bool, []bool{true, false, true, false, true, false}, false, []interface{}{bool(true), bool(false), bool(true), bool(false), bool(true), bool(false)}},
{Int, []int{0, 1, 2, 3, 4, 5}, 45, []interface{}{int(0), int(1), int(2), int(3), int(4), int(5)}},
{Int8, []int8{0, 1, 2, 3, 4, 5}, 45, []interface{}{int8(0), int8(1), int8(2), int8(3), int8(4), int8(5)}},
{Int16, []int16{0, 1, 2, 3, 4, 5}, 45, []interface{}{int16(0), int16(1), int16(2), int16(3), int16(4), int16(5)}},
{Int32, []int32{0, 1, 2, 3, 4, 5}, 45, []interface{}{int32(0), int32(1), int32(2), int32(3), int32(4), int32(5)}},
{Int64, []int64{0, 1, 2, 3, 4, 5}, 45, []interface{}{int64(0), int64(1), int64(2), int64(3), int64(4), int64(5)}},
{Uint, []uint{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint(0), uint(1), uint(2), uint(3), uint(4), uint(5)}},
{Uint8, []uint8{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint8(0), uint8(1), uint8(2), uint8(3), uint8(4), uint8(5)}},
{Uint16, []uint16{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint16(0), uint16(1), uint16(2), uint16(3), uint16(4), uint16(5)}},
{Uint32, []uint32{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint32(0), uint32(1), uint32(2), uint32(3), uint32(4), uint32(5)}},
{Uint64, []uint64{0, 1, 2, 3, 4, 5}, 45, []interface{}{uint64(0), uint64(1), uint64(2), uint64(3), uint64(4), uint64(5)}},
{Float32, []float32{0, 1, 2, 3, 4, 5}, 45, []interface{}{float32(0), float32(1), float32(2), float32(3), float32(4), float32(5)}},
{Float64, []float64{0, 1, 2, 3, 4, 5}, 45, []interface{}{float64(0), float64(1), float64(2), float64(3), float64(4), float64(5)}},
{Complex64, []complex64{0, 1, 2, 3, 4, 5}, 45, []interface{}{complex64(0), complex64(1), complex64(2), complex64(3), complex64(4), complex64(5)}},
{Complex128, []complex128{0, 1, 2, 3, 4, 5}, 45, []interface{}{complex128(0), complex128(1), complex128(2), complex128(3), complex128(4), complex128(5)}},
{String, []string{"zero", "one", "two", "three", "four", "five"}, "HELLO WORLD", []interface{}{string("zero"), string("one"), string("two"), string("three"), string("four"), string("five")}},
}
func TestDense_setget(t *testing.T) {
assert := assert.New(t)
for _, gts := range denseSetGetTests {
T := New(Of(gts.of), WithShape(len(gts.correct)))
for i, v := range gts.correct {
T.Set(i, v)
got := T.Get(i)
assert.Equal(v, got)
}
}
}
var denseMemsetTests = []struct {
of Dtype
data interface{}
val interface{}
shape Shape
correct interface{}
}{
{Bool, []bool{true, false, true, false, true, false}, bool(false), Shape{2, 3}, []bool{false, false, false, false, false, false}},
{Int, []int{0, 1, 2, 3, 4, 5}, int(45), Shape{2, 3}, []int{45, 45, 45, 45, 45, 45}},
{Int8, []int8{0, 1, 2, 3, 4, 5}, int8(45), Shape{2, 3}, []int8{45, 45, 45, 45, 45, 45}},
{Int16, []int16{0, 1, 2, 3, 4, 5}, int16(45), Shape{2, 3}, []int16{45, 45, 45, 45, 45, 45}},
{Int32, []int32{0, 1, 2, 3, 4, 5}, int32(45), Shape{2, 3}, []int32{45, 45, 45, 45, 45, 45}},
{Int64, []int64{0, 1, 2, 3, 4, 5}, int64(45), Shape{2, 3}, []int64{45, 45, 45, 45, 45, 45}},
{Uint, []uint{0, 1, 2, 3, 4, 5}, uint(45), Shape{2, 3}, []uint{45, 45, 45, 45, 45, 45}},
{Uint8, []uint8{0, 1, 2, 3, 4, 5}, uint8(45), Shape{2, 3}, []uint8{45, 45, 45, 45, 45, 45}},
{Uint16, []uint16{0, 1, 2, 3, 4, 5}, uint16(45), Shape{2, 3}, []uint16{45, 45, 45, 45, 45, 45}},
{Uint32, []uint32{0, 1, 2, 3, 4, 5}, uint32(45), Shape{2, 3}, []uint32{45, 45, 45, 45, 45, 45}},
{Uint64, []uint64{0, 1, 2, 3, 4, 5}, uint64(45), Shape{2, 3}, []uint64{45, 45, 45, 45, 45, 45}},
{Float32, []float32{0, 1, 2, 3, 4, 5}, float32(45), Shape{2, 3}, []float32{45, 45, 45, 45, 45, 45}},
{Float64, []float64{0, 1, 2, 3, 4, 5}, float64(45), Shape{2, 3}, []float64{45, 45, 45, 45, 45, 45}},
{Complex64, []complex64{0, 1, 2, 3, 4, 5}, complex64(45), Shape{2, 3}, []complex64{45, 45, 45, 45, 45, 45}},
{Complex128, []complex128{0, 1, 2, 3, 4, 5}, complex128(45), Shape{2, 3}, []complex128{45, 45, 45, 45, 45, 45}},
{String, []string{"zero", "one", "two", "three", "four", "five"}, string("HELLO WORLD"), Shape{2, 3}, []string{"HELLO WORLD", "HELLO WORLD", "HELLO WORLD", "HELLO WORLD", "HELLO WORLD", "HELLO WORLD"}},
}
func TestDense_memset(t *testing.T) {
assert := assert.New(t)
for _, mts := range denseMemsetTests {
T := New(Of(mts.of), WithShape(mts.shape...))
T.Memset(mts.val)
assert.Equal(mts.correct, T.Data())
T = New(Of(mts.of), WithShape(mts.shape...), WithBacking(mts.data))
T2, _ := T.Slice(nil)
T2.Memset(mts.val)
assert.Equal(mts.correct, T2.Data())
}
}
var denseZeroTests = []struct {
of Dtype
data interface{}
correct interface{}
}{
{Bool, []bool{true, false, true, false, true, false}, []bool{false, false, false, false, false, false}},
{Int, []int{0, 1, 2, 3, 4, 5}, []int{0, 0, 0, 0, 0, 0}},
{Int8, []int8{0, 1, 2, 3, 4, 5}, []int8{0, 0, 0, 0, 0, 0}},
{Int16, []int16{0, 1, 2, 3, 4, 5}, []int16{0, 0, 0, 0, 0, 0}},
{Int32, []int32{0, 1, 2, 3, 4, 5}, []int32{0, 0, 0, 0, 0, 0}},
{Int64, []int64{0, 1, 2, 3, 4, 5}, []int64{0, 0, 0, 0, 0, 0}},
{Uint, []uint{0, 1, 2, 3, 4, 5}, []uint{0, 0, 0, 0, 0, 0}},
{Uint8, []uint8{0, 1, 2, 3, 4, 5}, []uint8{0, 0, 0, 0, 0, 0}},
{Uint16, []uint16{0, 1, 2, 3, 4, 5}, []uint16{0, 0, 0, 0, 0, 0}},
{Uint32, []uint32{0, 1, 2, 3, 4, 5}, []uint32{0, 0, 0, 0, 0, 0}},
{Uint64, []uint64{0, 1, 2, 3, 4, 5}, []uint64{0, 0, 0, 0, 0, 0}},
{Float32, []float32{0, 1, 2, 3, 4, 5}, []float32{0, 0, 0, 0, 0, 0}},
{Float64, []float64{0, 1, 2, 3, 4, 5}, []float64{0, 0, 0, 0, 0, 0}},
{Complex64, []complex64{0, 1, 2, 3, 4, 5}, []complex64{0, 0, 0, 0, 0, 0}},
{Complex128, []complex128{0, 1, 2, 3, 4, 5}, []complex128{0, 0, 0, 0, 0, 0}},
{String, []string{"zero", "one", "two", "three", "four", "five"}, []string{"", "", "", "", "", ""}},
}
func TestDense_Zero(t *testing.T) {
assert := assert.New(t)
for _, mts := range denseZeroTests {
typ := reflect.TypeOf(mts.data)
val := reflect.ValueOf(mts.data)
data := reflect.MakeSlice(typ, val.Len(), val.Cap())
reflect.Copy(data, val)
T := New(Of(mts.of), WithBacking(data.Interface()))
T.Zero()
assert.Equal(mts.correct, T.Data())
T = New(Of(mts.of), WithBacking(mts.data))
T2, _ := T.Slice(nil)
T2.Zero()
assert.Equal(mts.correct, T2.Data())
}
}
func TestDense_Eq(t *testing.T) {
eqFn := func(q *Dense) bool {
a := q.Clone().(*Dense)
if !q.Eq(a) {
t.Error("Expected a clone to be exactly equal")
return false
}
a.Zero()
// Bools are excluded because the probability of having an array of all false is very high
if q.Eq(a) && a.len() > 3 && a.Dtype() != Bool {
t.Errorf("a %v", a.Data())
t.Errorf("q %v", q.Data())
t.Error("Expected *Dense to be not equal")
return false
}
return true
}
if err := quick.Check(eqFn, &quick.Config{Rand: newRand(), MaxCount: quickchecks}); err != nil {
t.Errorf("Failed to perform equality checks")
}
}
tensor-0.9.24/dense_io.go 0000664 0000000 0000000 00000047716 14265126151 0015256 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"bytes"
"encoding/binary"
"encoding/csv"
"encoding/gob"
"fmt"
"io"
"reflect"
"regexp"
"strconv"
"strings"
flatbuffers "github.com/google/flatbuffers/go"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/serialization/fb"
"gorgonia.org/tensor/internal/serialization/pb"
)
/* GOB SERIALIZATION */
// GobEncode implements gob.GobEncoder
func (t *Dense) GobEncode() (p []byte, err error) {
var buf bytes.Buffer
encoder := gob.NewEncoder(&buf)
if err = encoder.Encode(t.Shape()); err != nil {
return
}
if err = encoder.Encode(t.Strides()); err != nil {
return
}
if err = encoder.Encode(t.AP.o); err != nil {
return
}
if err = encoder.Encode(t.AP.Δ); err != nil {
return
}
if err = encoder.Encode(t.mask); err != nil {
return
}
data := t.Data()
if err = encoder.Encode(&data); err != nil {
return
}
return buf.Bytes(), err
}
// GobDecode implements gob.GobDecoder
func (t *Dense) GobDecode(p []byte) (err error) {
buf := bytes.NewBuffer(p)
decoder := gob.NewDecoder(buf)
var shape Shape
if err = decoder.Decode(&shape); err != nil {
return
}
var strides []int
if err = decoder.Decode(&strides); err != nil {
return
}
var o DataOrder
var tr Triangle
if err = decoder.Decode(&o); err == nil {
if err = decoder.Decode(&tr); err != nil {
return
}
}
t.AP.Init(shape, strides)
t.AP.o = o
t.AP.Δ = tr
var mask []bool
if err = decoder.Decode(&mask); err != nil {
return
}
var data interface{}
if err = decoder.Decode(&data); err != nil {
return
}
t.fromSlice(data)
t.addMask(mask)
t.fix()
if t.e == nil {
t.e = StdEng{}
}
return t.sanity()
}
/* NPY SERIALIZATION */
var npyDescRE = regexp.MustCompile(`'descr':\s*'([^']*)'`)
var rowOrderRE = regexp.MustCompile(`'fortran_order':\s*(False|True)`)
var shapeRE = regexp.MustCompile(`'shape':\s*\(([^\(]*)\)`)
type binaryWriter struct {
io.Writer
err error
seq int
}
func (w *binaryWriter) w(x interface{}) {
if w.err != nil {
return
}
w.err = binary.Write(w, binary.LittleEndian, x)
w.seq++
}
func (w *binaryWriter) Err() error {
if w.err == nil {
return nil
}
return errors.Wrapf(w.err, "Sequence %d", w.seq)
}
type binaryReader struct {
io.Reader
err error
seq int
}
func (r *binaryReader) Read(data interface{}) {
if r.err != nil {
return
}
r.err = binary.Read(r.Reader, binary.LittleEndian, data)
r.seq++
}
func (r *binaryReader) Err() error {
if r.err == nil {
return nil
}
return errors.Wrapf(r.err, "Sequence %d", r.seq)
}
// WriteNpy writes the *Tensor as a numpy compatible serialized file.
//
// The format is very well documented here:
// http://docs.scipy.org/doc/numpy/neps/npy-format.html
//
// Gorgonia specifically uses Version 1.0, as 65535 bytes should be more than enough for the headers.
// The values are written in little endian order, because let's face it -
// 90% of the world's computers are running on x86+ processors.
//
// This method does not close the writer. Closing (if needed) is deferred to the caller
// If tensor is masked, invalid values are replaced by the default fill value.
func (t *Dense) WriteNpy(w io.Writer) (err error) {
var npdt string
if npdt, err = t.t.numpyDtype(); err != nil {
return
}
var header string
if t.Dims() == 1 {
// when t is a 1D vector, numpy expects "(N,)" instead of "(N)" which t.Shape() returns.
header = "{'descr': '<%v', 'fortran_order': False, 'shape': (%d,)}"
header = fmt.Sprintf(header, npdt, t.Shape()[0])
} else {
header = "{'descr': '<%v', 'fortran_order': False, 'shape': %v}"
header = fmt.Sprintf(header, npdt, t.Shape())
}
padding := 16 - ((10 + len(header)) % 16)
if padding > 0 {
header = header + strings.Repeat(" ", padding)
}
bw := binaryWriter{Writer: w}
bw.Write([]byte("\x93NUMPY")) // stupid magic
bw.w(byte(1)) // major version
bw.w(byte(0)) // minor version
bw.w(uint16(len(header))) // 2 bytes to denote header length
if err = bw.Err(); err != nil {
return err
}
bw.Write([]byte(header))
bw.seq = 0
if t.IsMasked() {
fillval := t.FillValue()
it := FlatMaskedIteratorFromDense(t)
for i, err := it.Next(); err == nil; i, err = it.Next() {
if t.mask[i] {
bw.w(fillval)
} else {
bw.w(t.Get(i))
}
}
} else {
for i := 0; i < t.len(); i++ {
bw.w(t.Get(i))
}
}
return bw.Err()
}
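// npyRoundTripSketch is a minimal sketch (not part of the original API) of how
// WriteNpy and ReadNpy are typically paired in-memory. The shape and backing
// values are illustrative assumptions only.
func npyRoundTripSketch() error {
	T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	var buf bytes.Buffer
	// writes the version 1.0 header followed by the data in little endian order
	if err := T.WriteNpy(&buf); err != nil {
		return err
	}
	// reads the buffer back into a fresh *Dense with the same shape and data
	T2 := new(Dense)
	return T2.ReadNpy(&buf)
}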
// ReadNpy reads NumPy formatted files into a *Dense
func (t *Dense) ReadNpy(r io.Reader) (err error) {
br := binaryReader{Reader: r}
var magic [6]byte
if br.Read(magic[:]); string(magic[:]) != "\x93NUMPY" {
return errors.Errorf("Not a numpy file. Got %q as the magic number instead", string(magic[:]))
}
var version, minor byte
if br.Read(&version); version != 1 {
return errors.New("Only verion 1.0 of numpy's serialization format is currently supported (65535 bytes ought to be enough for a header)")
}
if br.Read(&minor); minor != 0 {
return errors.New("Only verion 1.0 of numpy's serialization format is currently supported (65535 bytes ought to be enough for a header)")
}
var headerLen uint16
br.Read(&headerLen)
header := make([]byte, int(headerLen))
br.Read(header)
if err = br.Err(); err != nil {
return
}
// extract stuff from header
var match [][]byte
if match = npyDescRE.FindSubmatch(header); match == nil {
return errors.New("No dtype information in npy file")
}
// TODO: check for endianness. For now we assume everything is little endian
if t.t, err = fromNumpyDtype(string(match[1][1:])); err != nil {
return
}
if match = rowOrderRE.FindSubmatch(header); match == nil {
return errors.New("No Row Order information found in the numpy file")
}
if string(match[1]) != "False" {
return errors.New("Cannot yet read from Fortran Ordered Numpy files")
}
if match = shapeRE.FindSubmatch(header); match == nil {
return errors.New("No shape information found in npy file")
}
sizesStr := strings.Split(string(match[1]), ",")
var shape Shape
for _, s := range sizesStr {
s = strings.Trim(s, " ")
if len(s) == 0 {
break
}
var size int
if size, err = strconv.Atoi(s); err != nil {
return
}
shape = append(shape, size)
}
size := shape.TotalSize()
if t.e == nil {
t.e = StdEng{}
}
t.makeArray(size)
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Int8:
data := t.Int8s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Int16:
data := t.Int16s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Int32:
data := t.Int32s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Int64:
data := t.Int64s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Uint:
data := t.Uints()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Uint8:
data := t.Uint8s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Uint16:
data := t.Uint16s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Uint32:
data := t.Uint32s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Uint64:
data := t.Uint64s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Float32:
data := t.Float32s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Float64:
data := t.Float64s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Complex64:
data := t.Complex64s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
case reflect.Complex128:
data := t.Complex128s()
for i := 0; i < size; i++ {
br.Read(&data[i])
}
}
if err = br.Err(); err != nil {
return err
}
t.AP.zeroWithDims(len(shape))
t.setShape(shape...)
t.fix()
return t.sanity()
}
/* CSV SERIALIZATION */
// WriteCSV writes the *Dense to a CSV. It accepts an optional string formatting ("%v", "%f", etc...), which controls what is written to the CSV.
// If tensor is masked, invalid values are replaced by the default fill value.
func (t *Dense) WriteCSV(w io.Writer, formats ...string) (err error) {
// checks:
if !t.IsMatrix() {
// error
err = errors.Errorf("Cannot write *Dense to CSV. Expected number of dimensions: <=2, T has got %d dimensions (Shape: %v)", t.Dims(), t.Shape())
return
}
format := "%v"
if len(formats) > 0 {
format = formats[0]
}
cw := csv.NewWriter(w)
it := IteratorFromDense(t)
coord := it.Coord()
// rows := t.Shape()[0]
cols := t.Shape()[1]
record := make([]string, 0, cols)
var i, k, lastCol int
isMasked := t.IsMasked()
fillval := t.FillValue()
fillstr := fmt.Sprintf(format, fillval)
for i, err = it.Next(); err == nil; i, err = it.Next() {
record = append(record, fmt.Sprintf(format, t.Get(i)))
if isMasked {
if t.mask[i] {
record[k] = fillstr
}
k++
}
if lastCol == cols-1 {
if err = cw.Write(record); err != nil {
// TODO: wrap errors
return
}
cw.Flush()
record = record[:0]
}
// cleanup
switch {
case t.IsRowVec():
// lastRow = coord[len(coord)-2]
lastCol = coord[len(coord)-1]
case t.IsColVec():
// lastRow = coord[len(coord)-1]
lastCol = coord[len(coord)-2]
case t.IsVector():
lastCol = coord[len(coord)-1]
default:
// lastRow = coord[len(coord)-2]
lastCol = coord[len(coord)-1]
}
}
return nil
}
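// writeCSVSketch is a minimal sketch (not part of the original API) of using
// WriteCSV with an optional format string; the matrix values are illustrative
// assumptions only.
func writeCSVSketch(w io.Writer) error {
	T := New(WithShape(2, 2), WithBacking([]float64{1, 5, 10, -1}))
	// "%f" controls how each element is printed; omitting it uses the default "%v"
	return T.WriteCSV(w, "%f")
}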
// convFromStrs converts a []string to a slice of the provided Dtype, appending the converted values onto the provided backing slice.
// If into is nil, a new backing slice is created.
func convFromStrs(to Dtype, record []string, into interface{}) (interface{}, error) {
var err error
switch to.Kind() {
case reflect.Int:
retVal := make([]int, len(record))
var backing []int
if into == nil {
backing = make([]int, 0, len(record))
} else {
backing = into.([]int)
}
for i, v := range record {
var i64 int64
if i64, err = strconv.ParseInt(v, 10, 0); err != nil {
return nil, err
}
retVal[i] = int(i64)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Int8:
retVal := make([]int8, len(record))
var backing []int8
if into == nil {
backing = make([]int8, 0, len(record))
} else {
backing = into.([]int8)
}
for i, v := range record {
var i64 int64
if i64, err = strconv.ParseInt(v, 10, 8); err != nil {
return nil, err
}
retVal[i] = int8(i64)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Int16:
retVal := make([]int16, len(record))
var backing []int16
if into == nil {
backing = make([]int16, 0, len(record))
} else {
backing = into.([]int16)
}
for i, v := range record {
var i64 int64
if i64, err = strconv.ParseInt(v, 10, 16); err != nil {
return nil, err
}
retVal[i] = int16(i64)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Int32:
retVal := make([]int32, len(record))
var backing []int32
if into == nil {
backing = make([]int32, 0, len(record))
} else {
backing = into.([]int32)
}
for i, v := range record {
var i64 int64
if i64, err = strconv.ParseInt(v, 10, 32); err != nil {
return nil, err
}
retVal[i] = int32(i64)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Int64:
retVal := make([]int64, len(record))
var backing []int64
if into == nil {
backing = make([]int64, 0, len(record))
} else {
backing = into.([]int64)
}
for i, v := range record {
var i64 int64
if i64, err = strconv.ParseInt(v, 10, 64); err != nil {
return nil, err
}
retVal[i] = int64(i64)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Uint:
retVal := make([]uint, len(record))
var backing []uint
if into == nil {
backing = make([]uint, 0, len(record))
} else {
backing = into.([]uint)
}
for i, v := range record {
var u uint64
if u, err = strconv.ParseUint(v, 10, 0); err != nil {
return nil, err
}
retVal[i] = uint(u)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Uint8:
retVal := make([]uint8, len(record))
var backing []uint8
if into == nil {
backing = make([]uint8, 0, len(record))
} else {
backing = into.([]uint8)
}
for i, v := range record {
var u uint64
if u, err = strconv.ParseUint(v, 10, 8); err != nil {
return nil, err
}
retVal[i] = uint8(u)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Uint16:
retVal := make([]uint16, len(record))
var backing []uint16
if into == nil {
backing = make([]uint16, 0, len(record))
} else {
backing = into.([]uint16)
}
for i, v := range record {
var u uint64
if u, err = strconv.ParseUint(v, 10, 16); err != nil {
return nil, err
}
retVal[i] = uint16(u)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Uint32:
retVal := make([]uint32, len(record))
var backing []uint32
if into == nil {
backing = make([]uint32, 0, len(record))
} else {
backing = into.([]uint32)
}
for i, v := range record {
var u uint64
if u, err = strconv.ParseUint(v, 10, 32); err != nil {
return nil, err
}
retVal[i] = uint32(u)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Uint64:
retVal := make([]uint64, len(record))
var backing []uint64
if into == nil {
backing = make([]uint64, 0, len(record))
} else {
backing = into.([]uint64)
}
for i, v := range record {
var u uint64
if u, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
retVal[i] = uint64(u)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Float32:
retVal := make([]float32, len(record))
var backing []float32
if into == nil {
backing = make([]float32, 0, len(record))
} else {
backing = into.([]float32)
}
for i, v := range record {
var f float64
if f, err = strconv.ParseFloat(v, 32); err != nil {
return nil, err
}
retVal[i] = float32(f)
}
backing = append(backing, retVal...)
return backing, nil
case reflect.Float64:
retVal := make([]float64, len(record))
var backing []float64
if into == nil {
backing = make([]float64, 0, len(record))
} else {
backing = into.([]float64)
}
for i, v := range record {
if retVal[i], err = strconv.ParseFloat(v, 64); err != nil {
return nil, err
}
}
backing = append(backing, retVal...)
return backing, nil
case reflect.String:
var backing []string
if into == nil {
backing = make([]string, 0, len(record))
} else {
backing = into.([]string)
}
backing = append(backing, record...)
return backing, nil
default:
return nil, errors.Errorf(methodNYI, "convFromStrs", to)
}
}
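// convFromStrsSketch is a minimal sketch (not part of the original API) of how
// convFromStrs appends converted values onto an existing backing slice; the
// record values are illustrative assumptions only.
func convFromStrsSketch() (interface{}, error) {
	existing := []float64{1, 2}
	// returns existing with the parsed values appended, i.e. []float64{1, 2, 3, 4}
	return convFromStrs(Float64, []string{"3", "4"}, existing)
}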
// ReadCSV reads a CSV into a *Dense. It will override the underlying data.
//
// BUG(chewxy): reading CSV doesn't handle CSVs with different columns per row yet.
func (t *Dense) ReadCSV(r io.Reader, opts ...FuncOpt) (err error) {
fo := ParseFuncOpts(opts...)
as := fo.As()
if as.Type == nil {
as = Float64
}
cr := csv.NewReader(r)
var record []string
var rows, cols int
var backing interface{}
for {
record, err = cr.Read()
if err == io.EOF {
break
} else if err != nil {
return
}
if backing, err = convFromStrs(as, record, backing); err != nil {
return
}
cols = len(record)
rows++
}
t.fromSlice(backing)
t.AP.zero()
t.AP.SetShape(rows, cols)
return nil
}
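// readCSVSketch is a minimal sketch (not part of the original API) of reading a
// CSV into a fresh *Dense; the use of As(Float64) to pick the element type is an
// illustrative assumption only.
func readCSVSketch(r io.Reader) (*Dense, error) {
	T := new(Dense)
	// the resulting shape is (rows, cols) as counted from the CSV records
	if err := T.ReadCSV(r, As(Float64)); err != nil {
		return nil, err
	}
	return T, nil
}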
/* FB SERIALIZATION */
// FBEncode encodes to a byte slice using flatbuffers.
//
// Only natively accessible data can be encoded
func (t *Dense) FBEncode() ([]byte, error) {
builder := flatbuffers.NewBuilder(1024)
fb.DenseStartShapeVector(builder, len(t.shape))
for i := len(t.shape) - 1; i >= 0; i-- {
builder.PrependInt32(int32(t.shape[i]))
}
shape := builder.EndVector(len(t.shape))
fb.DenseStartStridesVector(builder, len(t.strides))
for i := len(t.strides) - 1; i >= 0; i-- {
builder.PrependInt32(int32(t.strides[i]))
}
strides := builder.EndVector(len(t.strides))
var o uint32
switch {
case t.o.IsRowMajor() && t.o.IsContiguous():
o = 0
case t.o.IsRowMajor() && !t.o.IsContiguous():
o = 1
case t.o.IsColMajor() && t.o.IsContiguous():
o = 2
case t.o.IsColMajor() && !t.o.IsContiguous():
o = 3
}
var triangle int32
switch t.Δ {
case NotTriangle:
triangle = fb.TriangleNOT_TRIANGLE
case Upper:
triangle = fb.TriangleUPPER
case Lower:
triangle = fb.TriangleLOWER
case Symmetric:
triangle = fb.TriangleSYMMETRIC
}
dt := builder.CreateString(t.Dtype().String())
data := t.byteSlice()
fb.DenseStartDataVector(builder, len(data))
for i := len(data) - 1; i >= 0; i-- {
builder.PrependUint8(data[i])
}
databyte := builder.EndVector(len(data))
fb.DenseStart(builder)
fb.DenseAddShape(builder, shape)
fb.DenseAddStrides(builder, strides)
fb.DenseAddO(builder, o)
fb.DenseAddT(builder, triangle)
fb.DenseAddType(builder, dt)
fb.DenseAddData(builder, databyte)
serialized := fb.DenseEnd(builder)
builder.Finish(serialized)
return builder.FinishedBytes(), nil
}
// FBDecode decodes a byteslice from a flatbuffer table into a *Dense
func (t *Dense) FBDecode(buf []byte) error {
serialized := fb.GetRootAsDense(buf, 0)
o := serialized.O()
switch o {
case 0:
t.o = 0
case 1:
t.o = MakeDataOrder(NonContiguous)
case 2:
t.o = MakeDataOrder(ColMajor)
case 3:
t.o = MakeDataOrder(ColMajor, NonContiguous)
}
tri := serialized.T()
switch tri {
case fb.TriangleNOT_TRIANGLE:
t.Δ = NotTriangle
case fb.TriangleUPPER:
t.Δ = Upper
case fb.TriangleLOWER:
t.Δ = Lower
case fb.TriangleSYMMETRIC:
t.Δ = Symmetric
}
t.shape = Shape(BorrowInts(serialized.ShapeLength()))
for i := 0; i < serialized.ShapeLength(); i++ {
t.shape[i] = int(int32(serialized.Shape(i)))
}
t.strides = BorrowInts(serialized.StridesLength())
for i := 0; i < serialized.StridesLength(); i++ {
t.strides[i] = int(serialized.Strides(i))
}
typ := string(serialized.Type())
for _, dt := range allTypes.set {
if dt.String() == typ {
t.t = dt
break
}
}
if t.e == nil {
t.e = StdEng{}
}
t.makeArray(t.shape.TotalSize())
// allocated data. Now time to actually copy over the data
db := t.byteSlice()
copy(db, serialized.DataBytes())
t.fix()
return t.sanity()
}
/* PB SERIALIZATION */
// PBEncode encodes the Dense into a protobuf byte slice.
func (t *Dense) PBEncode() ([]byte, error) {
var toSerialize pb.Dense
toSerialize.Shape = make([]int32, len(t.shape))
for i, v := range t.shape {
toSerialize.Shape[i] = int32(v)
}
toSerialize.Strides = make([]int32, len(t.strides))
for i, v := range t.strides {
toSerialize.Strides[i] = int32(v)
}
switch {
case t.o.IsRowMajor() && t.o.IsContiguous():
toSerialize.O = pb.RowMajorContiguous
case t.o.IsRowMajor() && !t.o.IsContiguous():
toSerialize.O = pb.RowMajorNonContiguous
case t.o.IsColMajor() && t.o.IsContiguous():
toSerialize.O = pb.ColMajorContiguous
case t.o.IsColMajor() && !t.o.IsContiguous():
toSerialize.O = pb.ColMajorNonContiguous
}
toSerialize.T = pb.Triangle(t.Δ)
toSerialize.Type = t.t.String()
data := t.byteSlice()
toSerialize.Data = make([]byte, len(data))
copy(toSerialize.Data, data)
return toSerialize.Marshal()
}
// PBDecode unmarshalls a protobuf byteslice into a *Dense.
func (t *Dense) PBDecode(buf []byte) error {
var toSerialize pb.Dense
if err := toSerialize.Unmarshal(buf); err != nil {
return err
}
t.shape = make(Shape, len(toSerialize.Shape))
for i, v := range toSerialize.Shape {
t.shape[i] = int(v)
}
t.strides = make([]int, len(toSerialize.Strides))
for i, v := range toSerialize.Strides {
t.strides[i] = int(v)
}
switch toSerialize.O {
case pb.RowMajorContiguous:
case pb.RowMajorNonContiguous:
t.o = MakeDataOrder(NonContiguous)
case pb.ColMajorContiguous:
t.o = MakeDataOrder(ColMajor)
case pb.ColMajorNonContiguous:
t.o = MakeDataOrder(ColMajor, NonContiguous)
}
t.Δ = Triangle(toSerialize.T)
typ := string(toSerialize.Type)
for _, dt := range allTypes.set {
if dt.String() == typ {
t.t = dt
break
}
}
if t.e == nil {
t.e = StdEng{}
}
t.makeArray(t.shape.TotalSize())
// allocated data. Now time to actually copy over the data
db := t.byteSlice()
copy(db, toSerialize.Data)
return t.sanity()
}
tensor-0.9.24/dense_io_test.go 0000664 0000000 0000000 00000016475 14265126151 0016313 0 ustar 00root root 0000000 0000000 package tensor
import (
"bytes"
"encoding/gob"
"io/ioutil"
"os"
"os/exec"
"regexp"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSaveLoadNumpy(t *testing.T) {
if os.Getenv("CI_NO_PYTHON") == "true" {
t.Skip("skipping test; This is being run on a CI tool that does not have Python")
}
assert := assert.New(t)
T := New(WithShape(2, 2), WithBacking([]float64{1, 5, 10, -1}))
// also checks the 1D Vector.
T1D := New(WithShape(4), WithBacking([]float64{1, 5, 10, -1}))
f, _ := os.OpenFile("test.npy", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
f1D, _ := os.OpenFile("test1D.npy", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
T.WriteNpy(f)
f.Close()
T1D.WriteNpy(f1D)
f1D.Close()
defer func() {
// cleanup
err := os.Remove("test.npy")
if err != nil {
t.Error(err)
}
err = os.Remove("test1D.npy")
if err != nil {
t.Error(err)
}
}()
script := "import numpy as np\nx = np.load('test.npy')\nprint(x)\nx = np.load('test1D.npy')\nprint(x)"
// Configurable python command, in order to be able to use python or python3
pythonCommand := os.Getenv("PYTHON_COMMAND")
if pythonCommand == "" {
pythonCommand = "python"
}
cmd := exec.Command(pythonCommand)
stdin, err := cmd.StdinPipe()
if err != nil {
t.Error(err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
t.Error(err)
}
go func() {
defer stdin.Close()
stdin.Write([]byte(script))
}()
buf := new(bytes.Buffer)
cmd.Stdout = buf
if err = cmd.Start(); err != nil {
t.Error(err)
t.Logf("Do you have a python with numpy installed? You can change the python interpreter by setting the environment variable PYTHON_COMMAND. Current value: PYTHON_COMMAND=%s", pythonCommand)
}
importError := `ImportError: No module named numpy`
slurpErr, _ := ioutil.ReadAll(stderr)
if ok, _ := regexp.Match(importError, slurpErr); ok {
t.Skipf("Skipping numpy test. It would appear that you do not have Numpy installed.")
}
if err := cmd.Wait(); err != nil {
t.Errorf("%q", err.Error())
}
expected := `\[\[\s*1\.\s*5\.\]\n \[\s*10\.\s*-1\.\]\]\n`
if ok, _ := regexp.Match(expected, buf.Bytes()); !ok {
t.Errorf("Did not successfully read numpy file, \n%q\n%q", buf.String(), expected)
}
// ok now to test if it can read
T2 := new(Dense)
buf = new(bytes.Buffer)
T.WriteNpy(buf)
if err = T2.ReadNpy(buf); err != nil {
t.Fatal(err)
}
assert.Equal(T.Shape(), T2.Shape())
assert.Equal(T.Strides(), T2.Strides())
assert.Equal(T.Data(), T2.Data())
// ok now to test if it can read 1D
T1D2 := new(Dense)
buf = new(bytes.Buffer)
T1D.WriteNpy(buf)
if err = T1D2.ReadNpy(buf); err != nil {
t.Fatal(err)
}
assert.Equal(T1D.Shape(), T1D2.Shape())
assert.Equal(T1D.Strides(), T1D2.Strides())
assert.Equal(T1D.Data(), T1D2.Data())
// try with masked array. masked elements should be filled with default value
T.ResetMask(false)
T.mask[0] = true
T3 := new(Dense)
buf = new(bytes.Buffer)
T.WriteNpy(buf)
if err = T3.ReadNpy(buf); err != nil {
t.Fatal(err)
}
assert.Equal(T.Shape(), T3.Shape())
assert.Equal(T.Strides(), T3.Strides())
data := T.Float64s()
data[0] = T.FillValue().(float64)
assert.Equal(data, T3.Data())
// try with 1D masked array. masked elements should be filled with default value
T1D.ResetMask(false)
T1D.mask[0] = true
T1D3 := new(Dense)
buf = new(bytes.Buffer)
T1D.WriteNpy(buf)
if err = T1D3.ReadNpy(buf); err != nil {
t.Fatal(err)
}
assert.Equal(T1D.Shape(), T1D3.Shape())
assert.Equal(T1D.Strides(), T1D3.Strides())
data = T1D.Float64s()
data[0] = T1D.FillValue().(float64)
assert.Equal(data, T1D3.Data())
}
func TestSaveLoadCSV(t *testing.T) {
assert := assert.New(t)
for _, gtd := range serializationTestData {
if _, ok := gtd.([]complex64); ok {
continue
}
if _, ok := gtd.([]complex128); ok {
continue
}
buf := new(bytes.Buffer)
T := New(WithShape(2, 2), WithBacking(gtd))
if err := T.WriteCSV(buf); err != nil {
t.Error(err)
}
T2 := new(Dense)
if err := T2.ReadCSV(buf, As(T.t)); err != nil {
t.Error(err)
}
assert.Equal(T.Shape(), T2.Shape(), "Test: %v", gtd)
assert.Equal(T.Data(), T2.Data())
}
T := New(WithShape(2, 2), WithBacking([]float64{1, 5, 10, -1}))
f, _ := os.OpenFile("test.csv", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
T.WriteCSV(f)
f.Close()
// cleanup
err := os.Remove("test.csv")
if err != nil {
t.Error(err)
}
// try with masked array. masked elements should be filled with default value
T.ResetMask(false)
T.mask[0] = true
f, _ = os.OpenFile("test.csv", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
T.WriteCSV(f)
f.Close()
// cleanup again
err = os.Remove("test.csv")
if err != nil {
t.Error(err)
}
}
var serializationTestData = []interface{}{
[]int{1, 5, 10, -1},
[]int8{1, 5, 10, -1},
[]int16{1, 5, 10, -1},
[]int32{1, 5, 10, -1},
[]int64{1, 5, 10, -1},
[]uint{1, 5, 10, 255},
[]uint8{1, 5, 10, 255},
[]uint16{1, 5, 10, 255},
[]uint32{1, 5, 10, 255},
[]uint64{1, 5, 10, 255},
[]float32{1, 5, 10, -1},
[]float64{1, 5, 10, -1},
[]complex64{1, 5, 10, -1},
[]complex128{1, 5, 10, -1},
[]string{"hello", "world", "hello", "世界"},
}
func TestDense_GobEncodeDecode(t *testing.T) {
assert := assert.New(t)
var err error
for _, gtd := range serializationTestData {
buf := new(bytes.Buffer)
encoder := gob.NewEncoder(buf)
decoder := gob.NewDecoder(buf)
T := New(WithShape(2, 2), WithBacking(gtd))
if err = encoder.Encode(T); err != nil {
t.Errorf("Error while encoding %v: %v", gtd, err)
continue
}
T2 := new(Dense)
if err = decoder.Decode(T2); err != nil {
t.Errorf("Error while decoding %v: %v", gtd, err)
continue
}
assert.Equal(T.Shape(), T2.Shape())
assert.Equal(T.Strides(), T2.Strides())
assert.Equal(T.Data(), T2.Data())
// try with masked array. masked elements should be filled with default value
buf = new(bytes.Buffer)
encoder = gob.NewEncoder(buf)
decoder = gob.NewDecoder(buf)
T.ResetMask(false)
T.mask[0] = true
assert.True(T.IsMasked())
if err = encoder.Encode(T); err != nil {
t.Errorf("Error while encoding %v: %v", gtd, err)
continue
}
T3 := new(Dense)
if err = decoder.Decode(T3); err != nil {
t.Errorf("Error while decoding %v: %v", gtd, err)
continue
}
assert.Equal(T.Shape(), T3.Shape())
assert.Equal(T.Strides(), T3.Strides())
assert.Equal(T.Data(), T3.Data())
assert.Equal(T.mask, T3.mask)
}
}
func TestDense_FBEncodeDecode(t *testing.T) {
assert := assert.New(t)
for _, gtd := range serializationTestData {
T := New(WithShape(2, 2), WithBacking(gtd))
buf, err := T.FBEncode()
if err != nil {
t.Errorf("UNPOSSIBLE!: %v", err)
continue
}
T2 := new(Dense)
if err = T2.FBDecode(buf); err != nil {
t.Errorf("Error while decoding %v: %v", gtd, err)
continue
}
assert.Equal(T.Shape(), T2.Shape())
assert.Equal(T.Strides(), T2.Strides())
assert.Equal(T.Data(), T2.Data())
// TODO: MASKED ARRAY
}
}
func TestDense_PBEncodeDecode(t *testing.T) {
assert := assert.New(t)
for _, gtd := range serializationTestData {
T := New(WithShape(2, 2), WithBacking(gtd))
buf, err := T.PBEncode()
if err != nil {
t.Errorf("UNPOSSIBLE!: %v", err)
continue
}
T2 := new(Dense)
if err = T2.PBDecode(buf); err != nil {
t.Errorf("Error while decoding %v: %v", gtd, err)
continue
}
assert.Equal(T.Shape(), T2.Shape())
assert.Equal(T.Strides(), T2.Strides())
assert.Equal(T.Data(), T2.Data())
// TODO: MASKED ARRAY
}
}
tensor-0.9.24/dense_linalg.go 0000664 0000000 0000000 00000027340 14265126151 0016104 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
)
// Trace returns the trace of the matrix (i.e. the sum of the diagonal elements). It only works for matrices
func (t *Dense) Trace() (retVal interface{}, err error) {
e := t.e
if tracer, ok := e.(Tracer); ok {
return tracer.Trace(t)
}
return nil, errors.Errorf("Engine %T does not support Trace", e)
}
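// traceSketch is a minimal sketch (not part of the original API); for the 2x3
// matrix below the diagonal elements are 0 and 4, so Trace returns float64(4).
// The backing values are illustrative assumptions only.
func traceSketch() (interface{}, error) {
	T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	return T.Trace()
}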
// Inner performs a dot product on two vectors. If t or other are not vectors, it will return an error.
func (t *Dense) Inner(other Tensor) (retVal interface{}, err error) {
// check that the data is a float
if err = typeclassCheck(t.t, floatcmplxTypes); err != nil {
return nil, errors.Wrapf(err, unsupportedDtype, t.t, "Inner")
}
// check both are vectors
if !t.Shape().IsVector() || !other.Shape().IsVector() {
return nil, errors.Errorf("Inner only works when there are two vectors. t's Shape: %v; other's Shape %v", t.Shape(), other.Shape())
}
// we do this check instead of the more common t.Shape()[1] != other.Shape()[0],
// basically to ensure a similarity with numpy's dot and vectors.
if t.len() != other.DataSize() {
return nil, errors.Errorf(shapeMismatch, t.Shape(), other.Shape())
}
e := t.e
switch ip := e.(type) {
case InnerProderF32:
return ip.Inner(t, other)
case InnerProderF64:
return ip.Inner(t, other)
case InnerProder:
return ip.Inner(t, other)
}
return nil, errors.Errorf("Engine does not support Inner()")
}
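// innerSketch is a minimal sketch (not part of the original API); the inner
// product of (0, 1, 2) with itself is 0*0 + 1*1 + 2*2 = 5, so float64(5) is
// returned. The vectors are illustrative assumptions only.
func innerSketch() (interface{}, error) {
	a := New(WithShape(3), WithBacking([]float64{0, 1, 2}))
	b := New(WithShape(3), WithBacking([]float64{0, 1, 2}))
	return a.Inner(b)
}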
// MatVecMul performs a matrix-vector multiplication.
func (t *Dense) MatVecMul(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) {
// check that it's a matrix x vector
if t.Dims() != 2 || !other.Shape().IsVector() {
err = errors.Errorf("MatVecMul requires t be a matrix and other to be a vector. Got t's shape: %v, other's shape: %v", t.Shape(), other.Shape())
return
}
// checks that t is mxn matrix
m := t.Shape()[0]
n := t.Shape()[1]
// check shape
var odim int
oshape := other.Shape()
switch {
case oshape.IsColVec():
odim = oshape[0]
case oshape.IsRowVec():
odim = oshape[1]
case oshape.IsVector():
odim = oshape[0]
default:
err = errors.Errorf(shapeMismatch, t.Shape(), other.Shape()) // should be unreachable
return
}
if odim != n {
err = errors.Errorf(shapeMismatch, n, other.Shape())
return
}
expectedShape := Shape{m}
// check whether retVal has the same size as the resulting matrix would be: mx1
fo := ParseFuncOpts(opts...)
defer returnOpOpt(fo)
if retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil {
err = errors.Wrapf(err, opFail, "MatVecMul")
return
}
if retVal == nil {
retVal = recycledDense(t.t, expectedShape, WithEngine(t.e))
if t.o.IsColMajor() {
AsFortran(nil)(retVal)
}
}
e := t.e
if mvm, ok := e.(MatVecMuler); ok {
if err = mvm.MatVecMul(t, other, retVal); err != nil {
return nil, errors.Wrapf(err, opFail, "MatVecMul")
}
return handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape)
}
return nil, errors.New("engine does not support MatVecMul")
}
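// matVecMulSketch is a minimal sketch (not part of the original API) of a 2x3
// matrix multiplied by a length-3 vector, which yields a length-2 vector; the
// backing values are illustrative assumptions only.
func matVecMulSketch() (*Dense, error) {
	A := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	x := New(WithShape(3), WithBacking([]float64{1, 1, 1}))
	// the result is (0+1+2, 3+4+5) = (3, 12)
	return A.MatVecMul(x)
}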
// MatMul is the basic matrix multiplication that you learned in high school. It takes an optional reuse ndarray, which is used to store the result.
// If that isn't passed in, a new ndarray will be created instead.
func (t *Dense) MatMul(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) {
// check that both are matrices
if !t.Shape().IsMatrix() || !other.Shape().IsMatrix() {
err = errors.Errorf("MatMul requires both operands to be matrices. Got t's shape: %v, other's shape: %v", t.Shape(), other.Shape())
return
}
// checks that t is mxk matrix
var m, n, k int
m = t.Shape()[0]
k = t.Shape()[1]
n = other.Shape()[1]
// check shape
if k != other.Shape()[0] {
err = errors.Errorf(shapeMismatch, t.Shape(), other.Shape())
return
}
// check whether retVal has the same size as the resulting matrix would be: mxn
expectedShape := Shape{m, n}
fo := ParseFuncOpts(opts...)
defer returnOpOpt(fo)
if retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil {
err = errors.Wrapf(err, opFail, "MatMul")
return
}
if retVal == nil {
retVal = recycledDense(t.t, expectedShape, WithEngine(t.e))
if t.o.IsColMajor() {
AsFortran(nil)(retVal)
}
}
e := t.e
if mm, ok := e.(MatMuler); ok {
if err = mm.MatMul(t, other, retVal); err != nil {
return
}
return handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape)
}
return nil, errors.New("engine does not support MatMul")
}
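// matMulSketch is a minimal sketch (not part of the original API) of a 2x3 by
// 3x2 multiplication producing a 2x2 result; the backing values are
// illustrative assumptions only.
func matMulSketch() (*Dense, error) {
	A := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	B := New(WithShape(3, 2), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	// the result is [[10, 13], [28, 40]]
	return A.MatMul(B)
}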
// Outer finds the outer product of two vectors
func (t *Dense) Outer(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) {
// check both are vectors
if !t.Shape().IsVector() || !other.Shape().IsVector() {
err = errors.Errorf("Outer only works when there are two vectors. t's shape: %v. other's shape: %v", t.Shape(), other.Shape())
return
}
m := t.Size()
n := other.Size()
// check whether retVal has the same size as the resulting matrix would be: mxn
expectedShape := Shape{m, n}
fo := ParseFuncOpts(opts...)
defer returnOpOpt(fo)
if retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil {
err = errors.Wrapf(err, opFail, "Outer")
return
}
if retVal == nil {
retVal = recycledDense(t.t, expectedShape, WithEngine(t.e))
if t.o.IsColMajor() {
AsFortran(nil)(retVal)
}
}
e := t.e
// DGER does not have any beta. So the values have to be zeroed first if the tensor is to be reused
retVal.Zero()
if op, ok := e.(OuterProder); ok {
if err = op.Outer(t, other, retVal); err != nil {
return nil, errors.Wrapf(err, opFail, "engine.Outer")
}
return handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape)
}
return nil, errors.New("engine does not support Outer")
}
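// outerSketch is a minimal sketch (not part of the original API); the outer
// product of a length-2 vector and a length-3 vector is a 2x3 matrix. The
// backing values are illustrative assumptions only.
func outerSketch() (*Dense, error) {
	a := New(WithShape(2), WithBacking([]float64{1, 2}))
	b := New(WithShape(3), WithBacking([]float64{3, 4, 5}))
	// the result is [[3, 4, 5], [6, 8, 10]]
	return a.Outer(b)
}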
// TensorMul is for multiplying Tensors with more than 2 dimensions.
//
// The algorithm is conceptually simple (but tricky to get right):
// 1. Transpose and reshape the Tensors in such a way that both t and other are 2D matrices
// 2. Use DGEMM to multiply them
// 3. Reshape the results to be the new expected result
//
// This function is a Go implementation of Numpy's tensordot method. It simplifies a lot of what Numpy does.
func (t *Dense) TensorMul(other Tensor, axesA, axesB []int) (retVal *Dense, err error) {
ts := t.Shape()
td := len(ts)
os := other.Shape()
od := len(os)
na := len(axesA)
nb := len(axesB)
sameLength := na == nb
if sameLength {
for i := 0; i < na; i++ {
if ts[axesA[i]] != os[axesB[i]] {
sameLength = false
break
}
if axesA[i] < 0 {
axesA[i] += td
}
if axesB[i] < 0 {
axesB[i] += od
}
}
}
if !sameLength {
err = errors.Errorf(shapeMismatch, ts, os)
return
}
// handle shapes
var notins []int
for i := 0; i < td; i++ {
notin := true
for _, a := range axesA {
if i == a {
notin = false
break
}
}
if notin {
notins = append(notins, i)
}
}
newAxesA := BorrowInts(len(notins) + len(axesA))
defer ReturnInts(newAxesA)
newAxesA = newAxesA[:0]
newAxesA = append(notins, axesA...)
n2 := 1
for _, a := range axesA {
n2 *= ts[a]
}
newShapeT := Shape(BorrowInts(2))
defer ReturnInts(newShapeT)
newShapeT[0] = ts.TotalSize() / n2
newShapeT[1] = n2
retShape1 := BorrowInts(len(ts))
defer ReturnInts(retShape1)
retShape1 = retShape1[:0]
for _, ni := range notins {
retShape1 = append(retShape1, ts[ni])
}
// work on other now
notins = notins[:0]
for i := 0; i < od; i++ {
notin := true
for _, a := range axesB {
if i == a {
notin = false
break
}
}
if notin {
notins = append(notins, i)
}
}
newAxesB := BorrowInts(len(notins) + len(axesB))
defer ReturnInts(newAxesB)
newAxesB = newAxesB[:0]
newAxesB = append(axesB, notins...)
newShapeO := Shape(BorrowInts(2))
defer ReturnInts(newShapeO)
newShapeO[0] = n2
newShapeO[1] = os.TotalSize() / n2
retShape2 := BorrowInts(len(ts))
retShape2 = retShape2[:0]
for _, ni := range notins {
retShape2 = append(retShape2, os[ni])
}
// we borrowClone because we don't want to touch the original Tensors
doT := t.Clone().(*Dense)
doOther := other.Clone().(*Dense)
defer ReturnTensor(doT)
defer ReturnTensor(doOther)
if err = doT.T(newAxesA...); err != nil {
return
}
doT.Transpose() // we have to materialize the transpose first or the underlying data won't be changed and the reshape that follows would be meaningless
if err = doT.Reshape(newShapeT...); err != nil {
return
}
if err = doOther.T(newAxesB...); err != nil {
return
}
doOther.Transpose()
if err = doOther.Reshape(newShapeO...); err != nil {
return
}
// the magic happens here
var rt Tensor
if rt, err = Dot(doT, doOther); err != nil {
return
}
retVal = rt.(*Dense)
retShape := BorrowInts(len(retShape1) + len(retShape2))
defer ReturnInts(retShape)
retShape = retShape[:0]
retShape = append(retShape, retShape1...)
retShape = append(retShape, retShape2...)
if len(retShape) == 0 { // In case a scalar is returned, it should be returned as shape = {1}
retShape = append(retShape, 1)
}
if err = retVal.Reshape(retShape...); err != nil {
return
}
return
}
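// tensorMulSketch is a minimal sketch (not part of the original API) of
// contracting a (2, 3, 4) tensor with a (3, 4, 5) tensor over axes (1, 2) of
// the first and (0, 1) of the second, which mirrors numpy's
// tensordot(A, B, axes=([1, 2], [0, 1])) and yields a (2, 5) result. The
// shapes and axes are illustrative assumptions only.
func tensorMulSketch() (*Dense, error) {
	A := New(WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24)))
	B := New(WithShape(3, 4, 5), WithBacking(Range(Float64, 0, 60)))
	return A.TensorMul(B, []int{1, 2}, []int{0, 1})
}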
// SVD does the Singular Value Decomposition for the *Dense.
//
// It works by temporarily converting the *Dense into a gonum/mat64 matrix and using Gonum's SVD function to perform the decomposition.
// In the future, when gonum/lapack fully supports float32, we'll look into rewriting this.
func (t *Dense) SVD(uv, full bool) (s, u, v *Dense, err error) {
e := t.Engine()
if svder, ok := e.(SVDer); ok {
var sT, uT, vT Tensor
if sT, uT, vT, err = svder.SVD(t, uv, full); err != nil {
return nil, nil, nil, errors.Wrap(err, "Error while performing *Dense.SVD")
}
if s, err = assertDense(sT); err != nil {
return nil, nil, nil, errors.Wrapf(err, "sT is not *Dense (uv %t full %t). Got %T instead", uv, full, sT)
}
// if not uv and not full, u can be nil
if u, err = assertDense(uT); err != nil && !(!uv && !full) {
return nil, nil, nil, errors.Wrapf(err, "uT is not *Dense (uv %t full %t). Got %T instead", uv, full, uT)
}
// if not uv and not full, v can be nil
if v, err = assertDense(vT); err != nil && !(!uv && !full) {
return nil, nil, nil, errors.Wrapf(err, "vT is not *Dense (uv %t full %t). Got %T instead", uv, full, vT)
}
return s, u, v, nil
}
return nil, nil, nil, errors.New("Engine does not support SVD")
}
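// svdSketch is a minimal sketch (not part of the original API) of requesting a
// thin SVD (uv=true, full=false) so that the singular values as well as u and
// v are returned; the matrix values are illustrative assumptions only.
func svdSketch() (s, u, v *Dense, err error) {
	T := New(WithShape(2, 2), WithBacking([]float64{4, 0, 0, 3}))
	return T.SVD(true, false)
}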
/* UTILITY FUNCTIONS */
// handleReuse extracts a *Dense from Tensor, and checks the shape of the reuse Tensor
func handleReuse(reuse Tensor, expectedShape Shape, safe bool) (retVal *Dense, err error) {
if reuse != nil {
if retVal, err = assertDense(reuse); err != nil {
err = errors.Wrapf(err, opFail, "handling reuse")
return
}
if !safe {
return
}
if err = reuseCheckShape(retVal, expectedShape); err != nil {
err = errors.Wrapf(err, "Unable to process reuse *Dense Tensor. Shape error.")
return
}
return
}
return
}
// handleIncr is the cleanup step for when there is a Tensor to increment. If the result tensor is the same as the reuse Tensor, the result tensor gets returned to the pool
func handleIncr(res *Dense, reuse, incr Tensor, expectedShape Shape) (retVal *Dense, err error) {
// handle increments
if incr != nil {
if !expectedShape.Eq(incr.Shape()) {
err = errors.Errorf(shapeMismatch, expectedShape, incr.Shape())
return
}
var incrD *Dense
var ok bool
if incrD, ok = incr.(*Dense); !ok {
err = errors.Errorf(extractionFail, "*Dense", incr)
return
}
if err = typeclassCheck(incrD.t, numberTypes); err != nil {
err = errors.Wrapf(err, "handleIncr only handles Number types. Got %v instead", incrD.t)
return
}
if _, err = incrD.Add(res, UseUnsafe()); err != nil {
return
}
// vecAdd(incr.data, retVal.data)
// return retVal to pool - if and only if retVal is not reuse
// reuse indicates that someone else also has the reference to the *Dense
if res != reuse {
ReturnTensor(res)
}
// then
retVal = incrD
return
}
return res, nil
}
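// The reuse and incr paths handled above back the WithReuse and WithIncr
// FuncOpts. A hedged sketch of how a caller exercises them (illustrative only;
// the concrete values are assumptions, not taken from the original tests):
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b := New(WithShape(2, 2), WithBacking([]float64{5, 6, 7, 8}))
//	reuse := New(Of(Float64), WithShape(2, 2))
//	incr := New(WithShape(2, 2), WithBacking([]float64{100, 100, 100, 100}))
//
//	_, _ = a.MatMul(b, WithReuse(reuse)) // result is written into reuse's backing array
//	_, _ = a.MatMul(b, WithIncr(incr))   // result is added elementwise into incr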
tensor-0.9.24/dense_linalg_test.go 0000664 0000000 0000000 00000100204 14265126151 0017132 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
"gorgonia.org/vecf64"
)
type linalgTest struct {
a, b interface{}
shapeA, shapeB Shape
transA, transB bool
reuse, incr interface{}
shapeR, shapeI Shape
correct interface{}
correctIncr interface{}
correctIncrReuse interface{}
correctShape Shape
err bool
errIncr bool
errReuse bool
}
var traceTests = []struct {
data interface{}
correct interface{}
err bool
}{
{[]int{0, 1, 2, 3, 4, 5}, int(4), false},
{[]int8{0, 1, 2, 3, 4, 5}, int8(4), false},
{[]int16{0, 1, 2, 3, 4, 5}, int16(4), false},
{[]int32{0, 1, 2, 3, 4, 5}, int32(4), false},
{[]int64{0, 1, 2, 3, 4, 5}, int64(4), false},
{[]uint{0, 1, 2, 3, 4, 5}, uint(4), false},
{[]uint8{0, 1, 2, 3, 4, 5}, uint8(4), false},
{[]uint16{0, 1, 2, 3, 4, 5}, uint16(4), false},
{[]uint32{0, 1, 2, 3, 4, 5}, uint32(4), false},
{[]uint64{0, 1, 2, 3, 4, 5}, uint64(4), false},
{[]float32{0, 1, 2, 3, 4, 5}, float32(4), false},
{[]float64{0, 1, 2, 3, 4, 5}, float64(4), false},
{[]complex64{0, 1, 2, 3, 4, 5}, complex64(4), false},
{[]complex128{0, 1, 2, 3, 4, 5}, complex128(4), false},
{[]bool{true, false, true, false, true, false}, nil, true},
}
func TestDense_Trace(t *testing.T) {
assert := assert.New(t)
for i, tts := range traceTests {
T := New(WithBacking(tts.data), WithShape(2, 3))
trace, err := T.Trace()
if checkErr(t, tts.err, err, "Trace", i) {
continue
}
assert.Equal(tts.correct, trace)
//
T = New(WithBacking(tts.data))
_, err = T.Trace()
if err == nil {
t.Error("Expected an error when Trace() on non-matrices")
}
}
}
var innerTests = []struct {
a, b interface{}
shapeA, shapeB Shape
correct interface{}
err bool
}{
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{3, 1}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{3, 1}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3, 1}, Shape{1, 3}, float64(5), false},
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{1, 3}, Shape{1, 3}, float64(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{3, 1}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{3, 1}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3, 1}, Shape{1, 3}, float32(5), false},
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{1, 3}, Shape{1, 3}, float32(5), false},
// stupids: type differences
{Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true},
{Range(Float32, 0, 3), Range(Byte, 0, 3), Shape{3}, Shape{3}, nil, true},
{Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, nil, true},
{Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, nil, true},
// differing size
{Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{4}, Shape{3}, nil, true},
// A is not a matrix
{Range(Float64, 0, 4), Range(Float64, 0, 3), Shape{2, 2}, Shape{3}, nil, true},
}
func TestDense_Inner(t *testing.T) {
for i, its := range innerTests {
a := New(WithShape(its.shapeA...), WithBacking(its.a))
b := New(WithShape(its.shapeB...), WithBacking(its.b))
T, err := a.Inner(b)
if checkErr(t, its.err, err, "Inner", i) {
continue
}
assert.Equal(t, its.correct, T)
}
}
var matVecMulTests = []linalgTest{
// Float64s
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false},
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false},
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, false},
// float64s with transposed matrix
{Range(Float64, 0, 6), Range(Float64, 0, 2), Shape{2, 3}, Shape{2}, true, false,
Range(Float64, 52, 55), Range(Float64, 100, 103), Shape{3}, Shape{3},
[]float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{3}, false, false, false},
// Float32s
{Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2},
[]float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false},
{Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3, 1}, false, false,
Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2},
[]float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false},
{Range(Float32, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{1, 3}, false, false,
Range(Float32, 52, 54), Range(Float32, 100, 102), Shape{2}, Shape{2},
[]float32{5, 14}, []float32{105, 115}, []float32{110, 129}, Shape{2}, false, false, false},
// stupids : unpossible shapes (wrong A)
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{6}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
//stupids: bad A shape
{Range(Float64, 0, 8), Range(Float64, 0, 3), Shape{4, 2}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
//stupids: bad B shape
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
//stupids: bad reuse
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 55), Range(Float64, 100, 102), Shape{3}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true},
//stupids: bad incr shape
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 105), Shape{2}, Shape{5},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false},
// stupids: type mismatch A and B
{Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B
{Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B
{Range(Float64, 0, 6), Range(Float32, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B
{Range(Float32, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch A and B (non-Float)
{Range(Float64, 0, 6), Range(Int, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float64, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, true, false, false},
// stupids: type mismatch, reuse
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float32, 52, 54), Range(Float64, 100, 102), Shape{2}, Shape{2},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, false, true},
// stupids: type mismatch, incr
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), Range(Float32, 100, 103), Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false},
// stupids: type mismatch, incr not a Number
{Range(Float64, 0, 6), Range(Float64, 0, 3), Shape{2, 3}, Shape{3}, false, false,
Range(Float64, 52, 54), []bool{true, true, true}, Shape{2}, Shape{3},
[]float64{5, 14}, []float64{105, 115}, []float64{110, 129}, Shape{2}, false, true, false},
}
func TestDense_MatVecMul(t *testing.T) {
assert := assert.New(t)
for i, mvmt := range matVecMulTests {
a := New(WithBacking(mvmt.a), WithShape(mvmt.shapeA...))
b := New(WithBacking(mvmt.b), WithShape(mvmt.shapeB...))
if mvmt.transA {
if err := a.T(); err != nil {
t.Error(err)
continue
}
}
T, err := a.MatVecMul(b)
if checkErr(t, mvmt.err, err, "Safe", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsRowMajor())
assert.Equal(mvmt.correct, T.Data())
// incr
incr := New(WithBacking(mvmt.incr), WithShape(mvmt.shapeI...))
T, err = a.MatVecMul(b, WithIncr(incr))
if checkErr(t, mvmt.errIncr, err, "WithIncr", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsRowMajor())
assert.Equal(mvmt.correctIncr, T.Data())
// reuse
reuse := New(WithBacking(mvmt.reuse), WithShape(mvmt.shapeR...))
T, err = a.MatVecMul(b, WithReuse(reuse))
if checkErr(t, mvmt.errReuse, err, "WithReuse", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.True(T.DataOrder().IsRowMajor())
assert.Equal(mvmt.correct, T.Data())
// reuse AND incr
T, err = a.MatVecMul(b, WithIncr(incr), WithReuse(reuse))
if checkErr(t, mvmt.err, err, "WithReuse and WithIncr", i) {
continue
}
assert.True(mvmt.correctShape.Eq(T.Shape()))
assert.Equal(mvmt.correctIncrReuse, T.Data())
}
}
var matMulTests = []linalgTest{
// Float64s
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, false, false},
// Float32s
{Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float32, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float32{10, 13, 28, 40}, []float32{110, 114, 130, 143}, []float32{120, 127, 158, 183}, Shape{2, 2}, false, false, false},
// Edge cases - Row Vecs (Float64)
{Range(Float64, 0, 2), Range(Float64, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false,
Range(Float64, 10, 16), Range(Float64, 100, 106), Shape{2, 3}, Shape{2, 3},
[]float64{0, 0, 0, 0, 1, 2}, []float64{100, 101, 102, 103, 105, 107}, []float64{100, 101, 102, 103, 106, 109}, Shape{2, 3}, false, false, false},
{Range(Float64, 0, 2), Range(Float64, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false,
Range(Float64, 10, 13), Range(Float64, 100, 103), Shape{1, 3}, Shape{1, 3},
[]float64{3, 4, 5}, []float64{103, 105, 107}, []float64{106, 109, 112}, Shape{1, 3}, false, false, false},
{Range(Float64, 0, 2), Range(Float64, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false,
Range(Float64, 0, 1), Range(Float64, 100, 101), Shape{1, 1}, Shape{1, 1},
[]float64{1}, []float64{101}, []float64{102}, Shape{1, 1}, false, false, false},
// Edge cases - Row Vecs (Float32)
{Range(Float32, 0, 2), Range(Float32, 0, 3), Shape{2, 1}, Shape{1, 3}, false, false,
Range(Float32, 10, 16), Range(Float32, 100, 106), Shape{2, 3}, Shape{2, 3},
[]float32{0, 0, 0, 0, 1, 2}, []float32{100, 101, 102, 103, 105, 107}, []float32{100, 101, 102, 103, 106, 109}, Shape{2, 3}, false, false, false},
{Range(Float32, 0, 2), Range(Float32, 0, 6), Shape{1, 2}, Shape{2, 3}, false, false,
Range(Float32, 10, 13), Range(Float32, 100, 103), Shape{1, 3}, Shape{1, 3},
[]float32{3, 4, 5}, []float32{103, 105, 107}, []float32{106, 109, 112}, Shape{1, 3}, false, false, false},
{Range(Float32, 0, 2), Range(Float32, 0, 2), Shape{1, 2}, Shape{2, 1}, false, false,
Range(Float32, 0, 1), Range(Float32, 100, 101), Shape{1, 1}, Shape{1, 1},
[]float32{1}, []float32{101}, []float32{102}, Shape{1, 1}, false, false, false},
// stupids - bad shape (not matrices):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false},
// stupids - bad shape (incompatible shapes):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{6, 1}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false},
// stupids - bad shape (bad reuse shape):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 57), Range(Float64, 100, 104), Shape{5}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, false, true},
// stupids - bad shape (bad incr shape):
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{4},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, true, false},
// stupids - type mismatch (a,b)
{Range(Float64, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false},
// stupids - type mismatch (a,b)
{Range(Float32, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false},
// stupids type mismatch (b not float)
{Range(Float64, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false},
// stupids type mismatch (a not float)
{Range(Int, 0, 6), Range(Int, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, true, false, false},
// stupids: type mismatch (incr)
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, true, false},
// stupids: type mismatch (reuse)
{Range(Float64, 0, 6), Range(Float64, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float32, 52, 56), Range(Float64, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float64{10, 13, 28, 40}, []float64{110, 114, 130, 143}, []float64{120, 127, 158, 183}, Shape{2, 2}, false, false, true},
// stupids: type mismatch (reuse)
{Range(Float32, 0, 6), Range(Float32, 0, 6), Shape{2, 3}, Shape{3, 2}, false, false,
Range(Float64, 52, 56), Range(Float32, 100, 104), Shape{2, 2}, Shape{2, 2},
[]float32{10, 13, 28, 40}, []float32{110, 114, 130, 143}, []float32{120, 127, 158, 183}, Shape{2, 2}, false, false, true},
}
func TestDense_MatMul(t *testing.T) {
assert := assert.New(t)
for i, mmt := range matMulTests {
a := New(WithBacking(mmt.a), WithShape(mmt.shapeA...))
b := New(WithBacking(mmt.b), WithShape(mmt.shapeB...))
T, err := a.MatMul(b)
if checkErr(t, mmt.err, err, "Safe", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correct, T.Data())
// incr
incr := New(WithBacking(mmt.incr), WithShape(mmt.shapeI...))
T, err = a.MatMul(b, WithIncr(incr))
if checkErr(t, mmt.errIncr, err, "WithIncr", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correctIncr, T.Data())
// reuse
reuse := New(WithBacking(mmt.reuse), WithShape(mmt.shapeR...))
T, err = a.MatMul(b, WithReuse(reuse))
if checkErr(t, mmt.errReuse, err, "WithReuse", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correct, T.Data())
// reuse AND incr
T, err = a.MatMul(b, WithIncr(incr), WithReuse(reuse))
if checkErr(t, mmt.err, err, "WithIncr and WithReuse", i) {
continue
}
assert.True(mmt.correctShape.Eq(T.Shape()))
assert.Equal(mmt.correctIncrReuse, T.Data())
}
}
var outerTests = []linalgTest{
// Float64s
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
false, false, false},
// Float32s
{Range(Float32, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float32, 52, 61), Range(Float32, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float32{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float32{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float32{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
false, false, false},
// stupids - a or b not vector
{Range(Float64, 0, 3), Range(Float64, 0, 6), Shape{3}, Shape{3, 2}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
true, false, false},
// stupids - bad incr shape
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 106), Shape{3, 3}, Shape{3, 2},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
false, true, false},
// stupids - bad reuse shape
{Range(Float64, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 58), Range(Float64, 100, 109), Shape{3, 2}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
false, false, true},
// stupids - b not Float
{Range(Float64, 0, 3), Range(Int, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
true, false, false},
// stupids - a not Float
{Range(Int, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
true, false, false},
// stupids - a-b type mismatch
{Range(Float64, 0, 3), Range(Float32, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
true, false, false},
// stupids a-b type mismatch
{Range(Float32, 0, 3), Range(Float64, 0, 3), Shape{3}, Shape{3}, false, false,
Range(Float64, 52, 61), Range(Float64, 100, 109), Shape{3, 3}, Shape{3, 3},
[]float64{0, 0, 0, 0, 1, 2, 0, 2, 4}, []float64{100, 101, 102, 103, 105, 107, 106, 109, 112}, []float64{100, 101, 102, 103, 106, 109, 106, 111, 116}, Shape{3, 3},
true, false, false},
}
func TestDense_Outer(t *testing.T) {
assert := assert.New(t)
for i, ot := range outerTests {
a := New(WithBacking(ot.a), WithShape(ot.shapeA...))
b := New(WithBacking(ot.b), WithShape(ot.shapeB...))
T, err := a.Outer(b)
if checkErr(t, ot.err, err, "Safe", i) {
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.Equal(ot.correct, T.Data())
// incr
incr := New(WithBacking(ot.incr), WithShape(ot.shapeI...))
T, err = a.Outer(b, WithIncr(incr))
if checkErr(t, ot.errIncr, err, "WithIncr", i) {
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.Equal(ot.correctIncr, T.Data())
// reuse
reuse := New(WithBacking(ot.reuse), WithShape(ot.shapeR...))
T, err = a.Outer(b, WithReuse(reuse))
if checkErr(t, ot.errReuse, err, "WithReuse", i) {
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.Equal(ot.correct, T.Data())
// reuse AND incr
T, err = a.Outer(b, WithIncr(incr), WithReuse(reuse))
if err != nil {
t.Errorf("Reuse and Incr error'd %+v", err)
continue
}
assert.True(ot.correctShape.Eq(T.Shape()))
assert.Equal(ot.correctIncrReuse, T.Data())
}
}
var tensorMulTests = []struct {
a, b interface{}
shapeA, shapeB Shape
reuse, incr interface{}
shapeR, shapeI Shape
correct interface{}
correctIncr interface{}
correctIncrReuse interface{}
correctShape Shape
err bool
errIncr bool
errReuse bool
axesA, axesB []int
}{
{a: Range(Float64, 0, 60), b: Range(Float64, 0, 24), shapeA: Shape{3, 4, 5}, shapeB: Shape{4, 3, 2},
axesA: []int{1, 0}, axesB: []int{0, 1},
correct: []float64{4400, 4730, 4532, 4874, 4664, 5018, 4796, 5162, 4928, 5306}, correctShape: Shape{5, 2}},
}
func TestDense_TensorMul(t *testing.T) {
assert := assert.New(t)
for i, tmt := range tensorMulTests {
a := New(WithShape(tmt.shapeA...), WithBacking(tmt.a))
b := New(WithShape(tmt.shapeB...), WithBacking(tmt.b))
T, err := a.TensorMul(b, tmt.axesA, tmt.axesB)
if checkErr(t, tmt.err, err, "Safe", i) {
continue
}
assert.True(tmt.correctShape.Eq(T.Shape()))
assert.Equal(tmt.correct, T.Data())
}
}
func TestDot(t *testing.T) {
assert := assert.New(t)
var a, b, c, r Tensor
var A, B, R, R2 Tensor
var s, s2 Tensor
var incr Tensor
var err error
var expectedShape Shape
var expectedData []float64
var expectedScalar float64
// vector-vector
t.Log("Vec⋅Vec")
a = New(Of(Float64), WithShape(3, 1), WithBacking(Range(Float64, 0, 3)))
b = New(Of(Float64), WithShape(3, 1), WithBacking(Range(Float64, 0, 3)))
r, err = Dot(a, b)
expectedShape = Shape{1}
expectedScalar = float64(5)
assert.Nil(err)
assert.Equal(expectedScalar, r.Data())
assert.True(ScalarShape().Eq(r.Shape()))
// vector-mat (which is the same as matᵀ*vec)
t.Log("Vec⋅Mat dot, should be equal to Aᵀb")
A = New(Of(Float64), WithShape(3, 2), WithBacking(Range(Float64, 0, 6)))
R, err = Dot(b, A)
expectedShape = Shape{2}
expectedData = []float64{10, 13}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// mat-mat
t.Log("Mat⋅Mat")
A = New(Of(Float64), WithShape(4, 5), WithBacking(Range(Float64, 0, 20)))
B = New(Of(Float64), WithShape(5, 10), WithBacking(Range(Float64, 2, 52)))
R, err = Dot(A, B)
expectedShape = Shape{4, 10}
expectedData = []float64{
320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 870,
905, 940, 975, 1010, 1045, 1080, 1115, 1150, 1185, 1420, 1480,
1540, 1600, 1660, 1720, 1780, 1840, 1900, 1960, 1970, 2055, 2140,
2225, 2310, 2395, 2480, 2565, 2650, 2735,
}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// T-T
t.Log("3T⋅3T")
A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24)))
B = New(Of(Float64), WithShape(3, 4, 2), WithBacking(Range(Float64, 0, 24)))
R, err = Dot(A, B)
expectedShape = Shape{2, 3, 3, 2}
expectedData = []float64{
28, 34,
76, 82,
124, 130,
76, 98,
252, 274,
428, 450,
124, 162,
428, 466,
732, 770,
//
172, 226,
604, 658,
1036, 1090,
220, 290,
780, 850,
1340, 1410,
268, 354,
956, 1042,
1644, 1730,
}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// T-T
t.Log("3T⋅4T")
A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24)))
B = New(Of(Float64), WithShape(2, 3, 4, 5), WithBacking(Range(Float64, 0, 120)))
R, err = Dot(A, B)
expectedShape = Shape{2, 3, 2, 3, 5}
expectedData = []float64{
70, 76, 82, 88, 94, 190, 196, 202, 208, 214, 310,
316, 322, 328, 334, 430, 436, 442, 448, 454, 550, 556,
562, 568, 574, 670, 676, 682, 688, 694, 190, 212, 234,
256, 278, 630, 652, 674, 696, 718, 1070, 1092, 1114, 1136,
1158, 1510, 1532, 1554, 1576, 1598, 1950, 1972, 1994, 2016, 2038,
2390, 2412, 2434, 2456, 2478, 310, 348, 386, 424, 462, 1070,
1108, 1146, 1184, 1222, 1830, 1868, 1906, 1944, 1982, 2590, 2628,
2666, 2704, 2742, 3350, 3388, 3426, 3464, 3502, 4110, 4148, 4186,
4224, 4262, 430, 484, 538, 592, 646, 1510, 1564, 1618, 1672,
1726, 2590, 2644, 2698, 2752, 2806, 3670, 3724, 3778, 3832, 3886,
4750, 4804, 4858, 4912, 4966, 5830, 5884, 5938, 5992, 6046, 550,
620, 690, 760, 830, 1950, 2020, 2090, 2160, 2230, 3350, 3420,
3490, 3560, 3630, 4750, 4820, 4890, 4960, 5030, 6150, 6220, 6290,
6360, 6430, 7550, 7620, 7690, 7760, 7830, 670, 756, 842, 928,
1014, 2390, 2476, 2562, 2648, 2734, 4110, 4196, 4282, 4368, 4454,
5830, 5916, 6002, 6088, 6174, 7550, 7636, 7722, 7808, 7894, 9270,
9356, 9442, 9528, 9614,
}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// T-v
t.Log("3T⋅Vec")
b = New(Of(Float64), WithShape(4), WithBacking(Range(Float64, 0, 4)))
R, err = Dot(A, b)
expectedShape = Shape{2, 3}
expectedData = []float64{
14, 38, 62,
86, 110, 134,
}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// v-T
t.Log("Vec⋅3T")
R2, err = Dot(b, B)
expectedShape = Shape{2, 3, 5}
expectedData = []float64{
70, 76, 82, 88, 94,
190, 196, 202, 208, 214,
310, 316, 322, 328, 334,
430, 436, 442, 448, 454,
550, 556, 562, 568, 574,
670, 676, 682, 688, 694,
}
assert.Nil(err)
assert.Equal(expectedData, R2.Data())
assert.Equal(expectedShape, R2.Shape())
// m-3T
t.Log("Mat⋅3T")
A = New(Of(Float64), WithShape(2, 4), WithBacking(Range(Float64, 0, 8)))
B = New(Of(Float64), WithShape(2, 4, 5), WithBacking(Range(Float64, 0, 40)))
R, err = Dot(A, B)
expectedShape = Shape{2, 2, 5}
expectedData = []float64{
70, 76, 82, 88, 94,
190, 196, 202, 208, 214,
190, 212, 234, 256, 278,
630, 652, 674, 696, 718,
}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// test reuse
// m-v with reuse
t.Log("Mat⋅Vec with reuse")
R = New(Of(Float64), WithShape(2))
R2, err = Dot(A, b, WithReuse(R))
expectedShape = Shape{2}
expectedData = []float64{14, 38}
assert.Nil(err)
assert.Equal(R, R2)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// 3T-vec with reuse
t.Logf("3T⋅vec with reuse")
R = New(Of(Float64), WithShape(6))
A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24)))
R2, err = Dot(A, b, WithReuse(R))
expectedShape = Shape{2, 3}
expectedData = []float64{
14, 38, 62,
86, 110, 134,
}
assert.Nil(err)
assert.Equal(R, R2)
assert.Equal(expectedData, R2.Data())
assert.Equal(expectedShape, R2.Shape())
// v-m
t.Log("vec⋅Mat with reuse")
R = New(Of(Float64), WithShape(2))
a = New(Of(Float64), WithShape(4), WithBacking(Range(Float64, 0, 4)))
B = New(Of(Float64), WithShape(4, 2), WithBacking(Range(Float64, 0, 8)))
R2, err = Dot(a, B, WithReuse(R))
expectedShape = Shape{2}
expectedData = []float64{28, 34}
assert.Nil(err)
assert.Equal(R, R2)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// test incr
incrBack := make([]float64, 2)
copy(incrBack, expectedData)
incr = New(Of(Float64), WithBacking(incrBack), WithShape(2))
R, err = Dot(a, B, WithIncr(incr))
vecf64.Scale(expectedData, 2)
assert.Nil(err)
assert.Equal(incr, R)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// The Nearly Stupids
s = New(FromScalar(5.0))
s2 = New(FromScalar(10.0))
R, err = Dot(s, s2)
assert.Nil(err)
assert.True(R.IsScalar())
assert.Equal(float64(50), R.Data())
R.Zero()
R2, err = Dot(s, s2, WithReuse(R))
assert.Nil(err)
assert.True(R2.IsScalar())
assert.Equal(float64(50), R2.Data())
R, err = Dot(s, A)
expectedData = vecf64.Range(0, 24)
vecf64.Scale(expectedData, 5)
assert.Nil(err)
assert.Equal(A.Shape(), R.Shape())
assert.Equal(expectedData, R.Data())
R.Zero()
R2, err = Dot(s, A, WithReuse(R))
assert.Nil(err)
assert.Equal(R, R2)
assert.Equal(A.Shape(), R2.Shape())
assert.Equal(expectedData, R2.Data())
R, err = Dot(A, s)
assert.Nil(err)
assert.Equal(A.Shape(), R.Shape())
assert.Equal(expectedData, R.Data())
R.Zero()
R2, err = Dot(A, s, WithReuse(R))
assert.Nil(err)
assert.Equal(R, R2)
assert.Equal(A.Shape(), R2.Shape())
assert.Equal(expectedData, R2.Data())
incr = New(Of(Float64), WithShape(R2.Shape()...))
copy(incr.Data().([]float64), expectedData)
incr2 := incr.Clone().(*Dense) // backup a copy for the following test
vecf64.Scale(expectedData, 2)
R, err = Dot(A, s, WithIncr(incr))
assert.Nil(err)
assert.Equal(incr, R)
assert.Equal(A.Shape(), R.Shape())
assert.Equal(expectedData, R.Data())
incr = incr2
R, err = Dot(s, A, WithIncr(incr))
assert.Nil(err)
assert.Equal(incr, R)
assert.Equal(A.Shape(), R.Shape())
assert.Equal(expectedData, R.Data())
incr = New(Of(Float64), FromScalar(float64(50)))
R, err = Dot(s, s2, WithIncr(incr))
assert.Nil(err)
assert.Equal(R, incr)
assert.True(R.IsScalar())
assert.Equal(float64(100), R.Data())
/* HERE BE STUPIDS */
// different sizes of vectors
c = New(Of(Float64), WithShape(1, 100))
_, err = Dot(a, c)
assert.NotNil(err)
// vector mat, but with shape mismatch
B = New(Of(Float64), WithShape(2, 3), WithBacking(Range(Float64, 0, 6)))
_, err = Dot(b, B)
assert.NotNil(err)
// mat-mat but wrong reuse size
A = New(Of(Float64), WithShape(2, 2))
R = New(Of(Float64), WithShape(5, 10))
_, err = Dot(A, B, WithReuse(R))
assert.NotNil(err)
// mat-vec but wrong reuse size
b = New(Of(Float64), WithShape(2))
_, err = Dot(A, b, WithReuse(R))
assert.NotNil(err)
// T-T but misaligned shape
A = New(Of(Float64), WithShape(2, 3, 4))
B = New(Of(Float64), WithShape(4, 2, 3))
_, err = Dot(A, B)
assert.NotNil(err)
}
func TestOneDot(t *testing.T) {
assert := assert.New(t)
A := New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24)))
b := New(Of(Float64), WithShape(4), WithBacking(Range(Float64, 0, 4)))
R, err := Dot(A, b)
expectedShape := Shape{2, 3}
expectedData := []float64{
14, 38, 62,
86, 110, 134,
}
assert.Nil(err)
assert.Equal(expectedData, R.Data())
assert.Equal(expectedShape, R.Shape())
// 3T-vec with reuse
t.Logf("3T⋅vec with reuse")
R.Zero()
A = New(Of(Float64), WithShape(2, 3, 4), WithBacking(Range(Float64, 0, 24)))
R2, err := Dot(A, b, WithReuse(R))
expectedShape = Shape{2, 3}
expectedData = []float64{
14, 38, 62,
86, 110, 134,
}
assert.Nil(err)
assert.Equal(R, R2)
assert.Equal(expectedData, R2.Data())
assert.Equal(expectedShape, R2.Shape())
}
tensor-0.9.24/dense_mapreduce.go 0000664 0000000 0000000 00000001616 14265126151 0016601 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
// Apply applies a function to all the values in the tensor.
func (t *Dense) Apply(fn interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
var e Engine = t.e
if e == nil {
e = StdEng{}
}
if m, ok := e.(Mapper); ok {
return m.Map(fn, t, opts...)
}
return nil, errors.Errorf("Execution engine %T for %v not a mapper", e, t)
}
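// A minimal sketch of Apply under the default StdEng engine (illustrative
// only, not part of the original source); the func(float64) float64 closure
// signature is an assumption that holds for float64-backed tensors:
//
//	T := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	squared, err := T.Apply(func(x float64) float64 { return x * x })
//	_ = squared // []float64{1, 4, 9, 16, 25, 36} on success
//	_ = err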
// Reduce applies a reduction function and reduces the values along the given axis.
func (t *Dense) Reduce(fn interface{}, axis int, defaultValue interface{}) (retVal *Dense, err error) {
var e Engine = t.e
if e == nil {
e = StdEng{}
}
if rd, ok := e.(Reducer); ok {
var val Tensor
if val, err = rd.Reduce(fn, t, axis, defaultValue); err != nil {
err = errors.Wrapf(err, opFail, "Dense.Reduce")
return
}
retVal = val.(*Dense)
return
}
return nil, errors.Errorf("Engine %v is not a Reducer", e)
}
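// A minimal sketch of Reduce along an axis (illustrative only, not part of the
// original source); the two-argument closure and the zero identity value are
// assumptions that hold for float64-backed tensors under StdEng:
//
//	T := New(WithShape(2, 3), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
//	colSums, err := T.Reduce(func(a, b float64) float64 { return a + b }, 0, float64(0))
//	_ = colSums // a *Dense with shape (3): []float64{5, 7, 9}
//	_ = err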
tensor-0.9.24/dense_mask_filling.go 0000664 0000000 0000000 00000004561 14265126151 0017275 0 ustar 00root root 0000000 0000000 package tensor
import (
"unsafe"
)
// FillValue returns the value used to fill the invalid entries of a masked array
func (t *Dense) FillValue() interface{} {
switch t.Dtype() {
case Bool:
return true
case Int:
return int(999999)
case Int8:
return int8(99)
case Int16:
return int16(9999)
case Int32:
return int32(999999)
case Int64:
return int64(999999)
case Uint:
return uint(999999)
case Byte:
return byte(99)
case Uint8:
return uint8(99)
case Uint16:
return uint16(9999)
case Uint32:
return uint32(999999)
case Uint64:
return uint64(999999)
case Float32:
return float32(1.0e20)
case Float64:
return float64(1.0e20)
case Complex64:
return complex64(1.0e20 + 0i)
case Complex128:
return complex128(1.0e20 + 0i)
case String:
return `N/A`
case Uintptr:
return uintptr(0x999999)
case UnsafePointer:
return unsafe.Pointer(nil)
default:
return nil
}
}
// Filled returns a tensor with masked data replaced by the default fill value,
// or by an optionally passed value
func (t *Dense) Filled(val ...interface{}) (interface{}, error) {
tc := t.Clone().(*Dense)
if !t.IsMasked() {
return tc, nil
}
fillval := t.FillValue()
if len(val) > 0 {
fillval = val[0]
}
switch {
case tc.IsScalar():
if tc.mask[0] {
tc.Set(0, fillval)
}
case tc.IsRowVec() || tc.IsColVec():
sliceList := t.FlatMaskedContiguous()
for i := range sliceList {
tt, err := tc.Slice(nil, sliceList[i])
if err == nil { // fill only when the slice succeeded; tt would be nil on error
ts := tt.(*Dense)
ts.Memset(fillval)
}
}
default:
it := IteratorFromDense(tc)
for i, _, err := it.NextInvalid(); err == nil; i, _, err = it.NextInvalid() {
tc.Set(i, fillval)
}
}
return tc, nil
}
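// A minimal sketch of Filled on a masked tensor (illustrative only, not part
// of the original source); the mask is produced with MaskedEqual, and -1 is an
// arbitrary caller-supplied fill value:
//
//	T := New(WithShape(4), WithBacking([]float64{1, 2, 3, 2}))
//	_ = T.MaskedEqual(2.0)      // mask every entry equal to 2
//	filled, _ := T.Filled(-1.0) // masked entries become -1 in a clone; T is untouched
//	_ = filled.(*Dense)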
// FilledInplace replaces masked data with the default fill value,
// or with an optionally passed value
func (t *Dense) FilledInplace(val ...interface{}) (interface{}, error) {
if !t.IsMasked() {
return t, nil
}
fillval := t.FillValue()
if len(val) > 0 {
fillval = val[0]
}
switch {
case t.IsScalar():
if t.mask[0] {
t.Set(0, fillval)
}
case t.IsRowVec() || t.IsColVec():
sliceList := t.FlatMaskedContiguous()
for i := range sliceList {
tt, err := t.Slice(nil, sliceList[i])
if err == nil { // fill only when the slice succeeded; tt would be nil on error
ts := tt.(*Dense)
ts.Memset(fillval)
}
}
default:
it := IteratorFromDense(t)
for i, _, err := it.NextInvalid(); err == nil; i, _, err = it.NextInvalid() {
t.Set(i, fillval)
}
}
return t, nil
}
tensor-0.9.24/dense_mask_inspection.go 0000664 0000000 0000000 00000014367 14265126151 0020031 0 ustar 00root root 0000000 0000000 package tensor
type maskedReduceFn func(Tensor) interface{}
// MaskedReduce applies a reduction function of type maskedReduceFn to the mask, and returns
// either a scalar value or another array (*Dense) when reducing along an axis; it returns -1 if the axis is out of bounds
func MaskedReduce(t *Dense, retType Dtype, fn maskedReduceFn, axis ...int) interface{} {
if len(axis) == 0 || t.IsVector() {
return fn(t)
}
ax := axis[0]
if ax >= t.Dims() {
return -1
}
// create object to be used for slicing
slices := make([]Slice, t.Dims())
// calculate shape of tensor to be returned
slices[ax] = makeRS(0, 0)
tt, _ := t.Slice(slices...)
ts := tt.(*Dense)
retVal := NewDense(retType, ts.shape) //retVal is array to be returned
it := NewIterator(retVal.Info())
// iterate through retVal
slices[ax] = makeRS(0, t.shape[ax])
for _, err := it.Next(); err == nil; _, err = it.Next() {
coord := it.Coord()
k := 0
for d := range slices {
if d != ax {
slices[d] = makeRS(coord[k], coord[k]+1)
k++
} else {
slices[d] = nil
}
}
tt, _ = t.Slice(slices...)
ts = tt.(*Dense)
retVal.SetAt(fn(ts), coord...)
}
return retVal
}
// MaskedAny returns true if any mask element evaluates to true.
// If the object is not masked, it returns false.
// !!! Not the same as numpy's, which looks at data elements and not at the mask.
// Instead, it is equivalent to numpy's ma.getmask(t).any(axis)
func (t *Dense) MaskedAny(axis ...int) interface{} {
return MaskedReduce(t, Bool, doMaskAny, axis...)
}
// MaskedAll returns true if all mask elements evaluate to true.
// If the object is not masked, it returns false.
// !!! Not the same as numpy's, which looks at data elements and not at the mask.
// Instead, it is equivalent to numpy's ma.getmask(t).all(axis)
func (t *Dense) MaskedAll(axis ...int) interface{} {
return MaskedReduce(t, Bool, doMaskAll, axis...)
}
// MaskedCount counts the masked elements of the array (optionally along the given axis)
// returns -1 if axis out of bounds
func (t *Dense) MaskedCount(axis ...int) interface{} {
return MaskedReduce(t, Int, doMaskCt, axis...)
}
// NonMaskedCount counts the non-masked elements of the array (optionally along the given axis)
// returns -1 if axis out of bounds
func (t *Dense) NonMaskedCount(axis ...int) interface{} {
return MaskedReduce(t, Int, doNonMaskCt, axis...)
}
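// A hedged sketch of the counting helpers above (illustrative only), mirroring
// the numpy ma.count_masked behaviour referenced in the comments:
//
//	T := New(Of(Float64), WithShape(2, 3))
//	T.ResetMask(false)
//	T.mask[0], T.mask[4] = true, true
//	_ = T.MaskedCount()    // 2: total number of masked elements
//	_ = T.NonMaskedCount() // 4
//	_ = T.MaskedCount(0)   // a *Dense of per-column counts along axis 0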
func doMaskAll(T Tensor) interface{} {
switch t := T.(type) {
case *Dense:
if !t.IsMasked() {
return false
}
m := t.mask
if len(t.mask) == t.Size() {
for _, v := range m {
if !v {
return false
}
}
} else {
it := IteratorFromDense(t)
i, _, _ := it.NextValid()
if i != -1 {
return false
}
}
return true
default:
panic("Incompatible type")
}
}
func doMaskAny(T Tensor) interface{} {
switch t := T.(type) {
case *Dense:
if !t.IsMasked() {
return false
}
m := t.mask
if len(t.mask) == t.Size() {
for _, v := range m {
if v {
return true
}
}
} else {
it := IteratorFromDense(t)
i, _, _ := it.NextInvalid()
if i != -1 {
return true
}
}
return false
default:
panic("Incompatible type")
}
}
func doMaskCt(T Tensor) interface{} {
switch t := T.(type) {
case *Dense:
// non masked case
if !t.IsMasked() {
return 0
}
count := 0
m := t.mask
if len(t.mask) == t.Size() {
for _, v := range m {
if v {
count++
}
}
} else {
it := IteratorFromDense(t)
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
count++
}
}
return count
default:
panic("Incompatible type")
}
}
func doNonMaskCt(T Tensor) interface{} {
switch t := T.(type) {
case *Dense:
if !t.IsMasked() {
return t.Size()
}
return t.Size() - doMaskCt(t).(int)
default:
panic("Incompatible type")
}
}
/* -----------
************ Finding masked data
----------*/
// FlatNotMaskedContiguous is used to find contiguous unmasked data in a masked array.
// Applies to a flattened version of the array.
// Returns: a sorted sequence of slices (start index, end index).
func (t *Dense) FlatNotMaskedContiguous() []Slice {
sliceList := make([]Slice, 0, 4)
it := IteratorFromDense(t)
for start, _, err := it.NextValid(); err == nil; start, _, err = it.NextValid() {
end, _, _ := it.NextInvalid()
if end == -1 {
end = t.Size()
}
sliceList = append(sliceList, makeRS(start, end))
}
return sliceList
}
// FlatMaskedContiguous is used to find contiguous masked data in a masked array.
// Applies to a flattened version of the array.
// Returns: a sorted sequence of slices (start index, end index).
func (t *Dense) FlatMaskedContiguous() []Slice {
sliceList := make([]Slice, 0, 4)
it := IteratorFromDense(t)
for start, _, err := it.NextInvalid(); err == nil; start, _, err = it.NextInvalid() {
end, _, _ := it.NextValid()
if end == -1 {
end = t.Size()
}
sliceList = append(sliceList, makeRS(start, end))
}
return sliceList
}
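// A hedged sketch of finding clumps of masked data in a flattened view
// (illustrative only, in the spirit of the tests for this file):
//
//	T := NewDense(Int, []int{1, 10})
//	T.ResetMask(false)
//	if s, err := T.Slice(nil, makeRS(3, 7)); err == nil {
//		s.(*Dense).ResetMask(true) // mask positions 3..6
//	}
//	_ = T.FlatMaskedContiguous()    // one slice covering [3, 7)
//	_ = T.FlatNotMaskedContiguous() // two slices: [0, 3) and [7, 10)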
// FlatNotMaskedEdges is used to find the indices of the first and last unmasked values
// Applies to a flattened version of the array.
// Returns: A pair of ints. -1 if all values are masked.
func (t *Dense) FlatNotMaskedEdges() (int, int) {
if !t.IsMasked() {
return 0, t.Size() - 1
}
var start, end int
it := IteratorFromDense(t)
it.SetForward()
start, _, err := it.NextValid()
if err != nil {
return -1, -1
}
it.SetReverse()
end, _, _ = it.NextValid()
return start, end
}
// FlatMaskedEdges is used to find the indices of the first and last masked values
// Applies to a flattened version of the array.
// Returns: A pair of ints. -1 if all values are unmasked.
func (t *Dense) FlatMaskedEdges() (int, int) {
if !t.IsMasked() {
return 0, t.Size() - 1
}
var start, end int
it := IteratorFromDense(t)
it.SetForward()
start, _, err := it.NextInvalid()
if err != nil {
return -1, -1
}
it.SetReverse()
end, _, _ = it.NextInvalid()
return start, end
}
// ClumpMasked returns a list of slices corresponding to the masked clumps of a 1-D array
// Added to match numpy function names
func (t *Dense) ClumpMasked() []Slice {
return t.FlatMaskedContiguous()
}
// ClumpUnmasked returns a list of slices corresponding to the unmasked clumps of a 1-D array
// Added to match numpy function names
func (t *Dense) ClumpUnmasked() []Slice {
return t.FlatNotMaskedContiguous()
}
tensor-0.9.24/dense_mask_inspection_test.go 0000664 0000000 0000000 00000011657 14265126151 0021067 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestMaskedInspection(t *testing.T) {
assert := assert.New(t)
var retT *Dense
//vector case
T := New(Of(Bool), WithShape(1, 12))
T.ResetMask(false)
assert.False(T.MaskedAny().(bool))
for i := 0; i < 12; i += 2 {
T.mask[i] = true
}
assert.True(T.MaskedAny().(bool))
assert.True(T.MaskedAny(0).(bool))
assert.False(T.MaskedAll().(bool))
assert.False(T.MaskedAll(0).(bool))
assert.Equal(6, T.MaskedCount())
assert.Equal(6, T.MaskedCount(0))
assert.Equal(6, T.NonMaskedCount())
assert.Equal(6, T.NonMaskedCount(0))
//contiguous mask case
/*equivalent python code
---------
import numpy.ma as ma
a = ma.arange(12).reshape((2, 3, 2))
a[0,0,0]=ma.masked
a[0,2,0]=ma.masked
print(ma.getmask(a).all())
print(ma.getmask(a).any())
print(ma.count_masked(a))
print(ma.count(a))
print(ma.getmask(a).all(0))
print(ma.getmask(a).any(0))
print(ma.count_masked(a,0))
print(ma.count(a,0))
print(ma.getmask(a).all(1))
print(ma.getmask(a).any(1))
print(ma.count_masked(a,1))
print(ma.count(a,1))
print(ma.getmask(a).all(2))
print(ma.getmask(a).any(2))
print(ma.count_masked(a,2))
print(ma.count(a,2))
-----------
*/
T = New(Of(Bool), WithShape(2, 3, 2))
T.ResetMask(false)
for i := 0; i < 2; i += 2 {
for j := 0; j < 3; j += 2 {
for k := 0; k < 2; k += 2 {
a, b, c := T.strides[0], T.strides[1], T.strides[2]
T.mask[i*a+b*j+c*k] = true
}
}
}
assert.Equal([]bool{true, false, false, false, true, false,
false, false, false, false, false, false}, T.mask)
assert.Equal(false, T.MaskedAll())
assert.Equal(true, T.MaskedAny())
assert.Equal(2, T.MaskedCount())
assert.Equal(10, T.NonMaskedCount())
retT = T.MaskedAll(0).(*Dense)
assert.Equal([]int{3, 2}, []int(retT.shape))
assert.Equal([]bool{false, false, false, false, false, false}, retT.Bools())
retT = T.MaskedAny(0).(*Dense)
assert.Equal([]int{3, 2}, []int(retT.shape))
assert.Equal([]bool{true, false, false, false, true, false}, retT.Bools())
retT = T.MaskedCount(0).(*Dense)
assert.Equal([]int{3, 2}, []int(retT.shape))
assert.Equal([]int{1, 0, 0, 0, 1, 0}, retT.Ints())
retT = T.NonMaskedCount(0).(*Dense)
assert.Equal([]int{1, 2, 2, 2, 1, 2}, retT.Ints())
retT = T.MaskedAll(1).(*Dense)
assert.Equal([]int{2, 2}, []int(retT.shape))
assert.Equal([]bool{false, false, false, false}, retT.Bools())
retT = T.MaskedAny(1).(*Dense)
assert.Equal([]int{2, 2}, []int(retT.shape))
assert.Equal([]bool{true, false, false, false}, retT.Bools())
retT = T.MaskedCount(1).(*Dense)
assert.Equal([]int{2, 2}, []int(retT.shape))
assert.Equal([]int{2, 0, 0, 0}, retT.Ints())
retT = T.NonMaskedCount(1).(*Dense)
assert.Equal([]int{1, 3, 3, 3}, retT.Ints())
retT = T.MaskedAll(2).(*Dense)
assert.Equal([]int{2, 3}, []int(retT.shape))
assert.Equal([]bool{false, false, false, false, false, false}, retT.Bools())
retT = T.MaskedAny(2).(*Dense)
assert.Equal([]int{2, 3}, []int(retT.shape))
assert.Equal([]bool{true, false, true, false, false, false}, retT.Bools())
retT = T.MaskedCount(2).(*Dense)
assert.Equal([]int{2, 3}, []int(retT.shape))
assert.Equal([]int{1, 0, 1, 0, 0, 0}, retT.Ints())
retT = T.NonMaskedCount(2).(*Dense)
assert.Equal([]int{1, 2, 1, 2, 2, 2}, retT.Ints())
}
func TestMaskedFindContiguous(t *testing.T) {
assert := assert.New(t)
T := NewDense(Int, []int{1, 100})
T.ResetMask(false)
retSL := T.FlatNotMaskedContiguous()
assert.Equal(1, len(retSL))
assert.Equal(rs{0, 100, 1}, retSL[0].(rs))
// test ability to find unmasked regions
sliceList := make([]Slice, 0, 4)
sliceList = append(sliceList, makeRS(3, 9), makeRS(14, 27), makeRS(51, 72), makeRS(93, 100))
T.ResetMask(true)
for i := range sliceList {
tt, _ := T.Slice(nil, sliceList[i])
ts := tt.(*Dense)
ts.ResetMask(false)
}
retSL = T.FlatNotMaskedContiguous()
assert.Equal(sliceList, retSL)
retSL = T.ClumpUnmasked()
assert.Equal(sliceList, retSL)
// test ability to find masked regions
T.ResetMask(false)
for i := range sliceList {
tt, _ := T.Slice(nil, sliceList[i])
ts := tt.(*Dense)
ts.ResetMask(true)
}
retSL = T.FlatMaskedContiguous()
assert.Equal(sliceList, retSL)
retSL = T.ClumpMasked()
assert.Equal(sliceList, retSL)
}
func TestMaskedFindEdges(t *testing.T) {
assert := assert.New(t)
T := NewDense(Int, []int{1, 100})
sliceList := make([]Slice, 0, 4)
sliceList = append(sliceList, makeRS(0, 9), makeRS(14, 27), makeRS(51, 72), makeRS(93, 100))
// test ability to find unmasked edges
T.ResetMask(false)
for i := range sliceList {
tt, _ := T.Slice(nil, sliceList[i])
ts := tt.(*Dense)
ts.ResetMask(true)
}
start, end := T.FlatNotMaskedEdges()
assert.Equal(9, start)
assert.Equal(92, end)
// test ability to find masked edges
T.ResetMask(true)
for i := range sliceList {
tt, _ := T.Slice(nil, sliceList[i])
ts := tt.(*Dense)
ts.ResetMask(false)
}
start, end = T.FlatMaskedEdges()
assert.Equal(9, start)
assert.Equal(92, end)
}
tensor-0.9.24/dense_maskcmp_methods.go 0000664 0000000 0000000 00000100351 14265126151 0020006 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"math"
"reflect"
"github.com/pkg/errors"
)
/* MaskedEqual */
// MaskedEqual sets the mask to true where the corresponding data is equal to val
// Any values must be the same type as the tensor
func (t *Dense) MaskedEqual(val1 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a == x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a == x)
}
}
}
return nil
}
/* MaskedNotEqual */
// MaskedNotEqual sets the mask to true where the corresponding data is not equal to val
// Any values must be the same type as the tensor
func (t *Dense) MaskedNotEqual(val1 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a != x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a != x)
}
}
}
return nil
}
/* MaskedValues */
// MaskedValues sets the mask to true where the corresponding data is approximately equal to val1,
// within a tolerance derived from val2 and the optional val3. It applies only to floating point
// tensors, and the values must be the same type as the tensor
func (t *Dense) MaskedValues(val1 interface{}, val2 interface{}, val3 ...interface{}) (err error) {
if !isFloat(t.t) {
err = errors.Errorf("Can only do MaskedValues with floating point types")
return
}
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
y := val2.(float32)
delta := float64(1.0e-8)
if len(val3) > 0 {
delta = float64(val3[0].(float32)) + float64(y)*math.Abs(float64(x))
}
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (math.Abs(float64(a-x)) <= delta)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (math.Abs(float64(a-x)) <= delta)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
y := val2.(float64)
delta := float64(1.0e-8)
if len(val3) > 0 {
delta = float64(val3[0].(float64)) + float64(y)*math.Abs(float64(x))
}
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (math.Abs(float64(a-x)) <= delta)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (math.Abs(float64(a-x)) <= delta)
}
}
}
return nil
}
/* MaskedGreater */
// MaskedGreater sets the mask to true where the corresponding data is greater than val
// Any values must be the same type as the tensor
func (t *Dense) MaskedGreater(val1 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a > x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a > x)
}
}
}
return nil
}
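// sketchMaskedGreaterUsage is a hand-written usage sketch, not generated code:
// the helper name is ours, while New, WithBacking and MaskedGreater are this
// package's own identifiers. It marks every element strictly greater than the
// cutoff in the tensor's mask.
func sketchMaskedGreaterUsage() {
	T := New(WithBacking([]float64{1, 5, 9})) // a Float64 vector [1 5 9]
	if err := T.MaskedGreater(float64(4)); err != nil {
		panic(err)
	}
	// T.mask is now [false true true]: only 5 and 9 exceed 4.
}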
/* MaskedGreaterEqual */
// MaskedGreaterEqual sets the mask to true wherever the corresponding data element is greater than or equal to val1.
// val1 must be of the same type as the tensor's data.
func (t *Dense) MaskedGreaterEqual(val1 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a >= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a >= x)
}
}
}
return nil
}
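// sketchMaskedGreaterEqualUsage is a hand-written usage sketch, not generated
// code: the helper name is ours, while New, WithBacking and MaskedGreaterEqual
// are this package's own identifiers. It shows that the comparison is
// inclusive of the cutoff itself.
func sketchMaskedGreaterEqualUsage() {
	T := New(WithBacking([]float64{1, 4, 9})) // a Float64 vector [1 4 9]
	if err := T.MaskedGreaterEqual(float64(4)); err != nil {
		panic(err)
	}
	// T.mask is now [false true true]: 4 is masked because the test is >=.
}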
/* MaskedLess */
// MaskedLess sets the mask to true wherever the corresponding data element is strictly less than val1.
// val1 must be of the same type as the tensor's data.
func (t *Dense) MaskedLess(val1 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a < x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a < x)
}
}
}
return nil
}
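// sketchMaskedLessUsage is a hand-written usage sketch (the helper name is
// ours; the constructors and method are this package's own). Elements strictly
// below the cutoff are masked.
func sketchMaskedLessUsage() {
	T := New(WithBacking([]float64{1, 5, 9}))
	if err := T.MaskedLess(float64(5)); err != nil {
		panic(err)
	}
	// T.mask is now [true false false]: only 1 is strictly less than 5.
}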
/* MaskedLessEqual */
// MaskedLessEqual sets the mask to true wherever the corresponding data element is less than or equal to val1.
// val1 must be of the same type as the tensor's data.
func (t *Dense) MaskedLessEqual(val1 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = (a <= x)
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || (a <= x)
}
}
}
return nil
}
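// sketchMaskedLessEqualUsage is a hand-written usage sketch (the helper name
// is ours; the constructors and method are this package's own). The comparison
// includes the cutoff itself.
func sketchMaskedLessEqualUsage() {
	T := New(WithBacking([]float64{1, 5, 9}))
	if err := T.MaskedLessEqual(float64(5)); err != nil {
		panic(err)
	}
	// T.mask is now [true true false]: 1 and 5 satisfy <=.
}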
/* MaskedInside */
// MaskedInside sets the mask to true wherever the corresponding data element lies inside the closed range [val1, val2] (both bounds inclusive).
// val1 and val2 must be of the same type as the tensor's data.
func (t *Dense) MaskedInside(val1 interface{}, val2 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
y := val2.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
y := val2.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
y := val2.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
y := val2.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
y := val2.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
y := val2.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
y := val2.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
y := val2.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
y := val2.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
y := val2.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
y := val2.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
y := val2.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
y := val2.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a >= x) && (a <= y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a >= x) && (a <= y))
}
}
}
return nil
}
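// sketchMaskedInsideUsage is a hand-written usage sketch (the helper name is
// ours; the constructors and method are this package's own). Both bounds are
// inclusive, as the (a >= x) && (a <= y) test above shows.
func sketchMaskedInsideUsage() {
	T := New(WithBacking([]float64{1, 2, 5, 9}))
	if err := T.MaskedInside(float64(2), float64(6)); err != nil {
		panic(err)
	}
	// T.mask is now [false true true false]: 2 and 5 fall inside [2, 6].
}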
/* MaskedOutside */
// MaskedOutside sets the mask to true wherever the corresponding data element lies outside the closed range [val1, val2], i.e. strictly below val1 or strictly above val2.
// val1 and val2 must be of the same type as the tensor's data.
func (t *Dense) MaskedOutside(val1 interface{}, val2 interface{}) (err error) {
if !t.IsMasked() {
t.makeMask()
}
switch t.t.Kind() {
case reflect.Int:
data := t.Ints()
mask := t.mask
x := val1.(int)
y := val2.(int)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Int8:
data := t.Int8s()
mask := t.mask
x := val1.(int8)
y := val2.(int8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Int16:
data := t.Int16s()
mask := t.mask
x := val1.(int16)
y := val2.(int16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Int32:
data := t.Int32s()
mask := t.mask
x := val1.(int32)
y := val2.(int32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Int64:
data := t.Int64s()
mask := t.mask
x := val1.(int64)
y := val2.(int64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Uint:
data := t.Uints()
mask := t.mask
x := val1.(uint)
y := val2.(uint)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Uint8:
data := t.Uint8s()
mask := t.mask
x := val1.(uint8)
y := val2.(uint8)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Uint16:
data := t.Uint16s()
mask := t.mask
x := val1.(uint16)
y := val2.(uint16)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Uint32:
data := t.Uint32s()
mask := t.mask
x := val1.(uint32)
y := val2.(uint32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Uint64:
data := t.Uint64s()
mask := t.mask
x := val1.(uint64)
y := val2.(uint64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Float32:
data := t.Float32s()
mask := t.mask
x := val1.(float32)
y := val2.(float32)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.Float64:
data := t.Float64s()
mask := t.mask
x := val1.(float64)
y := val2.(float64)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
case reflect.String:
data := t.Strings()
mask := t.mask
x := val1.(string)
y := val2.(string)
if t.maskIsSoft {
for i := range data {
a := data[i]
mask[i] = ((a < x) || (a > y))
}
} else {
for i := range data {
a := data[i]
mask[i] = mask[i] || ((a < x) || (a > y))
}
}
}
return nil
}
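// sketchMaskedOutsideUsage is a hand-written usage sketch (the helper name is
// ours; the constructors and method are this package's own). It is the
// complement of MaskedInside: elements strictly below val1 or strictly above
// val2 are masked.
func sketchMaskedOutsideUsage() {
	T := New(WithBacking([]float64{1, 2, 5, 9}))
	if err := T.MaskedOutside(float64(2), float64(6)); err != nil {
		panic(err)
	}
	// T.mask is now [true false false true]: 1 and 9 fall outside [2, 6].
}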
tensor-0.9.24/dense_maskcmp_methods_test.go 0000664 0000000 0000000 00000347071 14265126151 0021061 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
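// The generated tests below all follow one hand-checked pattern: a tensor of
// shape (2, 3, 4, 5) holds 120 elements with data[i] = i; after the mask is
// reset, MaskedEqual is applied to the five values 0, 10, 20, 30 and 40, so a
// full iterator pass sees 120 elements, NextValid sees 115 and NextInvalid
// sees the 5 masked ones. (Comment added for orientation; not generated.)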
/* MaskedEqual */
func TestDense_MaskedEqual_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedEqual_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedNotEqual */
func TestDense_MaskedNotEqual_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedNotEqual_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedValues */
func TestDense_MaskedValues_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedValues_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedGreater */
func TestDense_MaskedGreater_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreater_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
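// The _Str variants fill the tensor with fmt.Sprint(i), so the masking
// operations compare decimal strings (presumably using Go's native,
// lexicographic string ordering); the probe values 0, 1, 2 and 22 are such
// that the same index assertions still hold.
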
func TestDense_MaskedGreater_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedGreaterEqual */
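// The MaskedGreaterEqual tests below reuse the shared masking/iterator
// checks, instantiated once per supported element type: Int, Int8, Int16,
// Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Float32, Float64 and
// String.
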
func TestDense_MaskedGreaterEqual_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedGreaterEqual_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedLess */
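// As in the sections above, the MaskedLess test bodies are shared with the
// other masked-comparison sections: they exercise MaskedEqual,
// MaskedNotEqual, MaskedInside, MaskedOutside and the masked iterators rather
// than calling MaskedLess directly.
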
func TestDense_MaskedLess_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLess_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedLessEqual */
func TestDense_MaskedLessEqual_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedLessEqual_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedInside */
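// MaskedInside(v1, v2) masks values in the closed interval [v1, v2]: after
// MaskedInside(1, 22) the assertions expect mask[1] and mask[22] to be set
// while mask[0] and mask[23] remain clear.
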
func TestDense_MaskedInside_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedInside_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
/* MaskedOutside */
func TestDense_MaskedOutside_I(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Ints()
for i := range data {
data[i] = int(i)
}
T.MaskedEqual(int(0))
assert.True(T.IsMasked())
T.MaskedEqual(int(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int(1), int(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int(1), int(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_I8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int8s()
for i := range data {
data[i] = int8(i)
}
T.MaskedEqual(int8(0))
assert.True(T.IsMasked())
T.MaskedEqual(int8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int8(1), int8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int8(1), int8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_I16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int16s()
for i := range data {
data[i] = int16(i)
}
T.MaskedEqual(int16(0))
assert.True(T.IsMasked())
T.MaskedEqual(int16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int16(1), int16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int16(1), int16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_I32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int32s()
for i := range data {
data[i] = int32(i)
}
T.MaskedEqual(int32(0))
assert.True(T.IsMasked())
T.MaskedEqual(int32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int32(1), int32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int32(1), int32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_I64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Int64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Int64s()
for i := range data {
data[i] = int64(i)
}
T.MaskedEqual(int64(0))
assert.True(T.IsMasked())
T.MaskedEqual(int64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(int64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(int64(1), int64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(int64(1), int64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(int64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_U(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uints()
for i := range data {
data[i] = uint(i)
}
T.MaskedEqual(uint(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint(1), uint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint(1), uint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_U8(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint8), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint8s()
for i := range data {
data[i] = uint8(i)
}
T.MaskedEqual(uint8(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint8(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint8(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint8(1), uint8(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint8(1), uint8(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint8(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_U16(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint16), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint16s()
for i := range data {
data[i] = uint16(i)
}
T.MaskedEqual(uint16(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint16(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint16(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint16(1), uint16(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint16(1), uint16(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint16(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_U32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint32s()
for i := range data {
data[i] = uint32(i)
}
T.MaskedEqual(uint32(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint32(1), uint32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint32(1), uint32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_U64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Uint64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Uint64s()
for i := range data {
data[i] = uint64(i)
}
T.MaskedEqual(uint64(0))
assert.True(T.IsMasked())
T.MaskedEqual(uint64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(uint64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(uint64(1), uint64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(uint64(1), uint64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(uint64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_F32(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float32), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float32s()
for i := range data {
data[i] = float32(i)
}
T.MaskedEqual(float32(0))
assert.True(T.IsMasked())
T.MaskedEqual(float32(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float32(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float32(1), float32(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float32(1), float32(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float32(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_F64(t *testing.T) {
assert := assert.New(t)
T := New(Of(Float64), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Float64s()
for i := range data {
data[i] = float64(i)
}
T.MaskedEqual(float64(0))
assert.True(T.IsMasked())
T.MaskedEqual(float64(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(float64(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(float64(1), float64(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(float64(1), float64(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(float64(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
func TestDense_MaskedOutside_Str(t *testing.T) {
assert := assert.New(t)
T := New(Of(String), WithShape(2, 3, 4, 5))
assert.False(T.IsMasked())
data := T.Strings()
for i := range data {
data[i] = fmt.Sprint(i)
}
T.MaskedEqual(fmt.Sprint(0))
assert.True(T.IsMasked())
T.MaskedEqual(fmt.Sprint(1))
assert.True(T.mask[0] && T.mask[1])
T.MaskedNotEqual(fmt.Sprint(2))
assert.False(T.mask[2] && !(T.mask[0]))
T.ResetMask()
T.MaskedInside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(!T.mask[0] && !T.mask[23] && T.mask[1] && T.mask[22])
T.ResetMask()
T.MaskedOutside(fmt.Sprint(1), fmt.Sprint(22))
assert.True(T.mask[0] && T.mask[23] && !T.mask[1] && !T.mask[22])
T.ResetMask()
for i := 0; i < 5; i++ {
T.MaskedEqual(fmt.Sprint(i * 10))
}
it := IteratorFromDense(T)
j := 0
for _, err := it.Next(); err == nil; _, err = it.Next() {
j++
}
it.Reset()
assert.Equal(120, j)
j = 0
for _, _, err := it.NextValid(); err == nil; _, _, err = it.NextValid() {
j++
}
it.Reset()
assert.Equal(115, j)
j = 0
for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() {
j++
}
it.Reset()
assert.Equal(5, j)
}
tensor-0.9.24/dense_matop.go 0000664 0000000 0000000 00000022410 14265126151 0015747 0 ustar 00root root 0000000 0000000 package tensor
import (
"github.com/pkg/errors"
)
// T performs a thunked transpose. It doesn't actually do anything, except store extra information about the post-transposed shapes and strides
// Usually this is more than enough, as BLAS will handle the rest of the transpose
func (t *Dense) T(axes ...int) (err error) {
var transform AP
if transform, axes, err = t.AP.T(axes...); err != nil {
return handleNoOp(err)
}
// are there any old transposes that need to be done first?
// this is important, because any old transposes for dim >=3 are merely permutations of the strides
if !t.old.IsZero() {
if t.IsVector() {
// the transform that was calculated was a waste of time - return it to the pool then untranspose
t.UT()
return
}
// check if the current axes are just a reverse of the previous transpose's
isReversed := true
for i, s := range t.oshape() {
if transform.Shape()[i] != s {
isReversed = false
break
}
}
// if it is reversed, well, we just restore the backed up one
if isReversed {
t.UT()
return
}
// cool beans. No funny reversals. We'd have to actually do the transpose then
t.Transpose()
}
// swap out the old and the new
t.old = t.AP
t.transposeWith = axes
t.AP = transform
return nil
}
// UT is a quick way to untranspose a currently transposed *Dense
// The reason for having this is quite simply illustrated by this problem:
// T = NewTensor(WithShape(2,3,4))
// T.T(1,2,0)
//
// To untranspose that, we'd need to apply a transpose of (2,0,1).
// This means having to keep track and calculate the transposes.
// Instead, here's a helpful convenience function to instantly untranspose any previous transposes.
//
// Nothing will happen if there was no previous transpose
func (t *Dense) UT() {
if !t.old.IsZero() {
ReturnInts(t.transposeWith)
t.AP = t.old
t.old.zeroOnly()
t.transposeWith = nil
}
}
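// NOTE: the sketch below is illustrative only and not part of the original API. The
// function name and the concrete shape/backing are assumptions chosen to show how a
// thunked T(), UT() and a data-moving Transpose() interact.
func exampleThunkedTranspose() {
	T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	_ = T.T()         // thunked: shape becomes (3, 2) and strides are permuted; data is untouched
	T.UT()            // undoes the pending transpose, restoring the original shape and strides
	_ = T.T()         // thunk the transpose again...
	_ = T.Transpose() // ...and this time physically rearrange the underlying data
}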
// SafeT is exactly like T(), except it returns a new *Dense. The data is copied over but not moved: the returned tensor carries the transpose as pending metadata, just as T() does.
func (t *Dense) SafeT(axes ...int) (retVal *Dense, err error) {
var transform AP
if transform, axes, err = t.AP.T(axes...); err != nil {
if err = handleNoOp(err); err != nil {
return
}
}
retVal = recycledDense(t.t, Shape{t.len()}, WithEngine(t.e))
copyDense(retVal, t)
retVal.e = t.e
retVal.oe = t.oe
retVal.AP = transform
t.AP.CloneTo(&retVal.old)
retVal.transposeWith = axes
return
}
// At returns the value at the given coordinate
func (t *Dense) At(coords ...int) (interface{}, error) {
if !t.IsNativelyAccessible() {
return nil, errors.Errorf(inaccessibleData, t)
}
if len(coords) != t.Dims() {
return nil, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return nil, errors.Wrap(err, "At()")
}
return t.Get(at), nil
}
// MaskAt returns the value of the mask at a given coordinate.
// It returns false (valid) if the tensor is not masked.
func (t *Dense) MaskAt(coords ...int) (bool, error) {
if !t.IsMasked() {
return false, nil
}
if !t.IsNativelyAccessible() {
return false, errors.Errorf(inaccessibleData, t)
}
if len(coords) != t.Dims() {
return true, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return true, errors.Wrap(err, "MaskAt()")
}
return t.mask[at], nil
}
// SetAt sets the value at the given coordinate
func (t *Dense) SetAt(v interface{}, coords ...int) error {
if !t.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, t)
}
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.Set(at, v)
return nil
}
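// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows coordinate-based access via At and SetAt.
func exampleAtSetAt() {
	T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	v, err := T.At(1, 2) // row 1, column 2: float64(5)
	if err != nil {
		return
	}
	_ = v
	_ = T.SetAt(float64(10), 1, 2) // overwrite the same element in place
}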
// SetMaskAtIndex sets the value of the mask at a given index
func (t *Dense) SetMaskAtIndex(v bool, i int) error {
if !t.IsMasked() {
return nil
}
t.mask[i] = v
return nil
}
// SetMaskAt sets the mask value at the given coordinate
func (t *Dense) SetMaskAt(v bool, coords ...int) error {
if !t.IsMasked() {
return nil
}
if !t.IsNativelyAccessible() {
return errors.Errorf(inaccessibleData, t)
}
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.mask[at] = v
return nil
}
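// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows how the mask accessors above
// combine with MaskedEqual.
func exampleMaskAccess() {
	T := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	T.MaskedEqual(float64(0))     // mask every element equal to 0
	masked, err := T.MaskAt(0, 0) // true: (0, 0) holds a masked value
	if err != nil {
		return
	}
	_ = masked
	_ = T.SetMaskAt(false, 0, 0) // unmask it again
}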
// CopyTo copies the underlying data to the destination *Dense. The original data is untouched.
// Note: CopyTo doesn't care about the metadata of the destination *Dense. Take for example:
// T = NewTensor(WithShape(6))
// T2 = NewTensor(WithShape(2,3))
// err = T.CopyTo(T2) // err == nil
//
// The only time that this will fail is if the underlying sizes are different
func (t *Dense) CopyTo(other *Dense) error {
if other == t {
return nil // nothing to copy to. Maybe return NoOpErr?
}
if other.Size() != t.Size() {
return errors.Errorf(sizeMismatch, t.Size(), other.Size())
}
// easy peasy lemon squeezy
if t.viewOf == 0 && other.viewOf == 0 {
copyDense(other, t)
return nil
}
// TODO: use copyDenseIter
return errors.Errorf(methodNYI, "CopyTo", "views")
}
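// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and shapes are assumptions. It shows that CopyTo only requires the
// underlying sizes to match - the shapes may differ.
func exampleCopyTo() {
	src := New(WithShape(6), WithBacking([]float64{1, 2, 3, 4, 5, 6}))
	dst := New(Of(Float64), WithShape(2, 3))
	if err := src.CopyTo(dst); err != nil {
		return // only fails when the underlying sizes differ
	}
}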
// Narrow narrows the tensor along the given dimension, returning a view over the half-open range [start, start+length) of that dimension.
func (t *Dense) Narrow(dim, start, length int) (View, error) {
dim = resolveAxis(dim, t.Dims())
slices := make([]Slice, MinInt(dim+1, t.Dims()))
slices[dim] = S(start, start+length, 1)
return t.Slice(slices...)
}
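// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows Narrow as a single-axis slice.
func exampleNarrow() {
	T := New(WithShape(3, 3), WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}))
	v, err := T.Narrow(1, 1, 2) // keep columns 1 and 2; equivalent to T[:, 1:3]
	if err != nil {
		return
	}
	_ = v.Materialize() // materialize the view into its own contiguous backing
}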
// Slice performs slicing on the *Dense Tensor. It returns a view which shares the same underlying memory as the original *Dense.
//
// Given:
// T = NewTensor(WithShape(2,2), WithBacking(RangeFloat64(0,4)))
// V, _ := T.Slice(nil, singleSlice(1)) // T[:, 1]
//
// Any modification to the values in V, will be reflected in T as well.
//
// The method treats <nil> as equivalent to a colon slice. T.Slice(nil) is equivalent to T[:] in Numpy syntax
func (t *Dense) Slice(slices ...Slice) (retVal View, err error) {
var newAP AP
var ndStart, ndEnd int
if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil {
return
}
view := borrowDense()
view.t = t.t
view.e = t.e
view.oe = t.oe
view.flag = t.flag
view.AP = newAP
view.setParentTensor(t)
t.sliceInto(ndStart, ndEnd, &view.array)
if t.IsMasked() {
view.mask = t.mask[ndStart:ndEnd]
}
return view, err
}
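// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows that Slice returns a view sharing
// the original backing memory.
func exampleSliceView() {
	T := New(WithShape(2, 2), WithBacking([]float64{0, 1, 2, 3}))
	V, err := T.Slice(nil, S(1, 2, 1)) // view of T[:, 1:2]
	if err != nil {
		return
	}
	_ = V.Data() // any write through V is also visible in T
}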
// SliceInto is a convenience method. It does NOT copy the values - it simply updates the AP of the view.
// The underlying data is the same.
// This method will override ALL the metadata in view.
func (t *Dense) SliceInto(view *Dense, slices ...Slice) (retVal View, err error) {
var newAP AP
var ndStart, ndEnd int
if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil {
return
}
view.AP.zero()
view.t = t.t
view.e = t.e
view.oe = t.oe
view.flag = t.flag
view.AP = newAP
view.setParentTensor(t)
t.sliceInto(ndStart, ndEnd, &view.array)
if t.IsMasked() {
view.mask = t.mask[ndStart:ndEnd]
}
return view, err
}
// RollAxis rolls the axis backwards until it lies in the given position.
//
// This method was adapted from Numpy's Rollaxis. The licence for Numpy is a BSD-like licence and can be found here: https://github.com/numpy/numpy/blob/master/LICENSE.txt
//
// As a result of being adapted from Numpy, the quirks are also adapted. A good guide to reducing the confusion around rollaxis can be found here: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing (see answer by hpaulj)
func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) {
dims := t.Dims()
if !(axis >= 0 && axis < dims) {
err = errors.Errorf(invalidAxis, axis, dims)
return
}
if !(start >= 0 && start <= dims) {
err = errors.Wrap(errors.Errorf(invalidAxis, axis, dims), "Start axis is wrong")
return
}
if axis < start {
start--
}
if axis == start {
retVal = t
return
}
axes := BorrowInts(dims)
defer ReturnInts(axes)
for i := 0; i < dims; i++ {
axes[i] = i
}
copy(axes[axis:], axes[axis+1:])
copy(axes[start+1:], axes[start:])
axes[start] = axis
if safe {
return t.SafeT(axes...)
}
err = t.T(axes...)
retVal = t
return
}
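// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and shape are assumptions. It shows RollAxis with safe=true, which
// leaves the receiver untouched.
func exampleRollAxis() {
	T := New(Of(Float64), WithShape(1, 2, 3, 4))
	rolled, err := T.RollAxis(3, 0, true) // roll axis 3 to the front: shape becomes (4, 1, 2, 3)
	if err != nil {
		return
	}
	_ = rolled.Shape()
}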
/* Private Methods */
// returns the new index given the old index
func (t *Dense) transposeIndex(i int, transposePat, strides []int) int {
oldCoord, err := Itol(i, t.oshape(), t.ostrides())
if err != nil {
err = errors.Wrapf(err, "transposeIndex ItoL failure. i %d original shape %v. original strides %v", i, t.oshape(), t.ostrides())
panic(err)
}
/*
coordss, _ := Permute(transposePat, oldCoord)
coords := coordss[0]
expShape := t.Shape()
index, _ := Ltoi(expShape, strides, coords...)
*/
// The above is the "conceptual" algorithm.
// Too many checks above slows things down, so the below is the "optimized" edition
var index int
for i, axis := range transposePat {
index += oldCoord[axis] * strides[i]
}
return index
}
// at returns the index to which the given coordinate refers.
// This function encapsulates the addressing of elements in a contiguous block.
// For a 2D ndarray, ndarray.at(i,j) is
// at = ndarray.strides[0]*i + ndarray.strides[1]*j
// This is, of course, extensible to any number of dimensions.
func (t *Dense) at(coords ...int) (at int, err error) {
return Ltoi(t.Shape(), t.Strides(), coords...)
}
// maskAt returns the mask index to which the given coordinate refers.
func (t *Dense) maskAt(coords ...int) (at int, err error) {
//TODO: Add check for non-masked tensor
return t.at(coords...)
}
tensor-0.9.24/dense_matop_memmove.go 0000664 0000000 0000000 00000007340 14265126151 0017501 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
// This file contains code pertaining to tensor operations that actually move memory
// Transpose() actually transposes the data.
// This is a generalized version of the inplace matrix transposition algorithm from Wikipedia:
// https://en.wikipedia.org/wiki/In-place_matrix_transposition
func (t *Dense) Transpose() error {
// if there is no oldinfo, that means the current info is the latest, and not the transpose
if t.old.IsZero() {
return nil
}
if t.IsScalar() {
return nil // cannot transpose scalars - no data movement
}
defer func() {
t.old.zero()
t.transposeWith = nil
}()
expShape := t.Shape()
// important! because the strides would have changed once the underlying data changed
var expStrides []int
if t.AP.o.IsColMajor() {
expStrides = expShape.CalcStridesColMajor()
} else {
expStrides = expShape.CalcStrides()
}
defer ReturnInts(expStrides)
defer func() {
copy(t.AP.strides, expStrides) // dimensions do not change, so it's actually safe to do this
t.sanity()
}()
if t.IsVector() {
// no data movement
return nil
}
// actually move data
var e Engine = t.e
transposer, ok := e.(Transposer)
if !ok {
return errors.Errorf("Engine does not support Transpose()")
}
return transposer.Transpose(t, expStrides)
}
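// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It contrasts the thunked T() with the
// data-moving Transpose() defined above.
func exampleTransposeData() {
	T := New(WithShape(2, 3), WithBacking([]int64{0, 1, 2, 3, 4, 5}))
	_ = T.T()         // metadata only: shape (3, 2), strides permuted
	_ = T.Transpose() // data is rearranged: backing becomes {0, 3, 1, 4, 2, 5}
}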
// Repeat is like Numpy's repeat. It repeats the elements of an array.
// The repeats param defines how many times each element in the axis is repeated.
// Just like NumPy, the repeats param is broadcasted to fit the size of the given axis.
func (t *Dense) Repeat(axis int, repeats ...int) (retVal Tensor, err error) {
e := t.Engine()
if rp, ok := e.(Repeater); ok {
return rp.Repeat(t, axis, repeats...)
}
return nil, errors.New("Engine does not support Repeat")
}
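// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows a single repeat count being
// broadcast across the given axis.
func exampleRepeat() {
	T := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
	r, err := T.Repeat(0, 2) // each row repeated twice: shape becomes (4, 2)
	if err != nil {
		return
	}
	_ = r.Shape()
}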
// Concat concatenates the other tensors along the given axis. It is like Numpy's concatenate() function.
func (t *Dense) Concat(axis int, Ts ...*Dense) (retVal *Dense, err error) {
e := t.Engine()
if c, ok := e.(Concater); ok {
var ret Tensor
others := densesToTensors(Ts)
if ret, err = c.Concat(t, axis, others...); err != nil {
return nil, errors.Wrapf(err, opFail, "Concat")
}
return ret.(*Dense), nil
}
return nil, errors.New("Engine does not support Concat")
}
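// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows Concat along axis 0 with tensors
// of different leading dimensions.
func exampleConcat() {
	a := New(WithShape(2, 2), WithBacking([]float64{0, 1, 2, 3}))
	b := New(WithShape(1, 2), WithBacking([]float64{4, 5}))
	c, err := a.Concat(0, b) // shape (3, 2), data {0, 1, 2, 3, 4, 5}
	if err != nil {
		return
	}
	_ = c.Data()
}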
// Hstack stacks other tensors columnwise (horizontal stacking)
func (t *Dense) Hstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 1D
if t.Dims() == 0 {
return nil, errors.Errorf(atleastDims, 1)
}
for _, d := range others {
if d.Dims() < 1 {
return nil, errors.Errorf(atleastDims, 1)
}
}
if t.Dims() == 1 {
return t.Concat(0, others...)
}
return t.Concat(1, others...)
}
// Vstack stacks other tensors rowwise (vertical stacking). Vertical stacking requires all involved Tensors to have at least 2 dimensions
func (t *Dense) Vstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 2D
if t.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
for _, d := range others {
if d.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
}
return t.Concat(0, others...)
}
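// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. It shows Hstack and Vstack as Concat along
// axis 1 and axis 0 respectively.
func exampleStacking() {
	a := New(WithShape(2, 2), WithBacking([]float64{0, 1, 2, 3}))
	b := New(WithShape(2, 2), WithBacking([]float64{4, 5, 6, 7}))
	h, err := a.Hstack(b) // shape (2, 4)
	if err != nil {
		return
	}
	v, err := a.Vstack(b) // shape (4, 2)
	if err != nil {
		return
	}
	_, _ = h.Shape(), v.Shape()
}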
// Stack stacks the other tensors along the axis specified. It is like Numpy's stack function.
func (t *Dense) Stack(axis int, others ...*Dense) (retVal *Dense, err error) {
var ret DenseTensor
var ok bool
if ret, err = t.stackDense(axis, densesToDenseTensors(others)...); err != nil {
return nil, err
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf("Return not *Dense")
}
return
}
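// NOTE: the sketch below is illustrative only and not part of the original API; the
// function name and values are assumptions. Unlike Concat, Stack introduces a new axis
// at the given position.
func exampleStack() {
	a := New(WithShape(2, 3), WithBacking([]float64{0, 1, 2, 3, 4, 5}))
	b := New(WithShape(2, 3), WithBacking([]float64{100, 101, 102, 103, 104, 105}))
	s, err := a.Stack(0, b) // shape (2, 2, 3)
	if err != nil {
		return
	}
	_ = s.Shape()
}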
func (t *Dense) stackDense(axis int, others ...DenseTensor) (retVal DenseTensor, err error) {
if ds, ok := t.Engine().(DenseStacker); ok {
return ds.StackDense(t, axis, others...)
}
return nil, errors.Errorf("Engine does not support DenseStacker")
}
tensor-0.9.24/dense_matop_test.go 0000664 0000000 0000000 00000122151 14265126151 0017011 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"gorgonia.org/vecf64"
)
func cloneArray(a interface{}) interface{} {
switch at := a.(type) {
case []float64:
retVal := make([]float64, len(at))
copy(retVal, at)
return retVal
case []float32:
retVal := make([]float32, len(at))
copy(retVal, at)
return retVal
case []int:
retVal := make([]int, len(at))
copy(retVal, at)
return retVal
case []int64:
retVal := make([]int64, len(at))
copy(retVal, at)
return retVal
case []int32:
retVal := make([]int32, len(at))
copy(retVal, at)
return retVal
case []byte:
retVal := make([]byte, len(at))
copy(retVal, at)
return retVal
case []bool:
retVal := make([]bool, len(at))
copy(retVal, at)
return retVal
}
return nil
}
func castToDt(val float64, dt Dtype) interface{} {
switch dt {
case Bool:
return false
case Int:
return int(val)
case Int8:
return int8(val)
case Int16:
return int16(val)
case Int32:
return int32(val)
case Int64:
return int64(val)
case Uint:
return uint(val)
case Uint8:
return uint8(val)
case Uint16:
return uint16(val)
case Uint32:
return uint32(val)
case Uint64:
return uint64(val)
case Float32:
return float32(val)
case Float64:
return float64(val)
default:
return 0
}
}
var atTests = []struct {
data interface{}
shape Shape
coord []int
correct interface{}
err bool
}{
// matrix
{[]float64{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{0, 1}, float64(1), false},
{[]float32{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{1, 1}, float32(4), false},
{[]float64{0, 1, 2, 3, 4, 5}, Shape{2, 3}, []int{1, 2, 3}, nil, true},
// 3-tensor
{[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{2, 3, 4}, []int{1, 1, 1}, 17, false},
{[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{2, 3, 4}, []int{1, 2, 3}, int64(23), false},
{[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{2, 3, 4}, []int{0, 3, 2}, 23, true},
}
func TestDense_At(t *testing.T) {
for i, ats := range atTests {
T := New(WithShape(ats.shape...), WithBacking(ats.data))
got, err := T.At(ats.coord...)
if checkErr(t, ats.err, err, "At", i) {
continue
}
if got != ats.correct {
t.Errorf("Expected %v. Got %v", ats.correct, got)
}
}
}
func Test_transposeIndex(t *testing.T) {
a := []byte{0, 1, 2, 3}
T := New(WithShape(2, 2), WithBacking(a))
correct := []int{0, 2, 1, 3}
for i, v := range correct {
got := T.transposeIndex(i, []int{1, 0}, []int{2, 1})
if v != got {
t.Errorf("transposeIndex error. Expected %v. Got %v", v, got)
}
}
}
var transposeTests = []struct {
name string
shape Shape
transposeWith []int
data interface{}
correctShape Shape
correctStrides []int // after .T()
correctStrides2 []int // after .Transpose()
correctData interface{}
}{
{"c.T()", Shape{4, 1}, nil, []float64{0, 1, 2, 3},
Shape{1, 4}, []int{1, 1}, []int{4, 1}, []float64{0, 1, 2, 3}},
{"r.T()", Shape{1, 4}, nil, []float32{0, 1, 2, 3},
Shape{4, 1}, []int{1, 1}, []int{1, 1}, []float32{0, 1, 2, 3}},
{"v.T()", Shape{4}, nil, []int{0, 1, 2, 3},
Shape{4}, []int{1}, []int{1}, []int{0, 1, 2, 3}},
{"M.T()", Shape{2, 3}, nil, []int64{0, 1, 2, 3, 4, 5},
Shape{3, 2}, []int{1, 3}, []int{2, 1}, []int64{0, 3, 1, 4, 2, 5}},
{"M.T(0,1) (NOOP)", Shape{2, 3}, []int{0, 1}, []int32{0, 1, 2, 3, 4, 5},
Shape{2, 3}, []int{3, 1}, []int{3, 1}, []int32{0, 1, 2, 3, 4, 5}},
{"3T.T()", Shape{2, 3, 4}, nil,
[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{4, 3, 2}, []int{1, 4, 12}, []int{6, 2, 1},
[]byte{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}},
{"3T.T(2, 1, 0) (Same as .T())", Shape{2, 3, 4}, []int{2, 1, 0},
[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{4, 3, 2}, []int{1, 4, 12}, []int{6, 2, 1},
[]int{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}},
{"3T.T(2, 1, 0) (Same as .T())", Shape{2, 3, 4}, []int{2, 1, 0},
[]int16{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{4, 3, 2}, []int{1, 4, 12}, []int{6, 2, 1},
[]int16{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}},
{"3T.T(0, 2, 1)", Shape{2, 3, 4}, []int{0, 2, 1},
[]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{2, 4, 3}, []int{12, 1, 4}, []int{12, 3, 1},
[]int32{0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}},
{"3T.T{1, 0, 2)", Shape{2, 3, 4}, []int{1, 0, 2},
[]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{3, 2, 4}, []int{4, 12, 1}, []int{8, 4, 1},
[]float64{0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}},
{"3T.T{1, 2, 0)", Shape{2, 3, 4}, []int{1, 2, 0},
[]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{3, 4, 2}, []int{4, 1, 12}, []int{8, 2, 1},
[]float64{0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23}},
{"3T.T{2, 0, 1)", Shape{2, 3, 4}, []int{2, 0, 1},
[]float32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
Shape{4, 2, 3}, []int{1, 12, 4}, []int{6, 3, 1},
[]float32{0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}},
{"3T.T{0, 1, 2} (NOOP)", Shape{2, 3, 4}, []int{0, 1, 2},
[]bool{true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false},
Shape{2, 3, 4}, []int{12, 4, 1}, []int{12, 4, 1},
[]bool{true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false}},
{"M[2,2].T for bools, just for completeness sake", Shape{2, 2}, nil,
[]bool{true, true, false, false},
Shape{2, 2}, []int{1, 2}, []int{2, 1},
[]bool{true, false, true, false},
},
{"M[2,2].T for strings, just for completeness sake", Shape{2, 2}, nil,
[]string{"hello", "world", "今日は", "世界"},
Shape{2, 2}, []int{1, 2}, []int{2, 1},
[]string{"hello", "今日は", "world", "世界"},
},
}
func TestDense_Transpose(t *testing.T) {
assert := assert.New(t)
var err error
// standard transposes
for _, tts := range transposeTests {
T := New(WithShape(tts.shape...), WithBacking(tts.data))
if err = T.T(tts.transposeWith...); err != nil {
t.Errorf("%v - %v", tts.name, err)
continue
}
assert.True(tts.correctShape.Eq(T.Shape()), "Transpose %v Expected shape: %v. Got %v", tts.name, tts.correctShape, T.Shape())
assert.Equal(tts.correctStrides, T.Strides(), "Transpose %v. Expected stride: %v. Got %v", tts.name, tts.correctStrides, T.Strides())
T.Transpose()
assert.True(tts.correctShape.Eq(T.Shape()), "Transpose %v Expected shape: %v. Got %v", tts.name, tts.correctShape, T.Shape())
assert.Equal(tts.correctStrides2, T.Strides(), "Transpose2 %v - Expected stride %v. Got %v", tts.name, tts.correctStrides2, T.Strides())
assert.Equal(tts.correctData, T.Data(), "Transpose %v", tts.name)
}
// test stacked .T() calls
var T *Dense
// column vector
T = New(WithShape(4, 1), WithBacking(Range(Int, 0, 4)))
if err = T.T(); err != nil {
t.Errorf("Stacked .T() #1 for vector. Error: %v", err)
goto matrev
}
if err = T.T(); err != nil {
t.Errorf("Stacked .T() #1 for vector. Error: %v", err)
goto matrev
}
assert.True(T.old.IsZero())
assert.Nil(T.transposeWith)
assert.True(T.IsColVec())
matrev:
// matrix, reversed
T = New(WithShape(2, 3), WithBacking(Range(Byte, 0, 6)))
if err = T.T(); err != nil {
t.Errorf("Stacked .T() #1 for matrix reverse. Error: %v", err)
goto matnorev
}
if err = T.T(); err != nil {
t.Errorf("Stacked .T() #2 for matrix reverse. Error: %v", err)
goto matnorev
}
assert.True(T.old.IsZero())
assert.Nil(T.transposeWith)
assert.True(Shape{2, 3}.Eq(T.Shape()))
matnorev:
// 3-tensor, non reversed
T = New(WithShape(2, 3, 4), WithBacking(Range(Int64, 0, 24)))
if err = T.T(); err != nil {
t.Fatalf("Stacked .T() #1 for tensor with no reverse. Error: %v", err)
}
if err = T.T(2, 0, 1); err != nil {
t.Fatalf("Stacked .T() #2 for tensor with no reverse. Error: %v", err)
}
correctData := []int64{0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}
assert.Equal(correctData, T.Data())
assert.Equal([]int{2, 0, 1}, T.transposeWith)
assert.NotNil(T.old)
}
func TestTUT(t *testing.T) {
assert := assert.New(t)
var T *Dense
T = New(Of(Float64), WithShape(2, 3, 4))
T.T()
T.UT()
assert.True(T.old.IsZero())
assert.Nil(T.transposeWith)
T.T(2, 0, 1)
T.UT()
assert.True(T.old.IsZero())
assert.Nil(T.transposeWith)
}
type repeatTest struct {
name string
tensor *Dense
ne bool // should assert tensor not equal
axis int
repeats []int
correct interface{}
shape Shape
err bool
}
var repeatTests = []repeatTest{
{"Scalar Repeat on axis 0", New(FromScalar(true)),
true, 0, []int{3},
[]bool{true, true, true},
Shape{3}, false,
},
{"Scalar Repeat on axis 1", New(FromScalar(byte(255))),
false, 1, []int{3},
[]byte{255, 255, 255},
Shape{1, 3}, false,
},
{"Vector Repeat on axis 0", New(WithShape(2), WithBacking([]int32{1, 2})),
false, 0, []int{3},
[]int32{1, 1, 1, 2, 2, 2},
Shape{6}, false,
},
{"ColVec Repeat on axis 0", New(WithShape(2, 1), WithBacking([]int64{1, 2})),
false, 0, []int{3},
[]int64{1, 1, 1, 2, 2, 2},
Shape{6, 1}, false,
},
{"RowVec Repeat on axis 0", New(WithShape(1, 2), WithBacking([]int{1, 2})),
false, 0, []int{3},
[]int{1, 2, 1, 2, 1, 2},
Shape{3, 2}, false,
},
{"ColVec Repeat on axis 1", New(WithShape(2, 1), WithBacking([]float32{1, 2})),
false, 1, []int{3},
[]float32{1, 1, 1, 2, 2, 2},
Shape{2, 3}, false,
},
{"RowVec Repeat on axis 1", New(WithShape(1, 2), WithBacking([]float64{1, 2})),
false, 1, []int{3},
[]float64{1, 1, 1, 2, 2, 2},
Shape{1, 6}, false,
},
{"Vector Repeat on all axes", New(WithShape(2), WithBacking([]byte{1, 2})),
false, AllAxes, []int{3},
[]byte{1, 1, 1, 2, 2, 2},
Shape{6}, false,
},
{"ColVec Repeat on all axes", New(WithShape(2, 1), WithBacking([]int32{1, 2})),
false, AllAxes, []int{3},
[]int32{1, 1, 1, 2, 2, 2},
Shape{6}, false,
},
{"RowVec Repeat on all axes", New(WithShape(1, 2), WithBacking([]int64{1, 2})),
false, AllAxes, []int{3},
[]int64{1, 1, 1, 2, 2, 2},
Shape{6}, false,
},
{"M[2,2] Repeat on all axes with repeats = (1,2,1,1)", New(WithShape(2, 2), WithBacking([]int{1, 2, 3, 4})),
false, AllAxes, []int{1, 2, 1, 1},
[]int{1, 2, 2, 3, 4},
Shape{5}, false,
},
{"M[2,2] Repeat on axis 1 with repeats = (2, 1)", New(WithShape(2, 2), WithBacking([]float32{1, 2, 3, 4})),
false, 1, []int{2, 1},
[]float32{1, 1, 2, 3, 3, 4},
Shape{2, 3}, false,
},
{"M[2,2] Repeat on axis 1 with repeats = (1, 2)", New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4})),
false, 1, []int{1, 2},
[]float64{1, 2, 2, 3, 4, 4},
Shape{2, 3}, false,
},
{"M[2,2] Repeat on axis 0 with repeats = (1, 2)", New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4})),
false, 0, []int{1, 2},
[]float64{1, 2, 3, 4, 3, 4},
Shape{3, 2}, false,
},
{"M[2,2] Repeat on axis 0 with repeats = (2, 1)", New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4})),
false, 0, []int{2, 1},
[]float64{1, 2, 1, 2, 3, 4},
Shape{3, 2}, false,
},
{"3T[2,3,2] Repeat on axis 1 with repeats = (1,2,1)", New(WithShape(2, 3, 2), WithBacking(vecf64.Range(1, 2*3*2+1))),
false, 1, []int{1, 2, 1},
[]float64{1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 9, 10, 9, 10, 11, 12},
Shape{2, 4, 2}, false,
},
{"3T[2,3,2] Generic Repeat by 2", New(WithShape(2, 3, 2), WithBacking(vecf64.Range(1, 2*3*2+1))),
false, AllAxes, []int{2},
[]float64{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12},
Shape{24}, false,
},
{"3T[2,3,2] repeat with broadcast errors", New(WithShape(2, 3, 2), WithBacking(vecf64.Range(1, 2*3*2+1))),
false, 0, []int{1, 2, 1},
nil, nil, true,
},
// idiots
{"Nonexistent axis", New(WithShape(2, 1), WithBacking([]bool{true, false})),
false, 2, []int{3}, nil, nil, true,
},
}
func TestDense_Repeat(t *testing.T) {
assert := assert.New(t)
for i, test := range repeatTests {
T, err := test.tensor.Repeat(test.axis, test.repeats...)
if checkErr(t, test.err, err, "Repeat", i) {
continue
}
var D DenseTensor
if D, err = getDenseTensor(T); err != nil {
t.Errorf("Expected Repeat to return a *Dense. got %v of %T instead", T, T)
continue
}
if test.ne {
assert.NotEqual(test.tensor, D, test.name)
}
assert.Equal(test.correct, D.Data(), test.name)
assert.Equal(test.shape, D.Shape(), test.name)
}
}
func TestDense_Repeat_Slow(t *testing.T) {
rt2 := make([]repeatTest, len(repeatTests))
for i, rt := range repeatTests {
rt2[i] = repeatTest{
name: rt.name,
ne: rt.ne,
axis: rt.axis,
repeats: rt.repeats,
correct: rt.correct,
shape: rt.shape,
err: rt.err,
tensor: rt.tensor.Clone().(*Dense),
}
}
for i := range rt2 {
maskLen := rt2[i].tensor.len()
mask := make([]bool, maskLen)
rt2[i].tensor.mask = mask
}
assert := assert.New(t)
for i, test := range rt2 {
T, err := test.tensor.Repeat(test.axis, test.repeats...)
if checkErr(t, test.err, err, "Repeat", i) {
continue
}
var D DenseTensor
if D, err = getDenseTensor(T); err != nil {
t.Errorf("Expected Repeat to return a *Dense. got %v of %T instead", T, T)
continue
}
if test.ne {
assert.NotEqual(test.tensor, D, test.name)
}
assert.Equal(test.correct, D.Data(), test.name)
assert.Equal(test.shape, D.Shape(), test.name)
}
}
func TestDense_CopyTo(t *testing.T) {
assert := assert.New(t)
var T, T2 *Dense
var T3 Tensor
var err error
T = New(WithShape(2), WithBacking([]float64{1, 2}))
T2 = New(Of(Float64), WithShape(1, 2))
err = T.CopyTo(T2)
if err != nil {
t.Fatal(err)
}
assert.Equal(T2.Data(), T.Data())
// now, modify T1's data
T.Set(0, float64(5000))
assert.NotEqual(T2.Data(), T.Data())
// test views
T = New(Of(Byte), WithShape(3, 3))
T2 = New(Of(Byte), WithShape(2, 2))
T3, _ = T.Slice(makeRS(0, 2), makeRS(0, 2)) // T[0:2, 0:2], shape == (2,2)
if err = T2.CopyTo(T3.(*Dense)); err != nil {
t.Log(err) // for now it's a not yet implemented error. TODO: FIX THIS
}
// dumbass time
T = New(Of(Float32), WithShape(3, 3))
T2 = New(Of(Float32), WithShape(2, 2))
if err = T.CopyTo(T2); err == nil {
t.Error("Expected an error")
}
if err = T.CopyTo(T); err != nil {
t.Error("Copying a *Tensor to itself should yield no error. ")
}
}
var denseSliceTests = []struct {
name string
data interface{}
shape Shape
slices []Slice
correctShape Shape
correctStride []int
correctData interface{}
}{
// scalar-equiv vector (issue 102)
{"a[0], a is scalar-equiv", []float64{2},
Shape{1}, []Slice{ss(0)}, ScalarShape(), nil, 2.0},
// vector
{"a[0]", []bool{true, true, false, false, false},
Shape{5}, []Slice{ss(0)}, ScalarShape(), nil, true},
{"a[0:2]", Range(Byte, 0, 5), Shape{5}, []Slice{makeRS(0, 2)}, Shape{2}, []int{1}, []byte{0, 1}},
{"a[1:5:2]", Range(Int32, 0, 5), Shape{5}, []Slice{makeRS(1, 5, 2)}, Shape{2}, []int{2}, []int32{1, 2, 3, 4}},
// colvec
{"c[0]", Range(Int64, 0, 5), Shape{5, 1}, []Slice{ss(0)}, ScalarShape(), nil, int64(0)},
{"c[0:2]", Range(Float32, 0, 5), Shape{5, 1}, []Slice{makeRS(0, 2)}, Shape{2, 1}, []int{1, 1}, []float32{0, 1}},
{"c[1:5:2]", Range(Float64, 0, 5), Shape{5, 1}, []Slice{makeRS(0, 5, 2)}, Shape{2, 1}, []int{2, 1}, []float64{0, 1, 2, 3, 4}},
// rowvec
{"r[0]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{ss(0)}, Shape{1, 5}, []int{1}, []float64{0, 1, 2, 3, 4}},
{"r[0:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{makeRS(0, 2)}, Shape{1, 5}, []int{1}, []float64{0, 1, 2, 3, 4}},
{"r[0:5:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{makeRS(0, 5, 2)}, Shape{1, 5}, []int{1}, []float64{0, 1, 2, 3, 4}},
{"r[:, 0]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{nil, ss(0)}, ScalarShape(), nil, float64(0)},
{"r[:, 0:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{nil, makeRS(0, 2)}, Shape{1, 2}, []int{5, 1}, []float64{0, 1}},
{"r[:, 1:5:2]", Range(Float64, 0, 5), Shape{1, 5}, []Slice{nil, makeRS(1, 5, 2)}, Shape{1, 2}, []int{5, 2}, []float64{1, 2, 3, 4}},
// matrix
{"A[0]", Range(Float64, 0, 6), Shape{2, 3}, []Slice{ss(0)}, Shape{1, 3}, []int{1}, Range(Float64, 0, 3)},
{"A[0:2]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{makeRS(0, 2)}, Shape{2, 5}, []int{5, 1}, Range(Float64, 0, 10)},
{"A[0, 0]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{ss(0), ss(0)}, ScalarShape(), nil, float64(0)},
{"A[0, 1:5]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{ss(0), makeRS(1, 5)}, Shape{4}, []int{1}, Range(Float64, 1, 5)},
{"A[0, 1:5:2]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{ss(0), makeRS(1, 5, 2)}, Shape{1, 2}, []int{2}, Range(Float64, 1, 5)},
{"A[:, 0]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{nil, ss(0)}, Shape{4, 1}, []int{5}, Range(Float64, 0, 16)},
{"A[:, 1:5]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{nil, makeRS(1, 5)}, Shape{4, 4}, []int{5, 1}, Range(Float64, 1, 20)},
{"A[:, 1:5:2]", Range(Float64, 0, 20), Shape{4, 5}, []Slice{nil, makeRS(1, 5, 2)}, Shape{4, 2}, []int{5, 2}, Range(Float64, 1, 20)},
// 3tensor with leading and trailing 1s
{"3T1[0]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{ss(0)}, Shape{9, 1}, []int{1, 1}, Range(Float64, 0, 9)},
{"3T1[nil, 0:2]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(0, 2)}, Shape{1, 2, 1}, []int{9, 1, 1}, Range(Float64, 0, 2)},
{"3T1[nil, 0:5:3]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(0, 5, 3)}, Shape{1, 2, 1}, []int{9, 3, 1}, Range(Float64, 0, 5)},
{"3T1[nil, 1:5:3]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(1, 5, 3)}, Shape{1, 2, 1}, []int{9, 3, 1}, Range(Float64, 1, 5)},
{"3T1[nil, 1:9:3]", Range(Float64, 0, 9), Shape{1, 9, 1}, []Slice{nil, makeRS(1, 9, 3)}, Shape{1, 3, 1}, []int{9, 3, 1}, Range(Float64, 1, 9)},
// 3tensor
{"3T[0]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(0)}, Shape{9, 2}, []int{2, 1}, Range(Float64, 0, 18)},
{"3T[1]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1)}, Shape{9, 2}, []int{2, 1}, Range(Float64, 18, 36)},
{"3T[1, 2]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), ss(2)}, Shape{2}, []int{1}, Range(Float64, 22, 24)},
{"3T[1, 2:4]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), makeRS(2, 4)}, Shape{2, 2}, []int{2, 1}, Range(Float64, 22, 26)},
{"3T[1, 2:8:2]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), makeRS(2, 8, 2)}, Shape{3, 2}, []int{4, 1}, Range(Float64, 22, 34)},
{"3T[1, 2:8:3]", Range(Float64, 0, 36), Shape{2, 9, 2}, []Slice{ss(1), makeRS(2, 8, 3)}, Shape{2, 2}, []int{6, 1}, Range(Float64, 22, 34)},
{"3T[1, 2:9:2]", Range(Float64, 0, 126), Shape{2, 9, 7}, []Slice{ss(1), makeRS(2, 9, 2)}, Shape{4, 7}, []int{14, 1}, Range(Float64, 77, 126)},
{"3T[1, 2:9:2, 1]", Range(Float64, 0, 126), Shape{2, 9, 7}, []Slice{ss(1), makeRS(2, 9, 2), ss(1)}, Shape{4}, []int{14}, Range(Float64, 78, 121)}, // should this be a colvec?
{"3T[1, 2:9:2, 1:4:2]", Range(Float64, 0, 126), Shape{2, 9, 7}, []Slice{ss(1), makeRS(2, 9, 2), makeRS(1, 4, 2)}, Shape{4, 2}, []int{14, 2}, Range(Float64, 78, 123)},
}
func TestDense_Slice(t *testing.T) {
assert := assert.New(t)
var T *Dense
var V Tensor
var err error
for _, sts := range denseSliceTests {
T = New(WithShape(sts.shape...), WithBacking(sts.data))
t.Log(sts.name)
if V, err = T.Slice(sts.slices...); err != nil {
t.Error(err)
continue
}
assert.True(sts.correctShape.Eq(V.Shape()), "Test: %v - Incorrect Shape. Correct: %v. Got %v", sts.name, sts.correctShape, V.Shape())
assert.Equal(sts.correctStride, V.Strides(), "Test: %v - Incorrect Stride", sts.name)
assert.Equal(sts.correctData, V.Data(), "Test: %v - Incorrect Data", sts.name)
}
// Transposed slice
T = New(WithShape(2, 3), WithBacking(Range(Float32, 0, 6)))
T.T()
V, err = T.Slice(ss(0))
assert.True(Shape{2}.Eq(V.Shape()))
assert.Equal([]int{3}, V.Strides())
assert.Equal([]float32{0, 1, 2, 3}, V.Data())
assert.True(V.(*Dense).old.IsZero())
// slice a sliced
t.Logf("%v", V)
V, err = V.Slice(makeRS(1, 2))
t.Logf("%v", V)
assert.True(ScalarShape().Eq(V.Shape()))
assert.Equal(float32(3), V.Data())
// And now, ladies and gentlemen, the idiots!
// too many slices
_, err = T.Slice(ss(1), ss(2), ss(3), ss(4))
if err == nil {
t.Error("Expected a DimMismatchError error")
}
// out of range sliced
_, err = T.Slice(makeRS(20, 5))
if err == nil {
t.Error("Expected a IndexError")
}
// surely nobody can be this dumb? Having a start of negatives
_, err = T.Slice(makeRS(-1, 1))
if err == nil {
t.Error("Expected a IndexError")
}
}
func TestDense_Narrow(t *testing.T) {
testCases := []struct {
x *Dense
dim, start, length int
expected *Dense
}{
{
x: New(
WithShape(3),
WithBacking([]int{1, 2, 3}),
),
dim: 0,
start: 1,
length: 1,
expected: New(
WithShape(),
WithBacking([]int{2}),
),
},
{
x: New(
WithShape(3, 3),
WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}),
),
dim: 0,
start: 0,
length: 2,
expected: New(
WithShape(2, 3),
WithBacking([]int{1, 2, 3, 4, 5, 6}),
),
},
{
x: New(
WithShape(3, 3),
WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}),
),
dim: 1,
start: 1,
length: 2,
expected: New(
WithShape(3, 2),
WithBacking([]int{2, 3, 5, 6, 8, 9}),
),
},
{
x: New(
WithShape(3, 3),
WithBacking([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}),
),
dim: 1,
start: 0,
length: 1,
expected: New(
WithShape(3),
WithBacking([]int{1, 4, 7}),
),
},
}
for i, tC := range testCases {
t.Run(fmt.Sprintf("Example #%d narrow(%v,%d,%d,%v)", i+1, tC.x.Shape(), tC.dim, tC.start, tC.length), func(t *testing.T) {
c := assert.New(t)
// t.Logf("X:\n%v", tC.x)
y, err := tC.x.Narrow(tC.dim, tC.start, tC.length)
c.NoError(err)
// t.Logf("y:\n%v", y)
yMat := y.Materialize()
c.Equal(tC.expected.Shape(), yMat.Shape())
c.Equal(tC.expected.Data(), yMat.Data())
// err = y.Memset(1024)
// c.NoError(err)
// t.Logf("After Memset\nY: %v\nX:\n%v", y, tC.x)
})
}
}
func TestDense_SliceInto(t *testing.T) {
V := New(WithShape(100), Of(Byte))
T := New(WithBacking([]float64{1, 2, 3, 4, 5, 6}), WithShape(2, 3))
T.SliceInto(V, ss(0))
assert.True(t, Shape{3}.Eq(V.Shape()), "Got %v", V.Shape())
assert.Equal(t, []float64{1, 2, 3}, V.Data())
}
var rollaxisTests = []struct {
axis, start int
correctShape Shape
}{
{0, 0, Shape{1, 2, 3, 4}},
{0, 1, Shape{1, 2, 3, 4}},
{0, 2, Shape{2, 1, 3, 4}},
{0, 3, Shape{2, 3, 1, 4}},
{0, 4, Shape{2, 3, 4, 1}},
{1, 0, Shape{2, 1, 3, 4}},
{1, 1, Shape{1, 2, 3, 4}},
{1, 2, Shape{1, 2, 3, 4}},
{1, 3, Shape{1, 3, 2, 4}},
{1, 4, Shape{1, 3, 4, 2}},
{2, 0, Shape{3, 1, 2, 4}},
{2, 1, Shape{1, 3, 2, 4}},
{2, 2, Shape{1, 2, 3, 4}},
{2, 3, Shape{1, 2, 3, 4}},
{2, 4, Shape{1, 2, 4, 3}},
{3, 0, Shape{4, 1, 2, 3}},
{3, 1, Shape{1, 4, 2, 3}},
{3, 2, Shape{1, 2, 4, 3}},
{3, 3, Shape{1, 2, 3, 4}},
{3, 4, Shape{1, 2, 3, 4}},
}
// The RollAxis tests are directly adapted from Numpy's test cases.
func TestDense_RollAxis(t *testing.T) {
assert := assert.New(t)
var T *Dense
var err error
for _, rats := range rollaxisTests {
T = New(Of(Byte), WithShape(1, 2, 3, 4))
if _, err = T.RollAxis(rats.axis, rats.start, false); assert.NoError(err) {
assert.True(rats.correctShape.Eq(T.Shape()), "%d %d Expected %v, got %v", rats.axis, rats.start, rats.correctShape, T.Shape())
}
}
}
var concatTests = []struct {
name string
dt Dtype
a interface{}
b interface{}
shape Shape
shapeB Shape
axis int
correctShape Shape
correctData interface{}
}{
// Float64
{"vector", Float64, nil, nil, Shape{2}, nil, 0, Shape{4}, []float64{0, 1, 0, 1}},
{"matrix; axis 0 ", Float64, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []float64{0, 1, 2, 3, 0, 1, 2, 3}},
{"matrix; axis 1 ", Float64, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []float64{0, 1, 0, 1, 2, 3, 2, 3}},
// Float32
{"vector", Float32, nil, nil, Shape{2}, nil, 0, Shape{4}, []float32{0, 1, 0, 1}},
{"matrix; axis 0 ", Float32, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []float32{0, 1, 2, 3, 0, 1, 2, 3}},
{"matrix; axis 1 ", Float32, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []float32{0, 1, 0, 1, 2, 3, 2, 3}},
// Int
{"vector", Int, nil, nil, Shape{2}, nil, 0, Shape{4}, []int{0, 1, 0, 1}},
{"matrix; axis 0 ", Int, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []int{0, 1, 2, 3, 0, 1, 2, 3}},
{"matrix; axis 1 ", Int, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []int{0, 1, 0, 1, 2, 3, 2, 3}},
// Int64
{"vector", Int64, nil, nil, Shape{2}, nil, 0, Shape{4}, []int64{0, 1, 0, 1}},
{"matrix; axis 0 ", Int64, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []int64{0, 1, 2, 3, 0, 1, 2, 3}},
{"matrix; axis 1 ", Int64, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []int64{0, 1, 0, 1, 2, 3, 2, 3}},
// Int32
{"vector", Int32, nil, nil, Shape{2}, nil, 0, Shape{4}, []int32{0, 1, 0, 1}},
{"matrix; axis 0 ", Int32, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []int32{0, 1, 2, 3, 0, 1, 2, 3}},
{"matrix; axis 1 ", Int32, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []int32{0, 1, 0, 1, 2, 3, 2, 3}},
// Byte
{"vector", Byte, nil, nil, Shape{2}, nil, 0, Shape{4}, []byte{0, 1, 0, 1}},
{"matrix; axis 0 ", Byte, nil, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []byte{0, 1, 2, 3, 0, 1, 2, 3}},
{"matrix; axis 1 ", Byte, nil, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []byte{0, 1, 0, 1, 2, 3, 2, 3}},
// Bool
{"vector", Bool, []bool{true, false}, nil, Shape{2}, nil, 0, Shape{4}, []bool{true, false, true, false}},
{"matrix; axis 0 ", Bool, []bool{true, false, true, false}, nil, Shape{2, 2}, nil, 0, Shape{4, 2}, []bool{true, false, true, false, true, false, true, false}},
{"matrix; axis 1 ", Bool, []bool{true, false, true, false}, nil, Shape{2, 2}, nil, 1, Shape{2, 4}, []bool{true, false, true, false, true, false, true, false}},
// gorgonia/gorgonia#218 related
{"matrix; axis 0", Float64, nil, nil, Shape{2, 2}, Shape{1, 2}, 0, Shape{3, 2}, []float64{0, 1, 2, 3, 0, 1}},
{"matrix; axis 1", Float64, nil, nil, Shape{2, 2}, Shape{2, 1}, 1, Shape{2, 3}, []float64{0, 1, 0, 2, 3, 1}},
{"colvec matrix, axis 0", Float64, nil, nil, Shape{2, 1}, Shape{1, 1}, 0, Shape{3, 1}, []float64{0, 1, 0}},
{"rowvec matrix, axis 1", Float64, nil, nil, Shape{1, 2}, Shape{1, 1}, 1, Shape{1, 3}, []float64{0, 1, 0}},
{"3tensor; axis 0", Float64, nil, nil, Shape{2, 3, 2}, Shape{1, 3, 2}, 0, Shape{3, 3, 2}, []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5}},
{"3tensor; axis 2", Float64, nil, nil, Shape{2, 3, 2}, Shape{2, 3, 1}, 2, Shape{2, 3, 3}, []float64{0, 1, 0, 2, 3, 1, 4, 5, 2, 6, 7, 3, 8, 9, 4, 10, 11, 5}},
{"3tensor; axis 1", Float64, nil, nil, Shape{2, 3, 2}, Shape{2, 1, 2}, 1, Shape{2, 4, 2}, []float64{0, 1, 2, 3, 4, 5, 0, 1, 6, 7, 8, 9, 10, 11, 2, 3}},
}
func TestDense_Concat(t *testing.T) {
assert := assert.New(t)
for _, cts := range concatTests {
var T0, T1 *Dense
if cts.a == nil {
T0 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize())))
} else {
T0 = New(WithShape(cts.shape...), WithBacking(cts.a))
}
switch {
case cts.shapeB == nil && cts.a == nil:
T1 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize())))
case cts.shapeB == nil && cts.a != nil:
T1 = New(WithShape(cts.shape...), WithBacking(cloneArray(cts.a)))
case cts.shapeB != nil && cts.b == nil:
T1 = New(WithShape(cts.shapeB...), WithBacking(Range(cts.dt, 0, cts.shapeB.TotalSize())))
case cts.shapeB != nil && cts.b != nil:
T1 = New(WithShape(cts.shapeB...), WithBacking(cts.b))
}
T2, err := T0.Concat(cts.axis, T1)
if err != nil {
t.Errorf("Test %v failed: %v", cts.name, err)
continue
}
assert.True(cts.correctShape.Eq(T2.Shape()))
assert.Equal(cts.correctData, T2.Data())
}
// Masked case
for _, cts := range concatTests {
var T0, T1 *Dense
if cts.a == nil {
T0 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize())))
T0.MaskedEqual(castToDt(0.0, cts.dt))
} else {
T0 = New(WithShape(cts.shape...), WithBacking(cts.a))
T0.MaskedEqual(castToDt(0.0, cts.dt))
}
switch {
case cts.shapeB == nil && cts.a == nil:
T1 = New(WithShape(cts.shape...), WithBacking(Range(cts.dt, 0, cts.shape.TotalSize())))
case cts.shapeB == nil && cts.a != nil:
T1 = New(WithShape(cts.shape...), WithBacking(cloneArray(cts.a)))
case cts.shapeB != nil && cts.b == nil:
T1 = New(WithShape(cts.shapeB...), WithBacking(Range(cts.dt, 0, cts.shapeB.TotalSize())))
case cts.shapeB != nil && cts.b != nil:
T1 = New(WithShape(cts.shapeB...), WithBacking(cts.b))
}
T1.MaskedEqual(castToDt(0.0, cts.dt))
T2, err := T0.Concat(cts.axis, T1)
if err != nil {
t.Errorf("Test %v failed: %v", cts.name, err)
continue
}
T3 := New(WithShape(cts.correctShape...), WithBacking(cts.correctData))
T3.MaskedEqual(castToDt(0.0, cts.dt))
assert.True(cts.correctShape.Eq(T2.Shape()))
assert.Equal(cts.correctData, T2.Data())
assert.Equal(T3.mask, T2.mask)
}
}
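// TestDense_Concat_sliced checks that concatenating reshaped column slices of a
// vector reconstructs the original data into a fresh backing array rather than
// aliasing the source tensor's backing.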
func TestDense_Concat_sliced(t *testing.T) {
v := New(
WithShape(1, 5),
WithBacking([]float64{0, 1, 2, 3, 4}),
)
cols := make([]Tensor, v.Shape().TotalSize())
for i := 0; i < v.Shape().TotalSize(); i++ {
sliced, err := v.Slice(nil, ss(i))
if err != nil {
t.Fatalf("Failed to slice %d. Error: %v", i, err)
}
if err = sliced.Reshape(sliced.Shape().TotalSize(), 1); err != nil {
t.Fatalf("Failed to reshape %d. Error %v", i, err)
}
cols[i] = sliced
}
result, err := Concat(1, cols[0], cols[1:]...)
if err != nil {
t.Error(err)
}
assert.Equal(t, v.Data(), result.Data())
if v.Uintptr() == result.Uintptr() {
t.Error("They should not share the same backing data!")
}
}
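// simpleStackTests stacks Range-backed tensors of the given shape along the
// given axis; each additional stacked tensor is offset by a further 100 so the
// expected data stays easy to read.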
var simpleStackTests = []struct {
name string
dt Dtype
shape Shape
axis int
stackCount int
correctShape Shape
correctData interface{}
}{
// Size 8
{"vector, axis 0, stack 2", Float64, Shape{2}, 0, 2, Shape{2, 2}, []float64{0, 1, 100, 101}},
{"vector, axis 1, stack 2", Float64, Shape{2}, 1, 2, Shape{2, 2}, []float64{0, 100, 1, 101}},
{"matrix, axis 0, stack 2", Float64, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []float64{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}},
{"matrix, axis 1, stack 2", Float64, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []float64{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}},
{"matrix, axis 2, stack 2", Float64, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []float64{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}},
{"matrix, axis 0, stack 3", Float64, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []float64{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}},
{"matrix, axis 1, stack 3", Float64, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []float64{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}},
{"matrix, axis 2, stack 3", Float64, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []float64{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}},
// Size 4
{"vector, axis 0, stack 2 (f32)", Float32, Shape{2}, 0, 2, Shape{2, 2}, []float32{0, 1, 100, 101}},
{"vector, axis 1, stack 2 (f32)", Float32, Shape{2}, 1, 2, Shape{2, 2}, []float32{0, 100, 1, 101}},
{"matrix, axis 0, stack 2 (f32)", Float32, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []float32{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}},
{"matrix, axis 1, stack 2 (f32)", Float32, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []float32{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}},
{"matrix, axis 2, stack 2 (f32)", Float32, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []float32{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}},
{"matrix, axis 0, stack 3 (f32)", Float32, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []float32{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}},
{"matrix, axis 1, stack 3 (f32)", Float32, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []float32{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}},
{"matrix, axis 2, stack 3 (f32)", Float32, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []float32{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}},
// Size 2
{"vector, axis 0, stack 2 (i16)", Int16, Shape{2}, 0, 2, Shape{2, 2}, []int16{0, 1, 100, 101}},
{"vector, axis 1, stack 2 (i16)", Int16, Shape{2}, 1, 2, Shape{2, 2}, []int16{0, 100, 1, 101}},
{"matrix, axis 0, stack 2 (i16)", Int16, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []int16{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}},
{"matrix, axis 1, stack 2 (i16)", Int16, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []int16{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}},
{"matrix, axis 2, stack 2 (i16)", Int16, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []int16{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}},
{"matrix, axis 0, stack 3 (i16)", Int16, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []int16{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}},
{"matrix, axis 1, stack 3 (i16)", Int16, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []int16{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}},
{"matrix, axis 2, stack 3 (i16)", Int16, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []int16{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}},
// Size 1
{"vector, axis 0, stack 2 (u8)", Byte, Shape{2}, 0, 2, Shape{2, 2}, []byte{0, 1, 100, 101}},
{"vector, axis 1, stack 2 (u8)", Byte, Shape{2}, 1, 2, Shape{2, 2}, []byte{0, 100, 1, 101}},
{"matrix, axis 0, stack 2 (u8)", Byte, Shape{2, 3}, 0, 2, Shape{2, 2, 3}, []byte{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105}},
{"matrix, axis 1, stack 2 (u8)", Byte, Shape{2, 3}, 1, 2, Shape{2, 2, 3}, []byte{0, 1, 2, 100, 101, 102, 3, 4, 5, 103, 104, 105}},
{"matrix, axis 2, stack 2 (u8)", Byte, Shape{2, 3}, 2, 2, Shape{2, 3, 2}, []byte{0, 100, 1, 101, 2, 102, 3, 103, 4, 104, 5, 105}},
{"matrix, axis 0, stack 3 (u8)", Byte, Shape{2, 3}, 0, 3, Shape{3, 2, 3}, []byte{0, 1, 2, 3, 4, 5, 100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205}},
{"matrix, axis 1, stack 3 (u8)", Byte, Shape{2, 3}, 1, 3, Shape{2, 3, 3}, []byte{0, 1, 2, 100, 101, 102, 200, 201, 202, 3, 4, 5, 103, 104, 105, 203, 204, 205}},
{"matrix, axis 2, stack 3 (u8)", Byte, Shape{2, 3}, 2, 3, Shape{2, 3, 3}, []byte{0, 100, 200, 1, 101, 201, 2, 102, 202, 3, 103, 203, 4, 104, 204, 5, 105, 205}},
}
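// viewStackTests exercises stacking on views: the base tensors are sliced
// (and/or transposed via the transform field) before being stacked.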
var viewStackTests = []struct {
name string
dt Dtype
shape Shape
transform []int
slices []Slice
axis int
stackCount int
correctShape Shape
correctData interface{}
}{
// Size 8
{"matrix(4x4)[1:3, 1:3] axis 0", Float64, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []float64{5, 6, 9, 10, 105, 106, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 1", Float64, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []float64{5, 6, 105, 106, 9, 10, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 2", Float64, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []float64{5, 105, 6, 106, 9, 109, 10, 110}},
// Size 4
{"matrix(4x4)[1:3, 1:3] axis 0 (u32)", Uint32, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []uint32{5, 6, 9, 10, 105, 106, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 1 (u32)", Uint32, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []uint32{5, 6, 105, 106, 9, 10, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 2 (u32)", Uint32, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []uint32{5, 105, 6, 106, 9, 109, 10, 110}},
// Size 2
{"matrix(4x4)[1:3, 1:3] axis 0 (u16)", Uint16, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []uint16{5, 6, 9, 10, 105, 106, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 1 (u16)", Uint16, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []uint16{5, 6, 105, 106, 9, 10, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 2 (u16)", Uint16, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []uint16{5, 105, 6, 106, 9, 109, 10, 110}},
// Size 1
{"matrix(4x4)[1:3, 1:3] axis 0 (u8)", Byte, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 0, 2, Shape{2, 2, 2}, []byte{5, 6, 9, 10, 105, 106, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 1 (u8)", Byte, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 1, 2, Shape{2, 2, 2}, []byte{5, 6, 105, 106, 9, 10, 109, 110}},
{"matrix(4x4)[1:3, 1:3] axis 2 (u8)", Byte, Shape{4, 4}, nil, []Slice{makeRS(1, 3), makeRS(1, 3)}, 2, 2, Shape{2, 2, 2}, []byte{5, 105, 6, 106, 9, 109, 10, 110}},
}
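// TestDense_Stack covers stacking of plain tensors, of views (sliced and
// transposed tensors), of masked tensors, and of a string-backed tensor.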
func TestDense_Stack(t *testing.T) {
assert := assert.New(t)
var err error
for _, sts := range simpleStackTests {
T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize())))
var stacked []*Dense
for i := 0; i < sts.stackCount-1; i++ {
offset := (i + 1) * 100
T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset)))
stacked = append(stacked, T1)
}
T2, err := T.Stack(sts.axis, stacked...)
if err != nil {
t.Error(err)
continue
}
assert.True(sts.correctShape.Eq(T2.Shape()))
assert.Equal(sts.correctData, T2.Data())
}
for _, sts := range viewStackTests {
T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize())))
switch {
case sts.slices != nil && sts.transform == nil:
var sliced Tensor
if sliced, err = T.Slice(sts.slices...); err != nil {
t.Error(err)
continue
}
T = sliced.(*Dense)
case sts.transform != nil && sts.slices == nil:
T.T(sts.transform...)
}
var stacked []*Dense
for i := 0; i < sts.stackCount-1; i++ {
offset := (i + 1) * 100
T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset)))
switch {
case sts.slices != nil && sts.transform == nil:
var sliced Tensor
if sliced, err = T1.Slice(sts.slices...); err != nil {
t.Error(err)
continue
}
T1 = sliced.(*Dense)
case sts.transform != nil && sts.slices == nil:
T1.T(sts.transform...)
}
stacked = append(stacked, T1)
}
T2, err := T.Stack(sts.axis, stacked...)
if err != nil {
t.Error(err)
continue
}
assert.True(sts.correctShape.Eq(T2.Shape()))
assert.Equal(sts.correctData, T2.Data(), "%q failed", sts.name)
}
// Repeat tests with masks
for _, sts := range simpleStackTests {
T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize())))
var stacked []*Dense
for i := 0; i < sts.stackCount-1; i++ {
offset := (i + 1) * 100
T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset)))
T1.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt))
stacked = append(stacked, T1)
}
T2, err := T.Stack(sts.axis, stacked...)
if err != nil {
t.Error(err)
continue
}
T3 := New(WithShape(sts.correctShape...), WithBacking(sts.correctData))
T3.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt))
assert.True(sts.correctShape.Eq(T2.Shape()))
assert.Equal(sts.correctData, T2.Data())
assert.Equal(T3.mask, T2.mask)
}
for _, sts := range viewStackTests {
T := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, 0, sts.shape.TotalSize())))
switch {
case sts.slices != nil && sts.transform == nil:
var sliced Tensor
if sliced, err = T.Slice(sts.slices...); err != nil {
t.Error(err)
continue
}
T = sliced.(*Dense)
case sts.transform != nil && sts.slices == nil:
T.T(sts.transform...)
}
var stacked []*Dense
for i := 0; i < sts.stackCount-1; i++ {
offset := (i + 1) * 100
T1 := New(WithShape(sts.shape...), WithBacking(Range(sts.dt, offset, sts.shape.TotalSize()+offset)))
T1.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt))
switch {
case sts.slices != nil && sts.transform == nil:
var sliced Tensor
if sliced, err = T1.Slice(sts.slices...); err != nil {
t.Error(err)
continue
}
T1 = sliced.(*Dense)
case sts.transform != nil && sts.slices == nil:
T1.T(sts.transform...)
}
stacked = append(stacked, T1)
}
T2, err := T.Stack(sts.axis, stacked...)
if err != nil {
t.Error(err)
continue
}
T3 := New(WithShape(sts.correctShape...), WithBacking(sts.correctData))
T3.MaskedInside(castToDt(102.0, sts.dt), castToDt(225.0, sts.dt))
assert.True(sts.correctShape.Eq(T2.Shape()))
assert.Equal(sts.correctData, T2.Data())
assert.Equal(T3.mask, T2.mask)
}
// arbitrary view slices
T := New(WithShape(2, 2), WithBacking([]string{"hello", "world", "nihao", "sekai"}))
var stacked []*Dense
for i := 0; i < 1; i++ {
T1 := New(WithShape(2, 2), WithBacking([]string{"blah1", "blah2", "blah3", "blah4"}))
var sliced Tensor
if sliced, err = T1.Slice(nil, nil); err != nil {
t.Error(err)
break
}
T1 = sliced.(*Dense)
stacked = append(stacked, T1)
}
T2, err := T.Stack(0, stacked...)
if err != nil {
t.Error(err)
return
}
correctShape := Shape{2, 2, 2}
correctData := []string{"hello", "world", "nihao", "sekai", "blah1", "blah2", "blah3", "blah4"}
assert.True(correctShape.Eq(T2.Shape()))
assert.Equal(correctData, T2.Data(), "%q failed", "arbitrary view slice")
}
tensor-0.9.24/dense_norms.go 0000664 0000000 0000000 00000017436 14265126151 0016001 0 ustar 00root root 0000000 0000000 package tensor
import (
"math"
"github.com/chewxy/math32"
"github.com/pkg/errors"
)
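// multiSVDNorm rolls colAxis and rowAxis to the innermost positions and returns
// the singular values of each resulting matrix (stacked along the leading axis
// for 3-dimensional inputs). It backs the ±2 and nuclear matrix norms.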
func (t *Dense) multiSVDNorm(rowAxis, colAxis int) (retVal *Dense, err error) {
if rowAxis > colAxis {
rowAxis--
}
dims := t.Dims()
if retVal, err = t.RollAxis(colAxis, dims, true); err != nil {
return
}
if retVal, err = retVal.RollAxis(rowAxis, dims, true); err != nil {
return
}
// Handled manually, since SVD only works on matrices. In the future, this should be revisited once gonum's LAPACK supports float32.
// TODO: SVDFuture
switch dims {
case 2:
retVal, _, _, err = retVal.SVD(false, false)
case 3:
toStack := make([]*Dense, retVal.Shape()[0])
for i := 0; i < retVal.Shape()[0]; i++ {
var sliced, ithS *Dense
if sliced, err = sliceDense(retVal, ss(i)); err != nil {
return
}
if ithS, _, _, err = sliced.SVD(false, false); err != nil {
return
}
toStack[i] = ithS
}
retVal, err = toStack[0].Stack(0, toStack[1:]...)
return
default:
err = errors.Errorf("multiSVDNorm for dimensions greater than 3")
}
return
}
// Norm returns the p-ordered norm of the *Dense, given the axes.
//
// This implementation is directly adapted from NumPy, which is licensed under a BSD-style licence that can be found here: https://docs.scipy.org/doc/numpy-1.9.1/license.html
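//
// A minimal usage sketch (values mirror the package tests; illustrative only):
//
//	T := New(WithShape(2, 2), WithBacking([]float64{1, 3, 5, 7}))
//	fro, _ := T.Norm(FrobeniusNorm()) // scalar Frobenius norm: sqrt(84)
//	one, _ := T.Norm(Norm(1))         // induced 1-norm (max column sum): 10
//	rows, _ := T.Norm(Norm(2), 1)     // 2-norm of each row, along axis 1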
func (t *Dense) Norm(ord NormOrder, axes ...int) (retVal *Dense, err error) {
var ret Tensor
var ok bool
var abs, norm0, normN interface{}
var oneOverOrd interface{}
switch t.t {
case Float64:
abs = math.Abs
norm0 = func(x float64) float64 {
if x != 0 {
return 1
}
return 0
}
normN = func(x float64) float64 {
return math.Pow(math.Abs(x), float64(ord))
}
oneOverOrd = float64(1) / float64(ord)
case Float32:
abs = math32.Abs
norm0 = func(x float32) float32 {
if x != 0 {
return 1
}
return 0
}
normN = func(x float32) float32 {
return math32.Pow(math32.Abs(x), float32(ord))
}
oneOverOrd = float32(1) / float32(ord)
default:
err = errors.Errorf("Norms only works on float types")
return
}
dims := t.Dims()
// simple case
if len(axes) == 0 {
if ord.IsUnordered() || (ord.IsFrobenius() && dims == 2) || (ord == Norm(2) && dims == 1) {
backup := t.AP
ap := makeAP(1)
defer ap.zero()
ap.unlock()
ap.SetShape(t.Size())
ap.lock()
t.AP = ap
if ret, err = Dot(t, t); err != nil { // returns a scalar
err = errors.Wrapf(err, opFail, "Norm-0")
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "Norm-0")
}
switch t.t {
case Float64:
retVal.SetF64(0, math.Sqrt(retVal.GetF64(0)))
case Float32:
retVal.SetF32(0, math32.Sqrt(retVal.GetF32(0)))
}
t.AP = backup
return
}
axes = make([]int, dims)
for i := range axes {
axes[i] = i
}
}
switch len(axes) {
case 1:
cloned := t.Clone().(*Dense)
switch {
case ord.IsUnordered() || ord == Norm(2):
if ret, err = Square(cloned); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "UnorderedNorm-1")
}
if retVal, err = retVal.Sum(axes...); err != nil {
return
}
if ret, err = Sqrt(retVal); err != nil {
return
}
return assertDense(ret)
case ord.IsInf(1):
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "InfNorm-1")
}
return retVal.Max(axes...)
case ord.IsInf(-1):
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "-InfNorm-1")
}
return retVal.Min(axes...)
case ord == Norm(0):
if ret, err = cloned.Apply(norm0); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "Norm-0")
}
return retVal.Sum(axes...)
case ord == Norm(1):
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "Norm-1")
}
return retVal.Sum(axes...)
default:
if ret, err = cloned.Apply(normN); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "Norm-N")
}
if retVal, err = retVal.Sum(axes...); err != nil {
return
}
return retVal.PowScalar(oneOverOrd, true)
}
case 2:
rowAxis := axes[0]
colAxis := axes[1]
// checks
if rowAxis < 0 {
return nil, errors.Errorf("Row Axis %d is < 0", rowAxis)
}
if colAxis < 0 {
return nil, errors.Errorf("Col Axis %d is < 0", colAxis)
}
if rowAxis == colAxis {
return nil, errors.Errorf("Duplicate axes found. Row Axis: %d, Col Axis %d", rowAxis, colAxis)
}
cloned := t.Clone().(*Dense)
switch {
case ord == Norm(2):
// svd norm
if retVal, err = t.multiSVDNorm(rowAxis, colAxis); err != nil {
return nil, errors.Wrapf(err, opFail, "MultiSVDNorm, case 2 with Ord == Norm(2)")
}
dims := retVal.Dims()
return retVal.Max(dims - 1)
case ord == Norm(-2):
// svd norm
if retVal, err = t.multiSVDNorm(rowAxis, colAxis); err != nil {
return nil, errors.Wrapf(err, opFail, "MultiSVDNorm, case 2 with Ord == Norm(-2)")
}
dims := retVal.Dims()
return retVal.Min(dims - 1)
case ord == Norm(1):
if colAxis > rowAxis {
colAxis--
}
if ret, err = cloned.Apply(abs); err != nil {
return nil, errors.Wrapf(err, opFail, "Apply abs in Norm. ord == Norm(1")
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Norm-1, axis=2")
}
if retVal, err = retVal.Sum(rowAxis); err != nil {
return
}
return retVal.Max(colAxis)
case ord == Norm(-1):
if colAxis > rowAxis {
colAxis--
}
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Norm-(-1), axis=2")
}
if retVal, err = retVal.Sum(rowAxis); err != nil {
return
}
return retVal.Min(colAxis)
case ord == Norm(0):
return nil, errors.Errorf("Norm of order 0 undefined for matrices")
case ord.IsInf(1):
if rowAxis > colAxis {
rowAxis--
}
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "InfNorm, axis=2")
}
if retVal, err = retVal.Sum(colAxis); err != nil {
return nil, errors.Wrapf(err, "Sum in infNorm")
}
return retVal.Max(rowAxis)
case ord.IsInf(-1):
if rowAxis > colAxis {
rowAxis--
}
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "-InfNorm, axis=2")
}
if retVal, err = retVal.Sum(colAxis); err != nil {
return nil, errors.Wrapf(err, opFail, "Sum with InfNorm")
}
return retVal.Min(rowAxis)
case ord.IsUnordered() || ord.IsFrobenius():
if ret, err = cloned.Apply(abs); err != nil {
return
}
if retVal, ok = ret.(*Dense); !ok {
return nil, errors.Errorf(opFail, "Frobenius Norm, axis = 2")
}
if ret, err = Square(retVal); err != nil {
return
}
if retVal, err = assertDense(ret); err != nil {
return nil, errors.Wrapf(err, opFail, "Norm-0, axis=2")
}
if retVal, err = retVal.Sum(axes...); err != nil {
return
}
if ret, err = Sqrt(retVal); err != nil {
return
}
return assertDense(ret)
case ord.IsNuclear():
// svd norm
if retVal, err = t.multiSVDNorm(rowAxis, colAxis); err != nil {
return
}
return retVal.Sum(len(t.Shape()) - 1)
default:
return nil, errors.Errorf("Not yet implemented: Norm for Axes %v, ord %v", axes, ord)
}
default:
err = errors.Errorf(dimMismatch, 2, len(axes))
return
}
panic("Unreachable")
}
tensor-0.9.24/dense_norms_test.go 0000664 0000000 0000000 00000015072 14265126151 0017032 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"math"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
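// testNormVal computes T.Norm(ord) and checks that the result is a scalar
// approximately equal to want (NaN expectations are compared with alikef64).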
func testNormVal(T *Dense, ord NormOrder, want float64) error {
retVal, err := T.Norm(ord)
if err != nil {
err = errors.Wrap(err, "testNormVal")
return err
}
if !retVal.IsScalar() {
return errors.New("Expected Scalar")
}
got := retVal.ScalarValue().(float64)
if !closef64(want, got) && !(math.IsNaN(want) && alikef64(want, got)) {
return errors.New(fmt.Sprintf("Norm %v, Backing %v: Want %f, got %f instead", ord, T.Data(), want, got))
}
return nil
}
func TestTensor_Norm(t *testing.T) {
var T *Dense
var err error
var backing, backing1, backing2 []float64
var corrects map[NormOrder]float64
var wrongs []NormOrder
// empty
backing = make([]float64, 0)
T = New(WithBacking(backing))
//TODO
// vector
backing = []float64{1, 2, 3, 4}
backing1 = []float64{-1, -2, -3, -4}
backing2 = []float64{-1, 2, -3, 4}
corrects = map[NormOrder]float64{
UnorderedNorm(): math.Pow(30, 0.5), // Unordered
FrobeniusNorm(): math.NaN(), // Frobenius
NuclearNorm(): math.NaN(), // Nuclear
InfNorm(): 4, // Inf
NegInfNorm(): 1, // -Inf
Norm(0): 4, // 0
Norm(1): 10, // 1
Norm(-1): 12.0 / 25.0, // -1
Norm(2): math.Pow(30, 0.5), // 2
Norm(-2): math.Pow((205.0 / 144.0), -0.5), // -2
}
backings := [][]float64{backing, backing1, backing2}
for ord, want := range corrects {
for _, b := range backings {
T = New(WithShape(len(backing)), WithBacking(b))
if err = testNormVal(T, ord, want); err != nil {
t.Error(errors.Cause(err))
}
}
}
// 2x2 mat
backing = []float64{1, 3, 5, 7}
corrects = map[NormOrder]float64{
UnorderedNorm(): math.Pow(84, 0.5), // Unordered
FrobeniusNorm(): math.Pow(84, 0.5), // Frobenius
NuclearNorm(): 10, // Nuclear
InfNorm(): 12, // Inf
NegInfNorm(): 4, // -Inf
Norm(1): 10, // 1
Norm(-1): 6, // -1
Norm(2): 9.1231056256176615, // 2
Norm(-2): 0.87689437438234041, // -2
}
T = New(WithShape(2, 2), WithBacking(backing))
for ord, want := range corrects {
if err = testNormVal(T, ord, want); err != nil {
t.Errorf("ORD %v: %v", ord, err)
}
}
// impossible values
wrongs = []NormOrder{
Norm(-3),
Norm(0),
}
for _, ord := range wrongs {
if err = testNormVal(T, ord, math.NaN()); err == nil {
t.Errorf("Expected an error when finding norm of order %v", ord)
}
}
// 3x3 mat
// this test is added because the 2x2 example happens to have equal nuclear norm and induced 1-norm.
// the 1/10 scaling factor accommodates the absolute tolerance used.
backing = []float64{0.1, 0.2, 0.3, 0.6, 0, 0.5, 0.3, 0.2, 0.1}
corrects = map[NormOrder]float64{
FrobeniusNorm(): (1.0 / 10.0) * math.Pow(89, 0.5),
NuclearNorm(): 1.3366836911774836,
InfNorm(): 1.1,
NegInfNorm(): 0.6,
Norm(1): 1,
Norm(-1): 0.4,
Norm(2): 0.88722940323461277,
Norm(-2): 0.19456584790481812,
}
T = New(WithShape(3, 3), WithBacking(backing))
for ord, want := range corrects {
if err = testNormVal(T, ord, want); err != nil {
t.Error(err)
}
}
}
func TestTensor_Norm_Axis(t *testing.T) {
assert := assert.New(t)
var T, s, expected, retVal *Dense
var sliced Tensor
var err error
var backing []float64
var ords []NormOrder
t.Log("Vector Norm Tests: compare the use of axis with computing of each row or column separately")
ords = []NormOrder{
UnorderedNorm(),
InfNorm(),
NegInfNorm(),
Norm(-1),
Norm(0),
Norm(1),
Norm(2),
Norm(3),
}
backing = []float64{1, 2, 3, 4, 5, 6}
T = New(WithShape(2, 3), WithBacking(backing))
for _, ord := range ords {
var expecteds []*Dense
for k := 0; k < T.Shape()[1]; k++ {
sliced, _ = T.Slice(nil, ss(k))
s = sliced.(View).Materialize().(*Dense)
expected, _ = s.Norm(ord)
expecteds = append(expecteds, expected)
}
if retVal, err = T.Norm(ord, 0); err != nil {
t.Error(err)
continue
}
assert.Equal(len(expecteds), retVal.Shape()[0])
for i, e := range expecteds {
sliced, _ = retVal.Slice(ss(i))
sliced = sliced.(View).Materialize()
if !allClose(e.Data(), sliced.Data()) {
t.Errorf("Axis = 0; Ord = %v; Expected %v. Got %v instead. ret %v, i: %d", ord, e.Data(), sliced.Data(), retVal, i)
}
}
// reset and do axis = 1
expecteds = expecteds[:0]
for k := 0; k < T.Shape()[0]; k++ {
sliced, _ = T.Slice(ss(k))
s = sliced.(*Dense)
expected, _ = s.Norm(ord)
expecteds = append(expecteds, expected)
}
if retVal, err = T.Norm(ord, 1); err != nil {
t.Error(err)
continue
}
assert.Equal(len(expecteds), retVal.Shape()[0])
for i, e := range expecteds {
sliced, _ = retVal.Slice(ss(i))
sliced = sliced.(View).Materialize().(*Dense)
if !allClose(e.Data(), sliced.Data()) {
t.Errorf("Axis = 1; Ord = %v; Expected %v. Got %v instead", ord, e.Data(), sliced.Data())
}
}
}
t.Log("Matrix Norms")
ords = []NormOrder{
UnorderedNorm(),
FrobeniusNorm(),
InfNorm(),
NegInfNorm(),
Norm(-2),
Norm(-1),
Norm(1),
Norm(2),
}
axeses := [][]int{
{0, 0},
{0, 1},
{0, 2},
{1, 0},
{1, 1},
{1, 2},
{2, 0},
{2, 1},
{2, 2},
}
backing = Range(Float64, 1, 25).([]float64)
T = New(WithShape(2, 3, 4), WithBacking(backing))
dims := T.Dims()
for _, ord := range ords {
for _, axes := range axeses {
rowAxis := axes[0]
colAxis := axes[1]
if rowAxis < 0 {
rowAxis += dims
}
if colAxis < 0 {
colAxis += dims
}
if rowAxis != colAxis {
kthIndex := dims - (rowAxis + colAxis)
var expecteds []*Dense
for k := 0; k < T.Shape()[kthIndex]; k++ {
var slices []Slice
for s := 0; s < kthIndex; s++ {
slices = append(slices, nil)
}
slices = append(slices, ss(k))
sliced, _ = T.Slice(slices...)
if rowAxis > colAxis {
sliced.T()
}
sliced = sliced.(View).Materialize().(*Dense)
s = sliced.(*Dense)
expected, _ = s.Norm(ord)
expecteds = append(expecteds, expected)
}
if retVal, err = T.Norm(ord, rowAxis, colAxis); err != nil {
t.Error(err)
continue
}
for i, e := range expecteds {
sliced, _ = retVal.Slice(ss(i))
assert.Equal(e.Data(), sliced.Data(), "ord %v, rowAxis: %v, colAxis %v", ord, rowAxis, colAxis)
}
}
}
}
}
tensor-0.9.24/dense_reduction_methods.go 0000664 0000000 0000000 00000001630 14265126151 0020347 0 ustar 00root root 0000000 0000000 package tensor
import "github.com/pkg/errors"
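// Sum sums the values of the *Dense along the provided axes, returning a new
// *Dense (a scalar when no axes are given). The work is delegated to the
// tensor's engine, which must implement Sumer; otherwise an error is returned.
//
// A minimal usage sketch (values mirror the package tests; illustrative only):
//
//	T := New(WithShape(2, 3), WithBacking(Range(Float64, 0, 6)))
//	colSums, _ := T.Sum(0) // Data(): []float64{3, 5, 7}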
func (t *Dense) Sum(along ...int) (retVal *Dense, err error) {
var e Engine = t.e
if sumer, ok := e.(Sumer); ok {
var ret Tensor
if ret, err = sumer.Sum(t, along...); err != nil {
return
}
return ret.(*Dense), nil
}
return nil, errors.Errorf("Engine does not support Sum")
}
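// Max returns the maximum values of the *Dense along the provided axes (a
// scalar when no axes are given). It requires the tensor's engine to implement
// Maxer; otherwise an error is returned.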
func (t *Dense) Max(along ...int) (retVal *Dense, err error) {
var e Engine = t.e
if maxer, ok := e.(Maxer); ok {
var ret Tensor
if ret, err = maxer.Max(t, along...); err != nil {
return
}
return ret.(*Dense), nil
}
return nil, errors.Errorf("Engine does not support Max")
}
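// Min returns the minimum values of the *Dense along the provided axes (a
// scalar when no axes are given). It requires the tensor's engine to implement
// Miner; otherwise an error is returned.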
func (t *Dense) Min(along ...int) (retVal *Dense, err error) {
var e Engine = t.e
if miner, ok := e.(Miner); ok {
var ret Tensor
if ret, err = miner.Min(t, along...); err != nil {
return
}
return ret.(*Dense), nil
}
return nil, errors.Errorf("Engine does not support Min")
}
tensor-0.9.24/dense_reduction_test.go 0000664 0000000 0000000 00000105721 14265126151 0017671 0 ustar 00root root 0000000 0000000 // Code generated by genlib2. DO NOT EDIT.
package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
"gorgonia.org/tensor/internal/execution"
)
var denseReductionTests = []struct {
of Dtype
fn interface{}
def interface{}
axis int
correct interface{}
correctShape Shape
}{
// int
{Int, execution.AddI, int(0), 0, []int{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Int, execution.AddI, int(0), 1, []int{6, 9, 24, 27}, Shape{2, 2}},
{Int, execution.AddI, int(0), 2, []int{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// int8
{Int8, execution.AddI8, int8(0), 0, []int8{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Int8, execution.AddI8, int8(0), 1, []int8{6, 9, 24, 27}, Shape{2, 2}},
{Int8, execution.AddI8, int8(0), 2, []int8{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// int16
{Int16, execution.AddI16, int16(0), 0, []int16{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Int16, execution.AddI16, int16(0), 1, []int16{6, 9, 24, 27}, Shape{2, 2}},
{Int16, execution.AddI16, int16(0), 2, []int16{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// int32
{Int32, execution.AddI32, int32(0), 0, []int32{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Int32, execution.AddI32, int32(0), 1, []int32{6, 9, 24, 27}, Shape{2, 2}},
{Int32, execution.AddI32, int32(0), 2, []int32{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// int64
{Int64, execution.AddI64, int64(0), 0, []int64{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Int64, execution.AddI64, int64(0), 1, []int64{6, 9, 24, 27}, Shape{2, 2}},
{Int64, execution.AddI64, int64(0), 2, []int64{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// uint
{Uint, execution.AddU, uint(0), 0, []uint{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Uint, execution.AddU, uint(0), 1, []uint{6, 9, 24, 27}, Shape{2, 2}},
{Uint, execution.AddU, uint(0), 2, []uint{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// uint8
{Uint8, execution.AddU8, uint8(0), 0, []uint8{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Uint8, execution.AddU8, uint8(0), 1, []uint8{6, 9, 24, 27}, Shape{2, 2}},
{Uint8, execution.AddU8, uint8(0), 2, []uint8{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// uint16
{Uint16, execution.AddU16, uint16(0), 0, []uint16{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Uint16, execution.AddU16, uint16(0), 1, []uint16{6, 9, 24, 27}, Shape{2, 2}},
{Uint16, execution.AddU16, uint16(0), 2, []uint16{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// uint32
{Uint32, execution.AddU32, uint32(0), 0, []uint32{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Uint32, execution.AddU32, uint32(0), 1, []uint32{6, 9, 24, 27}, Shape{2, 2}},
{Uint32, execution.AddU32, uint32(0), 2, []uint32{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// uint64
{Uint64, execution.AddU64, uint64(0), 0, []uint64{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Uint64, execution.AddU64, uint64(0), 1, []uint64{6, 9, 24, 27}, Shape{2, 2}},
{Uint64, execution.AddU64, uint64(0), 2, []uint64{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// float32
{Float32, execution.AddF32, float32(0), 0, []float32{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Float32, execution.AddF32, float32(0), 1, []float32{6, 9, 24, 27}, Shape{2, 2}},
{Float32, execution.AddF32, float32(0), 2, []float32{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// float64
{Float64, execution.AddF64, float64(0), 0, []float64{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Float64, execution.AddF64, float64(0), 1, []float64{6, 9, 24, 27}, Shape{2, 2}},
{Float64, execution.AddF64, float64(0), 2, []float64{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// complex64
{Complex64, execution.AddC64, complex64(0), 0, []complex64{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Complex64, execution.AddC64, complex64(0), 1, []complex64{6, 9, 24, 27}, Shape{2, 2}},
{Complex64, execution.AddC64, complex64(0), 2, []complex64{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
// complex128
{Complex128, execution.AddC128, complex128(0), 0, []complex128{6, 8, 10, 12, 14, 16}, Shape{3, 2}},
{Complex128, execution.AddC128, complex128(0), 1, []complex128{6, 9, 24, 27}, Shape{2, 2}},
{Complex128, execution.AddC128, complex128(0), 2, []complex128{1, 5, 9, 13, 17, 21}, Shape{2, 3}},
}
func TestDense_Reduce(t *testing.T) {
assert := assert.New(t)
for _, drt := range denseReductionTests {
T := New(WithShape(2, 3, 2), WithBacking(Range(drt.of, 0, 2*3*2)))
T2, err := T.Reduce(drt.fn, drt.axis, drt.def)
if err != nil {
t.Error(err)
continue
}
assert.True(drt.correctShape.Eq(T2.Shape()))
assert.Equal(drt.correct, T2.Data())
// stupids:
_, err = T.Reduce(drt.fn, 1000, drt.def)
assert.NotNil(err)
// wrong function type
var f interface{}
f = func(a, b float64) float64 { return 0 }
if drt.of == Float64 {
f = func(a, b int) int { return 0 }
}
_, err = T.Reduce(f, 0, drt.correct)
assert.NotNil(err)
// wrong default value type
var def2 interface{}
def2 = 3.14
if drt.of == Float64 {
def2 = int(1)
}
_, err = T.Reduce(drt.fn, 3, def2) // only last axis requires a default value
assert.NotNil(err)
}
}
var sumTests = []struct {
name string
of Dtype
shape Shape
along []int
correctShape Shape
correct interface{}
}{
{"common case: T.Sum() for int", Int, Shape{2, 3}, []int{}, ScalarShape(), int(15)},
{"A.Sum(0) for int", Int, Shape{2, 3}, []int{0}, Shape{3}, []int{3, 5, 7}},
{"A.Sum(1) for int", Int, Shape{2, 3}, []int{1}, Shape{2}, []int{3, 12}},
{"A.Sum(0,1) for int", Int, Shape{2, 3}, []int{0, 1}, ScalarShape(), int(15)},
{"A.Sum(1,0) for int", Int, Shape{2, 3}, []int{1, 0}, ScalarShape(), int(15)},
{"3T.Sum(1,2) for int", Int, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int{66, 210}},
{"4T.Sum() for int", Int, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int(120)},
{"4T.Sum(1,3) for int", Int, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for int", Int, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int{44, 76}},
{"common case: T.Sum() for int8", Int8, Shape{2, 3}, []int{}, ScalarShape(), int8(15)},
{"A.Sum(0) for int8", Int8, Shape{2, 3}, []int{0}, Shape{3}, []int8{3, 5, 7}},
{"A.Sum(1) for int8", Int8, Shape{2, 3}, []int{1}, Shape{2}, []int8{3, 12}},
{"A.Sum(0,1) for int8", Int8, Shape{2, 3}, []int{0, 1}, ScalarShape(), int8(15)},
{"A.Sum(1,0) for int8", Int8, Shape{2, 3}, []int{1, 0}, ScalarShape(), int8(15)},
{"3T.Sum(1,2) for int8", Int8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int8{66, -46}},
{"4T.Sum() for int8", Int8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int8(120)},
{"4T.Sum(1,3) for int8", Int8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int8{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for int8", Int8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int8{44, 76}},
{"common case: T.Sum() for int16", Int16, Shape{2, 3}, []int{}, ScalarShape(), int16(15)},
{"A.Sum(0) for int16", Int16, Shape{2, 3}, []int{0}, Shape{3}, []int16{3, 5, 7}},
{"A.Sum(1) for int16", Int16, Shape{2, 3}, []int{1}, Shape{2}, []int16{3, 12}},
{"A.Sum(0,1) for int16", Int16, Shape{2, 3}, []int{0, 1}, ScalarShape(), int16(15)},
{"A.Sum(1,0) for int16", Int16, Shape{2, 3}, []int{1, 0}, ScalarShape(), int16(15)},
{"3T.Sum(1,2) for int16", Int16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int16{66, 210}},
{"4T.Sum() for int16", Int16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int16(120)},
{"4T.Sum(1,3) for int16", Int16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int16{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for int16", Int16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int16{44, 76}},
{"common case: T.Sum() for int32", Int32, Shape{2, 3}, []int{}, ScalarShape(), int32(15)},
{"A.Sum(0) for int32", Int32, Shape{2, 3}, []int{0}, Shape{3}, []int32{3, 5, 7}},
{"A.Sum(1) for int32", Int32, Shape{2, 3}, []int{1}, Shape{2}, []int32{3, 12}},
{"A.Sum(0,1) for int32", Int32, Shape{2, 3}, []int{0, 1}, ScalarShape(), int32(15)},
{"A.Sum(1,0) for int32", Int32, Shape{2, 3}, []int{1, 0}, ScalarShape(), int32(15)},
{"3T.Sum(1,2) for int32", Int32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int32{66, 210}},
{"4T.Sum() for int32", Int32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int32(120)},
{"4T.Sum(1,3) for int32", Int32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int32{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for int32", Int32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int32{44, 76}},
{"common case: T.Sum() for int64", Int64, Shape{2, 3}, []int{}, ScalarShape(), int64(15)},
{"A.Sum(0) for int64", Int64, Shape{2, 3}, []int{0}, Shape{3}, []int64{3, 5, 7}},
{"A.Sum(1) for int64", Int64, Shape{2, 3}, []int{1}, Shape{2}, []int64{3, 12}},
{"A.Sum(0,1) for int64", Int64, Shape{2, 3}, []int{0, 1}, ScalarShape(), int64(15)},
{"A.Sum(1,0) for int64", Int64, Shape{2, 3}, []int{1, 0}, ScalarShape(), int64(15)},
{"3T.Sum(1,2) for int64", Int64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int64{66, 210}},
{"4T.Sum() for int64", Int64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int64(120)},
{"4T.Sum(1,3) for int64", Int64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int64{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for int64", Int64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int64{44, 76}},
{"common case: T.Sum() for uint", Uint, Shape{2, 3}, []int{}, ScalarShape(), uint(15)},
{"A.Sum(0) for uint", Uint, Shape{2, 3}, []int{0}, Shape{3}, []uint{3, 5, 7}},
{"A.Sum(1) for uint", Uint, Shape{2, 3}, []int{1}, Shape{2}, []uint{3, 12}},
{"A.Sum(0,1) for uint", Uint, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint(15)},
{"A.Sum(1,0) for uint", Uint, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint(15)},
{"3T.Sum(1,2) for uint", Uint, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint{66, 210}},
{"4T.Sum() for uint", Uint, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint(120)},
{"4T.Sum(1,3) for uint", Uint, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for uint", Uint, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint{44, 76}},
{"common case: T.Sum() for uint8", Uint8, Shape{2, 3}, []int{}, ScalarShape(), uint8(15)},
{"A.Sum(0) for uint8", Uint8, Shape{2, 3}, []int{0}, Shape{3}, []uint8{3, 5, 7}},
{"A.Sum(1) for uint8", Uint8, Shape{2, 3}, []int{1}, Shape{2}, []uint8{3, 12}},
{"A.Sum(0,1) for uint8", Uint8, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint8(15)},
{"A.Sum(1,0) for uint8", Uint8, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint8(15)},
{"3T.Sum(1,2) for uint8", Uint8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint8{66, 210}},
{"4T.Sum() for uint8", Uint8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint8(120)},
{"4T.Sum(1,3) for uint8", Uint8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint8{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for uint8", Uint8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint8{44, 76}},
{"common case: T.Sum() for uint16", Uint16, Shape{2, 3}, []int{}, ScalarShape(), uint16(15)},
{"A.Sum(0) for uint16", Uint16, Shape{2, 3}, []int{0}, Shape{3}, []uint16{3, 5, 7}},
{"A.Sum(1) for uint16", Uint16, Shape{2, 3}, []int{1}, Shape{2}, []uint16{3, 12}},
{"A.Sum(0,1) for uint16", Uint16, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint16(15)},
{"A.Sum(1,0) for uint16", Uint16, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint16(15)},
{"3T.Sum(1,2) for uint16", Uint16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint16{66, 210}},
{"4T.Sum() for uint16", Uint16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint16(120)},
{"4T.Sum(1,3) for uint16", Uint16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint16{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for uint16", Uint16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint16{44, 76}},
{"common case: T.Sum() for uint32", Uint32, Shape{2, 3}, []int{}, ScalarShape(), uint32(15)},
{"A.Sum(0) for uint32", Uint32, Shape{2, 3}, []int{0}, Shape{3}, []uint32{3, 5, 7}},
{"A.Sum(1) for uint32", Uint32, Shape{2, 3}, []int{1}, Shape{2}, []uint32{3, 12}},
{"A.Sum(0,1) for uint32", Uint32, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint32(15)},
{"A.Sum(1,0) for uint32", Uint32, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint32(15)},
{"3T.Sum(1,2) for uint32", Uint32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint32{66, 210}},
{"4T.Sum() for uint32", Uint32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint32(120)},
{"4T.Sum(1,3) for uint32", Uint32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint32{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for uint32", Uint32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint32{44, 76}},
{"common case: T.Sum() for uint64", Uint64, Shape{2, 3}, []int{}, ScalarShape(), uint64(15)},
{"A.Sum(0) for uint64", Uint64, Shape{2, 3}, []int{0}, Shape{3}, []uint64{3, 5, 7}},
{"A.Sum(1) for uint64", Uint64, Shape{2, 3}, []int{1}, Shape{2}, []uint64{3, 12}},
{"A.Sum(0,1) for uint64", Uint64, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint64(15)},
{"A.Sum(1,0) for uint64", Uint64, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint64(15)},
{"3T.Sum(1,2) for uint64", Uint64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint64{66, 210}},
{"4T.Sum() for uint64", Uint64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint64(120)},
{"4T.Sum(1,3) for uint64", Uint64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint64{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for uint64", Uint64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint64{44, 76}},
{"common case: T.Sum() for float32", Float32, Shape{2, 3}, []int{}, ScalarShape(), float32(15)},
{"A.Sum(0) for float32", Float32, Shape{2, 3}, []int{0}, Shape{3}, []float32{3, 5, 7}},
{"A.Sum(1) for float32", Float32, Shape{2, 3}, []int{1}, Shape{2}, []float32{3, 12}},
{"A.Sum(0,1) for float32", Float32, Shape{2, 3}, []int{0, 1}, ScalarShape(), float32(15)},
{"A.Sum(1,0) for float32", Float32, Shape{2, 3}, []int{1, 0}, ScalarShape(), float32(15)},
{"3T.Sum(1,2) for float32", Float32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float32{66, 210}},
{"4T.Sum() for float32", Float32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float32(120)},
{"4T.Sum(1,3) for float32", Float32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float32{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for float32", Float32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float32{44, 76}},
{"common case: T.Sum() for float64", Float64, Shape{2, 3}, []int{}, ScalarShape(), float64(15)},
{"A.Sum(0) for float64", Float64, Shape{2, 3}, []int{0}, Shape{3}, []float64{3, 5, 7}},
{"A.Sum(1) for float64", Float64, Shape{2, 3}, []int{1}, Shape{2}, []float64{3, 12}},
{"A.Sum(0,1) for float64", Float64, Shape{2, 3}, []int{0, 1}, ScalarShape(), float64(15)},
{"A.Sum(1,0) for float64", Float64, Shape{2, 3}, []int{1, 0}, ScalarShape(), float64(15)},
{"3T.Sum(1,2) for float64", Float64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float64{66, 210}},
{"4T.Sum() for float64", Float64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float64(120)},
{"4T.Sum(1,3) for float64", Float64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float64{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for float64", Float64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float64{44, 76}},
{"common case: T.Sum() for complex64", Complex64, Shape{2, 3}, []int{}, ScalarShape(), complex64(15)},
{"A.Sum(0) for complex64", Complex64, Shape{2, 3}, []int{0}, Shape{3}, []complex64{3, 5, 7}},
{"A.Sum(1) for complex64", Complex64, Shape{2, 3}, []int{1}, Shape{2}, []complex64{3, 12}},
{"A.Sum(0,1) for complex64", Complex64, Shape{2, 3}, []int{0, 1}, ScalarShape(), complex64(15)},
{"A.Sum(1,0) for complex64", Complex64, Shape{2, 3}, []int{1, 0}, ScalarShape(), complex64(15)},
{"3T.Sum(1,2) for complex64", Complex64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []complex64{66, 210}},
{"4T.Sum() for complex64", Complex64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), complex64(120)},
{"4T.Sum(1,3) for complex64", Complex64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []complex64{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for complex64", Complex64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []complex64{44, 76}},
{"common case: T.Sum() for complex128", Complex128, Shape{2, 3}, []int{}, ScalarShape(), complex128(15)},
{"A.Sum(0) for complex128", Complex128, Shape{2, 3}, []int{0}, Shape{3}, []complex128{3, 5, 7}},
{"A.Sum(1) for complex128", Complex128, Shape{2, 3}, []int{1}, Shape{2}, []complex128{3, 12}},
{"A.Sum(0,1) for complex128", Complex128, Shape{2, 3}, []int{0, 1}, ScalarShape(), complex128(15)},
{"A.Sum(1,0) for complex128", Complex128, Shape{2, 3}, []int{1, 0}, ScalarShape(), complex128(15)},
{"3T.Sum(1,2) for complex128", Complex128, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []complex128{66, 210}},
{"4T.Sum() for complex128", Complex128, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), complex128(120)},
{"4T.Sum(1,3) for complex128", Complex128, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []complex128{10, 18, 42, 50}},
{"4T.Sum(0, 2, 3) for complex128", Complex128, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []complex128{44, 76}},
}
func TestDense_Sum(t *testing.T) {
assert := assert.New(t)
var T, T2 *Dense
var err error
for _, sts := range sumTests {
T = New(WithShape(sts.shape...), WithBacking(Range(sts.of, 0, sts.shape.TotalSize())))
if T2, err = T.Sum(sts.along...); err != nil {
t.Error(err)
continue
}
assert.True(sts.correctShape.Eq(T2.Shape()))
assert.Equal(sts.correct, T2.Data())
}
// idiots
_, err = T.Sum(1000)
assert.NotNil(err)
}
var maxTests = []struct {
name string
of Dtype
shape Shape
along []int
correctShape Shape
correct interface{}
}{
{"common case: T.Max() for int", Int, Shape{2, 3}, []int{}, ScalarShape(), int(5)},
{"A.Max(0)", Int, Shape{2, 3}, []int{0}, Shape{3}, []int{3, 4, 5}},
{"A.Max(1)", Int, Shape{2, 3}, []int{1}, Shape{2}, []int{2, 5}},
{"A.Max(0,1)", Int, Shape{2, 3}, []int{0, 1}, ScalarShape(), int(5)},
{"A.Max(1,0)", Int, Shape{2, 3}, []int{1, 0}, ScalarShape(), int(5)},
{"3T.Max(1,2)", Int, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int{11, 23}},
{"4T.Max()", Int, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int(15)},
{"4T.Max(1,3)", Int, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Int, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int{11, 15}},
{"common case: T.Max() for int8", Int8, Shape{2, 3}, []int{}, ScalarShape(), int8(5)},
{"A.Max(0)", Int8, Shape{2, 3}, []int{0}, Shape{3}, []int8{3, 4, 5}},
{"A.Max(1)", Int8, Shape{2, 3}, []int{1}, Shape{2}, []int8{2, 5}},
{"A.Max(0,1)", Int8, Shape{2, 3}, []int{0, 1}, ScalarShape(), int8(5)},
{"A.Max(1,0)", Int8, Shape{2, 3}, []int{1, 0}, ScalarShape(), int8(5)},
{"3T.Max(1,2)", Int8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int8{11, 23}},
{"4T.Max()", Int8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int8(15)},
{"4T.Max(1,3)", Int8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int8{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Int8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int8{11, 15}},
{"common case: T.Max() for int16", Int16, Shape{2, 3}, []int{}, ScalarShape(), int16(5)},
{"A.Max(0)", Int16, Shape{2, 3}, []int{0}, Shape{3}, []int16{3, 4, 5}},
{"A.Max(1)", Int16, Shape{2, 3}, []int{1}, Shape{2}, []int16{2, 5}},
{"A.Max(0,1)", Int16, Shape{2, 3}, []int{0, 1}, ScalarShape(), int16(5)},
{"A.Max(1,0)", Int16, Shape{2, 3}, []int{1, 0}, ScalarShape(), int16(5)},
{"3T.Max(1,2)", Int16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int16{11, 23}},
{"4T.Max()", Int16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int16(15)},
{"4T.Max(1,3)", Int16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int16{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Int16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int16{11, 15}},
{"common case: T.Max() for int32", Int32, Shape{2, 3}, []int{}, ScalarShape(), int32(5)},
{"A.Max(0)", Int32, Shape{2, 3}, []int{0}, Shape{3}, []int32{3, 4, 5}},
{"A.Max(1)", Int32, Shape{2, 3}, []int{1}, Shape{2}, []int32{2, 5}},
{"A.Max(0,1)", Int32, Shape{2, 3}, []int{0, 1}, ScalarShape(), int32(5)},
{"A.Max(1,0)", Int32, Shape{2, 3}, []int{1, 0}, ScalarShape(), int32(5)},
{"3T.Max(1,2)", Int32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int32{11, 23}},
{"4T.Max()", Int32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int32(15)},
{"4T.Max(1,3)", Int32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int32{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Int32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int32{11, 15}},
{"common case: T.Max() for int64", Int64, Shape{2, 3}, []int{}, ScalarShape(), int64(5)},
{"A.Max(0)", Int64, Shape{2, 3}, []int{0}, Shape{3}, []int64{3, 4, 5}},
{"A.Max(1)", Int64, Shape{2, 3}, []int{1}, Shape{2}, []int64{2, 5}},
{"A.Max(0,1)", Int64, Shape{2, 3}, []int{0, 1}, ScalarShape(), int64(5)},
{"A.Max(1,0)", Int64, Shape{2, 3}, []int{1, 0}, ScalarShape(), int64(5)},
{"3T.Max(1,2)", Int64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int64{11, 23}},
{"4T.Max()", Int64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int64(15)},
{"4T.Max(1,3)", Int64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int64{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Int64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int64{11, 15}},
{"common case: T.Max() for uint", Uint, Shape{2, 3}, []int{}, ScalarShape(), uint(5)},
{"A.Max(0)", Uint, Shape{2, 3}, []int{0}, Shape{3}, []uint{3, 4, 5}},
{"A.Max(1)", Uint, Shape{2, 3}, []int{1}, Shape{2}, []uint{2, 5}},
{"A.Max(0,1)", Uint, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint(5)},
{"A.Max(1,0)", Uint, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint(5)},
{"3T.Max(1,2)", Uint, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint{11, 23}},
{"4T.Max()", Uint, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint(15)},
{"4T.Max(1,3)", Uint, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Uint, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint{11, 15}},
{"common case: T.Max() for uint8", Uint8, Shape{2, 3}, []int{}, ScalarShape(), uint8(5)},
{"A.Max(0)", Uint8, Shape{2, 3}, []int{0}, Shape{3}, []uint8{3, 4, 5}},
{"A.Max(1)", Uint8, Shape{2, 3}, []int{1}, Shape{2}, []uint8{2, 5}},
{"A.Max(0,1)", Uint8, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint8(5)},
{"A.Max(1,0)", Uint8, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint8(5)},
{"3T.Max(1,2)", Uint8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint8{11, 23}},
{"4T.Max()", Uint8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint8(15)},
{"4T.Max(1,3)", Uint8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint8{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Uint8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint8{11, 15}},
{"common case: T.Max() for uint16", Uint16, Shape{2, 3}, []int{}, ScalarShape(), uint16(5)},
{"A.Max(0)", Uint16, Shape{2, 3}, []int{0}, Shape{3}, []uint16{3, 4, 5}},
{"A.Max(1)", Uint16, Shape{2, 3}, []int{1}, Shape{2}, []uint16{2, 5}},
{"A.Max(0,1)", Uint16, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint16(5)},
{"A.Max(1,0)", Uint16, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint16(5)},
{"3T.Max(1,2)", Uint16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint16{11, 23}},
{"4T.Max()", Uint16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint16(15)},
{"4T.Max(1,3)", Uint16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint16{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Uint16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint16{11, 15}},
{"common case: T.Max() for uint32", Uint32, Shape{2, 3}, []int{}, ScalarShape(), uint32(5)},
{"A.Max(0)", Uint32, Shape{2, 3}, []int{0}, Shape{3}, []uint32{3, 4, 5}},
{"A.Max(1)", Uint32, Shape{2, 3}, []int{1}, Shape{2}, []uint32{2, 5}},
{"A.Max(0,1)", Uint32, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint32(5)},
{"A.Max(1,0)", Uint32, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint32(5)},
{"3T.Max(1,2)", Uint32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint32{11, 23}},
{"4T.Max()", Uint32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint32(15)},
{"4T.Max(1,3)", Uint32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint32{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Uint32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint32{11, 15}},
{"common case: T.Max() for uint64", Uint64, Shape{2, 3}, []int{}, ScalarShape(), uint64(5)},
{"A.Max(0)", Uint64, Shape{2, 3}, []int{0}, Shape{3}, []uint64{3, 4, 5}},
{"A.Max(1)", Uint64, Shape{2, 3}, []int{1}, Shape{2}, []uint64{2, 5}},
{"A.Max(0,1)", Uint64, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint64(5)},
{"A.Max(1,0)", Uint64, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint64(5)},
{"3T.Max(1,2)", Uint64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint64{11, 23}},
{"4T.Max()", Uint64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint64(15)},
{"4T.Max(1,3)", Uint64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint64{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Uint64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint64{11, 15}},
{"common case: T.Max() for float32", Float32, Shape{2, 3}, []int{}, ScalarShape(), float32(5)},
{"A.Max(0)", Float32, Shape{2, 3}, []int{0}, Shape{3}, []float32{3, 4, 5}},
{"A.Max(1)", Float32, Shape{2, 3}, []int{1}, Shape{2}, []float32{2, 5}},
{"A.Max(0,1)", Float32, Shape{2, 3}, []int{0, 1}, ScalarShape(), float32(5)},
{"A.Max(1,0)", Float32, Shape{2, 3}, []int{1, 0}, ScalarShape(), float32(5)},
{"3T.Max(1,2)", Float32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float32{11, 23}},
{"4T.Max()", Float32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float32(15)},
{"4T.Max(1,3)", Float32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float32{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Float32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float32{11, 15}},
{"common case: T.Max() for float64", Float64, Shape{2, 3}, []int{}, ScalarShape(), float64(5)},
{"A.Max(0)", Float64, Shape{2, 3}, []int{0}, Shape{3}, []float64{3, 4, 5}},
{"A.Max(1)", Float64, Shape{2, 3}, []int{1}, Shape{2}, []float64{2, 5}},
{"A.Max(0,1)", Float64, Shape{2, 3}, []int{0, 1}, ScalarShape(), float64(5)},
{"A.Max(1,0)", Float64, Shape{2, 3}, []int{1, 0}, ScalarShape(), float64(5)},
{"3T.Max(1,2)", Float64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float64{11, 23}},
{"4T.Max()", Float64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float64(15)},
{"4T.Max(1,3)", Float64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float64{5, 7, 13, 15}},
{"4T.Max(0, 2, 3)", Float64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float64{11, 15}},
}
func TestDense_Max(t *testing.T) {
assert := assert.New(t)
var T, T2 *Dense
var err error
for _, mts := range maxTests {
T = New(WithShape(mts.shape...), WithBacking(Range(mts.of, 0, mts.shape.TotalSize())))
if T2, err = T.Max(mts.along...); err != nil {
t.Error(err)
continue
}
assert.True(mts.correctShape.Eq(T2.Shape()))
assert.Equal(mts.correct, T2.Data())
}
/* IDIOT TESTING TIME */
_, err = T.Max(1000)
assert.NotNil(err)
}
var minTests = []struct {
name string
of Dtype
shape Shape
along []int
correctShape Shape
correct interface{}
}{
{"common case: T.Min() for int", Int, Shape{2, 3}, []int{}, ScalarShape(), int(0)},
{"A.Min(0)", Int, Shape{2, 3}, []int{0}, Shape{3}, []int{0, 1, 2}},
{"A.Min(1)", Int, Shape{2, 3}, []int{1}, Shape{2}, []int{0, 3}},
{"A.Min(0,1)", Int, Shape{2, 3}, []int{0, 1}, ScalarShape(), int(0)},
{"A.Min(1,0)", Int, Shape{2, 3}, []int{1, 0}, ScalarShape(), int(0)},
{"3T.Min(1,2)", Int, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int{0, 12}},
{"4T.Min()", Int, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int(0)},
{"4T.Min(1,3)", Int, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Int, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int{0, 4}},
{"common case: T.Min() for int8", Int8, Shape{2, 3}, []int{}, ScalarShape(), int8(0)},
{"A.Min(0)", Int8, Shape{2, 3}, []int{0}, Shape{3}, []int8{0, 1, 2}},
{"A.Min(1)", Int8, Shape{2, 3}, []int{1}, Shape{2}, []int8{0, 3}},
{"A.Min(0,1)", Int8, Shape{2, 3}, []int{0, 1}, ScalarShape(), int8(0)},
{"A.Min(1,0)", Int8, Shape{2, 3}, []int{1, 0}, ScalarShape(), int8(0)},
{"3T.Min(1,2)", Int8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int8{0, 12}},
{"4T.Min()", Int8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int8(0)},
{"4T.Min(1,3)", Int8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int8{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Int8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int8{0, 4}},
{"common case: T.Min() for int16", Int16, Shape{2, 3}, []int{}, ScalarShape(), int16(0)},
{"A.Min(0)", Int16, Shape{2, 3}, []int{0}, Shape{3}, []int16{0, 1, 2}},
{"A.Min(1)", Int16, Shape{2, 3}, []int{1}, Shape{2}, []int16{0, 3}},
{"A.Min(0,1)", Int16, Shape{2, 3}, []int{0, 1}, ScalarShape(), int16(0)},
{"A.Min(1,0)", Int16, Shape{2, 3}, []int{1, 0}, ScalarShape(), int16(0)},
{"3T.Min(1,2)", Int16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int16{0, 12}},
{"4T.Min()", Int16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int16(0)},
{"4T.Min(1,3)", Int16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int16{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Int16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int16{0, 4}},
{"common case: T.Min() for int32", Int32, Shape{2, 3}, []int{}, ScalarShape(), int32(0)},
{"A.Min(0)", Int32, Shape{2, 3}, []int{0}, Shape{3}, []int32{0, 1, 2}},
{"A.Min(1)", Int32, Shape{2, 3}, []int{1}, Shape{2}, []int32{0, 3}},
{"A.Min(0,1)", Int32, Shape{2, 3}, []int{0, 1}, ScalarShape(), int32(0)},
{"A.Min(1,0)", Int32, Shape{2, 3}, []int{1, 0}, ScalarShape(), int32(0)},
{"3T.Min(1,2)", Int32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int32{0, 12}},
{"4T.Min()", Int32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int32(0)},
{"4T.Min(1,3)", Int32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int32{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Int32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int32{0, 4}},
{"common case: T.Min() for int64", Int64, Shape{2, 3}, []int{}, ScalarShape(), int64(0)},
{"A.Min(0)", Int64, Shape{2, 3}, []int{0}, Shape{3}, []int64{0, 1, 2}},
{"A.Min(1)", Int64, Shape{2, 3}, []int{1}, Shape{2}, []int64{0, 3}},
{"A.Min(0,1)", Int64, Shape{2, 3}, []int{0, 1}, ScalarShape(), int64(0)},
{"A.Min(1,0)", Int64, Shape{2, 3}, []int{1, 0}, ScalarShape(), int64(0)},
{"3T.Min(1,2)", Int64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []int64{0, 12}},
{"4T.Min()", Int64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), int64(0)},
{"4T.Min(1,3)", Int64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []int64{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Int64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []int64{0, 4}},
{"common case: T.Min() for uint", Uint, Shape{2, 3}, []int{}, ScalarShape(), uint(0)},
{"A.Min(0)", Uint, Shape{2, 3}, []int{0}, Shape{3}, []uint{0, 1, 2}},
{"A.Min(1)", Uint, Shape{2, 3}, []int{1}, Shape{2}, []uint{0, 3}},
{"A.Min(0,1)", Uint, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint(0)},
{"A.Min(1,0)", Uint, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint(0)},
{"3T.Min(1,2)", Uint, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint{0, 12}},
{"4T.Min()", Uint, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint(0)},
{"4T.Min(1,3)", Uint, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Uint, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint{0, 4}},
{"common case: T.Min() for uint8", Uint8, Shape{2, 3}, []int{}, ScalarShape(), uint8(0)},
{"A.Min(0)", Uint8, Shape{2, 3}, []int{0}, Shape{3}, []uint8{0, 1, 2}},
{"A.Min(1)", Uint8, Shape{2, 3}, []int{1}, Shape{2}, []uint8{0, 3}},
{"A.Min(0,1)", Uint8, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint8(0)},
{"A.Min(1,0)", Uint8, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint8(0)},
{"3T.Min(1,2)", Uint8, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint8{0, 12}},
{"4T.Min()", Uint8, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint8(0)},
{"4T.Min(1,3)", Uint8, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint8{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Uint8, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint8{0, 4}},
{"common case: T.Min() for uint16", Uint16, Shape{2, 3}, []int{}, ScalarShape(), uint16(0)},
{"A.Min(0)", Uint16, Shape{2, 3}, []int{0}, Shape{3}, []uint16{0, 1, 2}},
{"A.Min(1)", Uint16, Shape{2, 3}, []int{1}, Shape{2}, []uint16{0, 3}},
{"A.Min(0,1)", Uint16, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint16(0)},
{"A.Min(1,0)", Uint16, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint16(0)},
{"3T.Min(1,2)", Uint16, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint16{0, 12}},
{"4T.Min()", Uint16, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint16(0)},
{"4T.Min(1,3)", Uint16, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint16{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Uint16, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint16{0, 4}},
{"common case: T.Min() for uint32", Uint32, Shape{2, 3}, []int{}, ScalarShape(), uint32(0)},
{"A.Min(0)", Uint32, Shape{2, 3}, []int{0}, Shape{3}, []uint32{0, 1, 2}},
{"A.Min(1)", Uint32, Shape{2, 3}, []int{1}, Shape{2}, []uint32{0, 3}},
{"A.Min(0,1)", Uint32, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint32(0)},
{"A.Min(1,0)", Uint32, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint32(0)},
{"3T.Min(1,2)", Uint32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint32{0, 12}},
{"4T.Min()", Uint32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint32(0)},
{"4T.Min(1,3)", Uint32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint32{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Uint32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint32{0, 4}},
{"common case: T.Min() for uint64", Uint64, Shape{2, 3}, []int{}, ScalarShape(), uint64(0)},
{"A.Min(0)", Uint64, Shape{2, 3}, []int{0}, Shape{3}, []uint64{0, 1, 2}},
{"A.Min(1)", Uint64, Shape{2, 3}, []int{1}, Shape{2}, []uint64{0, 3}},
{"A.Min(0,1)", Uint64, Shape{2, 3}, []int{0, 1}, ScalarShape(), uint64(0)},
{"A.Min(1,0)", Uint64, Shape{2, 3}, []int{1, 0}, ScalarShape(), uint64(0)},
{"3T.Min(1,2)", Uint64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []uint64{0, 12}},
{"4T.Min()", Uint64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), uint64(0)},
{"4T.Min(1,3)", Uint64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []uint64{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Uint64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []uint64{0, 4}},
{"common case: T.Min() for float32", Float32, Shape{2, 3}, []int{}, ScalarShape(), float32(0)},
{"A.Min(0)", Float32, Shape{2, 3}, []int{0}, Shape{3}, []float32{0, 1, 2}},
{"A.Min(1)", Float32, Shape{2, 3}, []int{1}, Shape{2}, []float32{0, 3}},
{"A.Min(0,1)", Float32, Shape{2, 3}, []int{0, 1}, ScalarShape(), float32(0)},
{"A.Min(1,0)", Float32, Shape{2, 3}, []int{1, 0}, ScalarShape(), float32(0)},
{"3T.Min(1,2)", Float32, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float32{0, 12}},
{"4T.Min()", Float32, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float32(0)},
{"4T.Min(1,3)", Float32, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float32{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Float32, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float32{0, 4}},
{"common case: T.Min() for float64", Float64, Shape{2, 3}, []int{}, ScalarShape(), float64(0)},
{"A.Min(0)", Float64, Shape{2, 3}, []int{0}, Shape{3}, []float64{0, 1, 2}},
{"A.Min(1)", Float64, Shape{2, 3}, []int{1}, Shape{2}, []float64{0, 3}},
{"A.Min(0,1)", Float64, Shape{2, 3}, []int{0, 1}, ScalarShape(), float64(0)},
{"A.Min(1,0)", Float64, Shape{2, 3}, []int{1, 0}, ScalarShape(), float64(0)},
{"3T.Min(1,2)", Float64, Shape{2, 3, 4}, []int{1, 2}, Shape{2}, []float64{0, 12}},
{"4T.Min()", Float64, Shape{2, 2, 2, 2}, []int{}, ScalarShape(), float64(0)},
{"4T.Min(1,3)", Float64, Shape{2, 2, 2, 2}, []int{1, 3}, Shape{2, 2}, []float64{0, 2, 8, 10}},
{"4T.Min(0, 2, 3)", Float64, Shape{2, 2, 2, 2}, []int{0, 2, 3}, Shape{2}, []float64{0, 4}},
}
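// A hedged worked example of how the expected values above arise (derived from
// the Range backing used by the test, not from the implementation): for the
// {2, 3} cases the backing is [0 1 2 3 4 5], i.e. rows [0 1 2] and [3 4 5].
// Min along axis 0 compares the rows elementwise and yields [0 1 2]; Min along
// axis 1 takes each row's minimum and yields [0 3]; Min over all axes yields
// the scalar 0.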
func TestDense_Min(t *testing.T) {
assert := assert.New(t)
var T, T2 *Dense
var err error
for _, mts := range minTests {
T = New(WithShape(mts.shape...), WithBacking(Range(mts.of, 0, mts.shape.TotalSize())))
if T2, err = T.Min(mts.along...); err != nil {
t.Error(err)
continue
}
assert.True(mts.correctShape.Eq(T2.Shape()))
assert.Equal(mts.correct, T2.Data())
}
/* IDIOT TESTING TIME */
_, err = T.Min(1000)
assert.NotNil(err)
}
tensor-0.9.24/dense_selbyidx_test.go 0000664 0000000 0000000 00000010565 14265126151 0017521 0 ustar 00root root 0000000 0000000 package tensor
import (
"testing"
"github.com/stretchr/testify/assert"
)
type selByIndicesTest struct {
Name string
Data interface{}
Shape Shape
Indices []int
Axis int
WillErr bool
Correct interface{}
CorrectShape Shape
}
var selByIndicesTests = []selByIndicesTest{
{Name: "Basic", Data: Range(Float64, 0, 4), Shape: Shape{2, 2}, Indices: []int{0, 1}, Axis: 0, WillErr: false,
Correct: []float64{0, 1, 2, 3}, CorrectShape: Shape{2, 2},
},
{Name: "3-tensor, axis 0", Data: Range(Float64, 0, 24), Shape: Shape{3, 2, 4}, Indices: []int{1, 1}, Axis: 0, WillErr: false,
Correct: []float64{8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15}, CorrectShape: Shape{2, 2, 4}},
{Name: "3-tensor, axis 1", Data: Range(Float64, 0, 24), Shape: Shape{3, 2, 4}, Indices: []int{1, 1}, Axis: 1, WillErr: false,
Correct: []float64{4, 5, 6, 7, 4, 5, 6, 7, 12, 13, 14, 15, 12, 13, 14, 15, 20, 21, 22, 23, 20, 21, 22, 23}, CorrectShape: Shape{3, 2, 4}},
{Name: "3-tensor, axis 2", Data: Range(Float64, 0, 24), Shape: Shape{3, 2, 4}, Indices: []int{1, 1}, Axis: 2, WillErr: false,
Correct: []float64{1, 1, 5, 5, 9, 9, 13, 13, 17, 17, 21, 21}, CorrectShape: Shape{3, 2, 2}},
{Name: "Vector, axis 0", Data: Range(Int, 0, 5), Shape: Shape{5}, Indices: []int{1, 1}, Axis: 0, WillErr: false,
Correct: []int{1, 1}, CorrectShape: Shape{2}},
{Name: "Vector, axis 1", Data: Range(Int, 0, 5), Shape: Shape{5}, Indices: []int{1, 1}, Axis: 1, WillErr: true,
Correct: []int{1, 1}, CorrectShape: Shape{2}},
{Name: "(4,2) Matrix, with (10) indices", Data: Range(Float32, 0, 8), Shape: Shape{4, 2}, Indices: []int{1, 1, 1, 1, 0, 2, 2, 2, 2, 0}, Axis: 0, WillErr: false,
Correct: []float32{2, 3, 2, 3, 2, 3, 2, 3, 0, 1, 4, 5, 4, 5, 4, 5, 4, 5, 0, 1}, CorrectShape: Shape{10, 2}},
{Name: "(2,1) Matrx (colvec) with (10) indices", Data: Range(Float64, 0, 2), Shape: Shape{2, 1}, Indices: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, Axis: 0, WillErr: false,
Correct: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, CorrectShape: Shape{10},
},
}
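// A hedged worked example of the selection semantics exercised above (taken
// from the "3-tensor, axis 0" case rather than from the implementation): the
// input is a (3, 2, 4) tensor backed by 0..23, so axis 0 indexes three (2, 4)
// blocks. Selecting indices {1, 1} gathers the middle block (values 8..15)
// twice, producing the (2, 2, 4) result listed in Correct.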
func TestDense_SelectByIndices(t *testing.T) {
assert := assert.New(t)
for i, tc := range selByIndicesTests {
T := New(WithShape(tc.Shape...), WithBacking(tc.Data))
indices := New(WithBacking(tc.Indices))
ret, err := ByIndices(T, indices, tc.Axis)
if checkErr(t, tc.WillErr, err, tc.Name, i) {
continue
}
assert.Equal(tc.Correct, ret.Data())
assert.True(tc.CorrectShape.Eq(ret.Shape()))
}
}
var selByIndicesBTests = []struct {
selByIndicesTest
CorrectGrad interface{}
CorrectGradShape Shape
}{
// Basic
{
CorrectGrad: []float64{1, 1, 1, 1},
},
// 3-tensor, axis 0
{
CorrectGrad: []float64{0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 3-tensor, axis 1
{
CorrectGrad: []float64{0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2},
},
// 3-tensor, axis 2
{
CorrectGrad: []float64{0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0},
},
// vector, axis 0
{
CorrectGrad: []int{0, 2, 0, 0, 0},
},
// vector, axis 1
{
CorrectGrad: []float32{4, 6, 8, 12, 8, 12, 0, 0},
},
// (4,2) Matrix with (10) indices
{
CorrectGrad: []float32{2, 2, 4, 4, 4, 4, 0, 0},
},
// (2, 1) Matrix (colvec) with (10) indices
{
CorrectGrad: []float64{0, 10},
},
}
func init() {
for i := range selByIndicesBTests {
selByIndicesBTests[i].selByIndicesTest = selByIndicesTests[i]
selByIndicesBTests[i].CorrectGradShape = selByIndicesTests[i].Shape
}
}
func TestDense_SelectByIndicesB(t *testing.T) {
assert := assert.New(t)
for i, tc := range selByIndicesBTests {
T := New(WithShape(tc.Shape...), WithBacking(tc.Data))
indices := New(WithBacking(tc.Indices))
ret, err := ByIndices(T, indices, tc.Axis)
if checkErr(t, tc.WillErr, err, tc.Name, i) {
continue
}
outGrad := ret.Clone().(*Dense)
switch outGrad.Dtype() {
case Float64:
outGrad.Memset(1.0)
case Float32:
outGrad.Memset(float32(1.0))
}
grad, err := ByIndicesB(T, outGrad, indices, tc.Axis)
if checkErr(t, tc.WillErr, err, tc.Name, i) {
continue
}
assert.Equal(tc.CorrectGrad, grad.Data(), "%v - x:\n%v\nindices:\n%#v\ny:\n%#v\ngrad:\n%v", tc.Name, T, indices, ret, grad)
assert.True(tc.CorrectGradShape.Eq(grad.Shape()), "%v - Grad shape should be %v. Got %v instead.\n\nx:\n%v\nindices:\n%#v\ny:\n%#v\ngrad:\n%v", tc.Name, tc.CorrectGradShape, grad.Shape(), T, indices, ret, grad)
}
}
tensor-0.9.24/dense_softmax_test.go 0000664 0000000 0000000 00000031332 14265126151 0017352 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSoftMax(t *testing.T) {
testCases := []struct {
fn func(x Tensor, axis int, opts ...FuncOpt) (Tensor, error)
x Tensor
axis int
expectedOutput interface{}
}{
{
fn: LogSoftMax,
x: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: -1,
expectedOutput: []float64{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628},
},
{
fn: LogSoftMax,
x: New(
Of(Float32),
WithShape(3, 4),
WithBacking([]float32{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: -1,
expectedOutput: []float32{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628},
},
{
fn: LogSoftMax,
x: New(
Of(Float32),
WithShape(3, 2, 2),
WithBacking([]float32{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: -1,
expectedOutput: []float32{-0.7443967, -0.64439666, -0.7443967, -0.64439666, -0.7443967, -0.64439666, -0.7443966, -0.64439666, -0.7443966, -0.64439666, -0.7443967, -0.64439666},
},
{
fn: LogSoftMax,
x: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: 1,
expectedOutput: []float64{-0.7981388693815918, -0.7981388693815918, -0.5981388693815918, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815918},
},
{
fn: SoftMax,
x: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: 1,
expectedOutput: []float64{0.4501660026875221, 0.45016600268752205, 0.549833997312478, 0.5498339973124778, 0.45016600268752205, 0.45016600268752205, 0.5498339973124778, 0.5498339973124778, 0.45016600268752205, 0.4501660026875221, 0.5498339973124778, 0.549833997312478},
},
{
fn: SoftMax,
x: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: -1,
expectedOutput: []float64{0.47502081252106, 0.52497918747894, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.52497918747894},
},
{
fn: SoftMax,
x: New(
Of(Float32),
WithShape(3, 4),
WithBacking([]float32{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: -1,
expectedOutput: []float32{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514},
},
{
fn: SoftMax,
x: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float64{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1}),
),
axis: -1,
expectedOutput: []float64{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514},
},
}
for i, tC := range testCases {
t.Run(fmt.Sprintf("Example #%d - %v %v", i+1, tC.x.Shape(), tC.x.Dtype()), func(t *testing.T) {
c := assert.New(t)
output, err := tC.fn(tC.x, tC.axis)
t.Logf("output: %#v", output.Data())
c.NoError(err)
c.NotNil(output)
c.Equal(tC.x.Shape(), output.Shape())
c.InDeltaSlice(tC.expectedOutput, output.Data(), 1e-6)
})
}
}
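// A hedged note on the math these expectations encode (the standard
// definitions, assumed rather than taken from the implementation):
//
//	SoftMax(x)_i    = exp(x_i) / Σ_j exp(x_j)
//	LogSoftMax(x)_i = x_i - log(Σ_j exp(x_j))
//
// where the sum runs over the chosen axis; a negative axis counts from the
// last dimension, so axis -1 normalises along the innermost dimension.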
func TestSoftMaxB(t *testing.T) {
testCases := []struct {
fn func(output, grad Tensor, axis int, opts ...FuncOpt) (Tensor, error)
output Tensor
grad Tensor
axis int
expectedOutput interface{}
}{
{
fn: SoftMaxB,
output: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float64{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514}),
),
grad: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: -1,
expectedOutput: []float64{-0.003474116568224552, -0.0014762147035963322, 0.0009803563066858392, 0.00396997522759976, -0.003474116880376028, -0.001476214931490494, 0.0009803561238580223, 0.003969975025543781, -0.0034741159267098936, -0.0014762139946130218, 0.0009803570151630109, 0.003969976093553957},
},
{
fn: LogSoftMaxB,
output: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float64{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628}),
),
grad: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: -1,
expectedOutput: []float64{-0.011383822036598441, -0.003632778232153768, 0.0038817407844924366, 0.01113485948425977, -0.005597937295155945, -0.001445223403599799, 0.0020925260396803457, 0.004950634659075405, 0.00018794744628654992, 0.0007423314249541871, 0.00030331129486827163, -0.0012335901661089598},
},
{
fn: SoftMaxB,
output: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.47502081252106, 0.52497918747894, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.52497918747894}),
),
grad: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: -1,
expectedOutput: []float64{-0.002493760401928919, 0.0024937604019289205, -0.0024937604019289183, 0.002493760401928922, -0.002493760401928915, 0.002493760401928922, -0.002493760401928912, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289183},
},
{
fn: SoftMaxB,
output: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.4501660026875221, 0.45016600268752205, 0.549833997312478, 0.5498339973124778, 0.45016600268752205, 0.45016600268752205, 0.5498339973124778, 0.5498339973124778, 0.45016600268752205, 0.4501660026875221, 0.5498339973124778, 0.549833997312478}),
),
grad: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: 1,
expectedOutput: []float64{-0.004950331454237199, -0.004950331454237198, 0.004950331454237199, 0.0049503314542372, -0.004950331454237196, -0.004950331454237193, 0.004950331454237203, 0.0049503314542372065, -0.004950331454237193, -0.0049503314542372, 0.0049503314542372065, 0.004950331454237193},
},
{
fn: LogSoftMaxB,
output: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{-0.7981388693815918, -0.7981388693815918, -0.5981388693815918, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815918}),
),
grad: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float64{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: 1,
expectedOutput: []float64{-0.008006640107500884, -0.007009960161251325, 0.00800664010750088, 0.007009960161251332, -0.004019920322502654, -0.003023240376253103, 0.004019920322502661, 0.0030232403762530968, -3.32005375044292e-05, 0.0009634794087451421, 3.320053750442642e-05, -0.0009634794087451543},
},
{
fn: LogSoftMaxB,
output: New(
Of(Float32),
WithShape(3, 2, 2),
WithBacking([]float32{-0.7981388693815918, -0.7981388693815918, -0.5981388693815918, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815919, -0.7981388693815918, -0.7981388693815918, -0.5981388693815919, -0.5981388693815918}),
),
grad: New(
Of(Float32),
WithShape(3, 2, 2),
WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: 1,
expectedOutput: []float64{-0.008006640107500884, -0.007009960161251325, 0.00800664010750088, 0.007009960161251332, -0.004019920322502654, -0.003023240376253103, 0.004019920322502661, 0.0030232403762530968, -3.32005375044292e-05, 0.0009634794087451421, 3.320053750442642e-05, -0.0009634794087451543},
},
{
fn: SoftMaxB,
output: New(
Of(Float32),
WithShape(3, 2, 2),
WithBacking([]float32{0.4501660026875221, 0.45016600268752205, 0.549833997312478, 0.5498339973124778, 0.45016600268752205, 0.45016600268752205, 0.5498339973124778, 0.5498339973124778, 0.45016600268752205, 0.4501660026875221, 0.5498339973124778, 0.549833997312478}),
),
grad: New(
Of(Float32),
WithShape(3, 2, 2),
WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: 1,
expectedOutput: []float32{-0.004950331454237199, -0.004950331454237198, 0.004950331454237199, 0.0049503314542372, -0.004950331454237196, -0.004950331454237193, 0.004950331454237203, 0.0049503314542372065, -0.004950331454237193, -0.0049503314542372, 0.0049503314542372065, 0.004950331454237193},
},
{
fn: SoftMaxB,
output: New(
Of(Float32),
WithShape(3, 4),
WithBacking([]float32{0.21383822, 0.23632777, 0.2611826, 0.2886514, 0.21383823, 0.23632778, 0.2611826, 0.2886514, 0.21383822, 0.23632777, 0.26118258, 0.2886514}),
),
grad: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: -1,
expectedOutput: []float32{-0.003474116568224552, -0.0014762147035963322, 0.0009803563066858392, 0.00396997522759976, -0.003474116880376028, -0.001476214931490494, 0.0009803561238580223, 0.003969975025543781, -0.0034741159267098936, -0.0014762139946130218, 0.0009803570151630109, 0.003969976093553957},
},
{
fn: LogSoftMaxB,
output: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float32{-1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551626, -1.2425355294551628, -1.5425355294551628, -1.4425355294551627, -1.3425355294551629, -1.2425355294551628}),
),
grad: New(
Of(Float64),
WithShape(3, 4),
WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: -1,
expectedOutput: []float32{-0.011383822036598441, -0.003632778232153768, 0.0038817407844924366, 0.01113485948425977, -0.005597937295155945, -0.001445223403599799, 0.0020925260396803457, 0.004950634659075405, 0.00018794744628654992, 0.0007423314249541871, 0.00030331129486827163, -0.0012335901661089598},
},
{
fn: SoftMaxB,
output: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float32{0.47502081252106, 0.52497918747894, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.5249791874789399, 0.47502081252106, 0.52497918747894}),
),
grad: New(
Of(Float64),
WithShape(3, 2, 2),
WithBacking([]float32{0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12}),
),
axis: -1,
expectedOutput: []float32{-0.002493760401928919, 0.0024937604019289205, -0.0024937604019289183, 0.002493760401928922, -0.002493760401928915, 0.002493760401928922, -0.002493760401928912, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289253, -0.0024937604019289183, 0.0024937604019289183},
},
}
for i, tC := range testCases {
t.Run(fmt.Sprintf("Example #%d - %v %v", i+1, tC.output.Shape(), tC.output.Dtype()), func(t *testing.T) {
c := assert.New(t)
dx, err := tC.fn(tC.output, tC.grad, tC.axis)
t.Logf("output: %#v", tC.output.Data())
c.NoError(err)
c.NotNil(dx)
c.Equal(tC.output.Shape(), dx.Shape())
c.InDeltaSlice(tC.expectedOutput, dx.Data(), 1e-6)
})
}
}
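// For reference, the backward expectations above are consistent with the
// standard gradients (a hedged derivation, not taken from the implementation):
//
//	SoftMaxB:    dx_i = y_i * (g_i - Σ_j g_j*y_j)
//	LogSoftMaxB: dx_i = g_i - exp(y_i) * Σ_j g_j
//
// where y is the forward output, g is the incoming gradient, and the sums run
// over the softmax axis.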
tensor-0.9.24/dense_svd_test.go 0000664 0000000 0000000 00000013466 14265126151 0016475 0 ustar 00root root 0000000 0000000 package tensor
import (
"fmt"
"testing"
"github.com/pkg/errors"
"gonum.org/v1/gonum/mat"
)
// tests for SVD adapted from Gonum's SVD tests.
// Gonum's licence is listed at https://gonum.org/v1/gonum/license
var svdtestsThin = []struct {
data []float64
shape Shape
correctSData []float64
correctSShape Shape
correctUData []float64
correctUShape Shape
correctVData []float64
correctVShape Shape
}{
{
[]float64{2, 4, 1, 3, 0, 0, 0, 0}, Shape{4, 2},
[]float64{5.464985704219041, 0.365966190626258}, Shape{2},
[]float64{-0.8174155604703632, -0.5760484367663209, -0.5760484367663209, 0.8174155604703633, 0, 0, 0, 0}, Shape{4, 2},
[]float64{-0.4045535848337571, -0.9145142956773044, -0.9145142956773044, 0.4045535848337571}, Shape{2, 2},
},
{
[]float64{1, 1, 0, 1, 0, 0, 0, 0, 0, 11, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 12, 2, 1, 1, 0, 0, 0, 0, 0, 0, 1, 13, 3}, Shape{3, 11},
[]float64{21.259500881097434, 1.5415021616856566, 1.2873979074613628}, Shape{3},
[]float64{-0.5224167862273765, 0.7864430360363114, 0.3295270133658976, -0.5739526766688285, -0.03852203026050301, -0.8179818935216693, -0.6306021141833781, -0.6164603833618163, 0.4715056408282468}, Shape{3, 3},
[]float64{
-0.08123293141915189, 0.08528085505260324, -0.013165501690885152,
-0.05423546426886932, 0.1102707844980355, 0.622210623111631,
0, 0, 0,
-0.0245733326078166, 0.510179651760153, 0.25596360803140994,
0, 0, 0,
0, 0, 0,
-0.026997467150282436, -0.024989929445430496, -0.6353761248025164,
0, 0, 0,
-0.029662131661052707, -0.3999088672621176, 0.3662470150802212,
-0.9798839760830571, 0.11328174160898856, -0.047702613241813366,
-0.16755466189153964, -0.7395268089170608, 0.08395240366704032}, Shape{11, 3},
},
}
var svdtestsFull = []Shape{
{5, 5},
{5, 3},
{3, 5},
{150, 150},
{200, 150},
{150, 200},
}
// calcSigma builds the Σ matrix (singular values on the diagonal, zeros elsewhere) used to reconstruct the input from U, Σ, and Vᵀ.
func calcSigma(s, T *Dense, shape Shape) (sigma *Dense, err error) {
sigma = New(Of(Float64), WithShape(shape...))
for i := 0; i < MinInt(shape[0], shape[1]); i++ {
var idx int
if idx, err = Ltoi(sigma.Shape(), sigma.Strides(), i, i); err != nil {
return
}
sigma.Float64s()[idx] = s.Float64s()[i]
}
return
}
// testSVD checks an SVD result: the input must be left unmodified, and U·Σ·Vᵀ must reconstruct it.
func testSVD(T, T2, s, u, v *Dense, t string, i int) (err error) {
var sigma, reconstructed *Dense
if !allClose(T2.Data(), T.Data(), closeenoughf64) {
return errors.Errorf("A call to SVD modified the underlying data! %s Test %d", t, i)
}
shape := T2.Shape()
if t == "thin" {
shape = Shape{MinInt(shape[0], shape[1]), MinInt(shape[0], shape[1])}
}
if sigma, err = calcSigma(s, T, shape); err != nil {
return
}
v.T()
if reconstructed, err = u.MatMul(sigma, UseSafe()); err != nil {
return
}
if reconstructed, err = reconstructed.MatMul(v, UseSafe()); err != nil {
return
}
if !allClose(T2.Data(), reconstructed.Data(), closeenoughf64) {
return errors.Errorf("Expected reconstructed to be %v. Got %v instead", T2.Data(), reconstructed.Data())
}
return nil
}
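// In other words, testSVD verifies two properties of a decomposition A = U·Σ·Vᵀ:
// the call must leave the input A untouched, and multiplying the returned
// factors back together must reproduce A to within closeenoughf64. For the thin
// SVD, Σ is the square (min(m,n), min(m,n)) matrix rather than the full one.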
func ExampleDense_SVD() {
T := New(
WithShape(4, 5),
WithBacking([]float64{1, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0}),
)
_, u, _, _ := T.SVD(true, true)
uT := u.Clone().(*Dense)
uT.T()
eye, err := u.MatMul(uT)
fmt.Println(eye)
fmt.Println(err)
// Output:
// ⎡1 0 0 0⎤
// ⎢0 1 0 0⎥
// ⎢0 0 1 0⎥
// ⎣0 0 0 1⎦
//
//
}
func TestDense_SVD(t *testing.T) {
var T, T2, s, u, v *Dense
var err error
// gonum specific thin special cases
for i, stts := range svdtestsThin {
T = New(WithShape(stts.shape...), WithBacking(stts.data))
T2 = T.Clone().(*Dense)
if s, u, v, err = T.SVD(true, false); err != nil {
t.Error(err)
continue
}
if !allClose(T2.Data(), T.Data(), closeenoughf64) {
t.Errorf("A call to SVD modified the underlying data! Thin Test %d", i)
continue
}
if !allClose(stts.correctSData, s.Data(), closeenoughf64) {
t.Errorf("Expected s = %v. Got %v instead", stts.correctSData, s.Data())
}
if !allClose(stts.correctUData, u.Data(), closeenoughf64) {
t.Errorf("Expected u = %v. Got %v instead", stts.correctUData, u.Data())
}
if !allClose(stts.correctVData, v.Data(), closeenoughf64) {
t.Errorf("Expected v = %v. Got %v instead", stts.correctVData, v.Data())
}
}
// standard tests
for i, stfs := range svdtestsFull {
T = New(WithShape(stfs...), WithBacking(Random(Float64, stfs.TotalSize())))
T2 = T.Clone().(*Dense)
// full
if s, u, v, err = T.SVD(true, true); err != nil {
t.Error(err)
fmt.Println(err)
continue
}
if err = testSVD(T, T2, s, u, v, "full", i); err != nil {
t.Error(err)
fmt.Println(err)
continue
}
// thin
if s, u, v, err = T.SVD(true, false); err != nil {
t.Error(err)
continue
}
if err = testSVD(T, T2, s, u, v, "thin", i); err != nil {
t.Error(err)
continue
}
// none
if s, u, v, err = T.SVD(false, false); err != nil {
t.Error(err)
continue
}
var svd mat.SVD
var m *mat.Dense
if m, err = ToMat64(T); err != nil {
t.Error(err)
continue
}
if !svd.Factorize(m, mat.SVDFull) {
t.Errorf("Unable to factorise %v", m)
continue
}
if !allClose(s.Data(), svd.Values(nil), closeenoughf64) {
t.Errorf("Singular value mismatch between Full and None decomposition. Expected %v. Got %v instead", svd.Values(nil), s.Data())
}
}
// illogical combination: requesting the full decomposition without U and V must error
T = New(Of(Float64), WithShape(2, 2))
if _, _, _, err = T.SVD(false, true); err == nil {
t.Errorf("Expected an error!")
}
// SVD is only defined for matrices; tensors with more than 2 dimensions must error
T = New(Of(Float64), WithShape(2, 3, 4))
if _, _, _, err = T.SVD(true, true); err == nil {
t.Errorf("Expecetd an error: cannot SVD() a Tensor > 2 dimensions")
}
T = New(Of(Float64), WithShape(2))
if _, _, _, err = T.SVD(true, true); err == nil {
t.Errorf("Expecetd an error: cannot SVD() a Tensor < 2 dimensions")
}
}
tensor-0.9.24/dense_test.go 0000664 0000000 0000000 00000006432 14265126151 0015614 0 ustar 00root root 0000000 0000000 package tensor
import (
"math/rand"
"testing"
"testing/quick"
"time"
"github.com/stretchr/testify/assert"
)
func TestDense_ShallowClone(t *testing.T) {
T := New(Of(Float64), WithBacking([]float64{1, 2, 3, 4}))
T2 := T.ShallowClone()
T2.slice(0, 2)
T2.Float64s()[0] = 1000
assert.Equal(t, T.Data().([]float64)[0:2], T2.Data())
assert.Equal(t, T.Engine(), T2.Engine())
assert.Equal(t, T.oe, T2.oe)
assert.Equal(t, T.flag, T2.flag)
}
func TestDense_Clone(t *testing.T) {
assert := assert.New(t)
cloneChk := func(q *Dense) bool {
a := q.Clone().(*Dense)
if !q.Shape().Eq(a.Shape()) {
t.Errorf("Shape Difference: %v %v", q.Shape(), a.Shape())
return false
}
if len(q.Strides()) != len(a.Strides()) {
t.Errorf("Stride Difference: %v %v", q.Strides(), a.Strides())
return false
}
for i, s := range q.Strides() {
if a.Strides()[i] != s {
t.Errorf("Stride Difference: %v %v", q.Strides(), a.Strides())
return false
}
}
if q.o != a.o {
t.Errorf("Data Order difference : %v %v", q.o, a.o)
return false
}
if q.Δ != a.Δ {
t.Errorf("Triangle Difference: %v %v", q.Δ, a.Δ)
return false
}
if q.flag != a.flag {
t.Errorf("Flag difference : %v %v", q.flag, a.flag)
return false
}
if q.e != a.e {
t.Errorf("Engine difference; %T %T", q.e, a.e)
return false
}
if q.oe != a.oe {
t.Errorf("Optimized Engine difference; %T %T", q.oe, a.oe)
return false
}
if len(q.transposeWith) != len(a.transposeWith) {
t.Errorf("TransposeWith difference: %v %v", q.transposeWith, a.transposeWith)
return false
}
assert.Equal(q.mask, a.mask, "mask difference")
assert.Equal(q.maskIsSoft, a.maskIsSoft, "mask is soft ")
return true
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
if err := quick.Check(cloneChk, &quick.Config{Rand: r}); err != nil {
t.Error(err)
}
}
func TestDenseMasked(t *testing.T) {
T := New(Of(Float64), WithShape(3, 2))
T.ResetMask()
assert.Equal(t, []bool{false, false, false, false, false, false}, T.mask)
}
func TestFromScalar(t *testing.T) {
T := New(FromScalar(3.14))
data := T.Float64s()
assert.Equal(t, []float64{3.14}, data)
}
func Test_recycledDense(t *testing.T) {
T := recycledDense(Float64, ScalarShape())
assert.Equal(t, float64(0), T.Data())
assert.Equal(t, StdEng{}, T.e)
assert.Equal(t, StdEng{}, T.oe)
}
func TestDense_unsqueeze(t *testing.T) {
assert := assert.New(t)
T := New(WithShape(3, 3, 2), WithBacking([]float64{
1, 2, 3, 4, 5, 6,
60, 50, 40, 30, 20, 10,
100, 200, 300, 400, 500, 600,
}))
if err := T.unsqueeze(0); err != nil {
t.Fatal(err)
}
assert.True(T.Shape().Eq(Shape{1, 3, 3, 2}))
assert.Equal([]int{6, 6, 2, 1}, T.Strides()) // if you do shapes.CalcStrides() it'd be {18,6,2,1}
// reset
T.Reshape(3, 3, 2)
if err := T.unsqueeze(1); err != nil {
t.Fatal(err)
}
assert.True(T.Shape().Eq(Shape{3, 1, 3, 2}))
assert.Equal([]int{6, 2, 2, 1}, T.Strides())
// reset
T.Reshape(3, 3, 2)
if err := T.unsqueeze(2); err != nil {
t.Fatal(err)
}
t.Logf("%v", T)
assert.True(T.Shape().Eq(Shape{3, 3, 1, 2}))
assert.Equal([]int{6, 2, 1, 1}, T.Strides())
// reset
T.Reshape(3, 3, 2)
if err := T.unsqueeze(3); err != nil {
t.Fatal(err)
}
t.Logf("%v", T)
assert.True(T.Shape().Eq(Shape{3, 3, 2, 1}))
assert.Equal([]int{6, 2, 1, 1}, T.Strides())
}
tensor-0.9.24/dense_views.go 0000664 0000000 0000000 00000000772 14265126151 0015773 0 ustar 00root root 0000000 0000000 package tensor
// A View is a *Tensor with customized strides. The reason for not splitting views into a different type is complicated.
// This file contains all the methods that deal with Views.
// Materialize takes a view, copies its data, and puts it in a new *Tensor.
func (t *Dense) Materialize() Tensor {
if !t.IsMaterializable() {
return t
}
retVal := recycledDense(t.t, t.shape.Clone(), WithEngine(t.e))
copyDenseIter(retVal, t, nil, nil)
retVal.e = t.e
retVal.oe = t.oe
return retVal
}
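// A hedged usage sketch (based only on constructors and methods that appear in
// this package's tests):
//
//	T := New(WithShape(2, 3), WithBacking(Range(Float64, 0, 6)))
//	_ = T.T()            // lazy transpose: only the access pattern changes
//	M := T.Materialize() // M holds a fresh, contiguous copy of the transposed data
//
// If T is not materializable (e.g. it is not a view), Materialize returns T itself.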
tensor-0.9.24/divmod_amd64.s 0000664 0000000 0000000 00000000507 14265126151 0015566 0 ustar 00root root 0000000 0000000 // +build !noasm
#include "textflag.h"
TEXT ·divmod(SB),NOSPLIT,$0
MOVQ a+0(FP), SI
MOVQ b+8(FP), CX
MOVQ SI, AX
CMPQ CX, $-1
JEQ $1, denomIsOne // if the denominator is -1, jump to the special case (avoids IDIVQ overflow)
CQO
IDIVQ CX
MOVQ AX, q+16(FP)
MOVQ DX, r+24(FP)
bye:
RET
denomIsOne:
NEGQ AX
MOVQ AX, q+16(FP)
MOVQ $0, r+24(FP)
JMP bye
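// A hedged Go equivalent of the routine above (inferred from the instructions,
// not part of the package): it returns the quotient and remainder of a/b, with
// b == -1 special-cased so that IDIVQ cannot overflow when a is the most
// negative int64.
//
//	func divmod(a, b int) (q, r int) {
//		if b == -1 {
//			return -a, 0
//		}
//		return a / b, a % b
//	}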
tensor-0.9.24/engine.go 0000664 0000000 0000000 00000035510 14265126151 0014723 0 ustar 00root root 0000000 0000000 package tensor
// Memory is a representation of memory of the value.
//
// The main reason for requiring the Uintptr() method is that, although Go currently does not have a compacting
// garbage collector, the docs of `unsafe` warn:
// Even if a uintptr holds the address of some object, the garbage collector will not update that uintptr's value if the object moves,
// nor will that uintptr keep the object from being reclaimed.
type Memory interface {
Uintptr() uintptr
MemSize() uintptr
}
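// As a purely illustrative, hypothetical satisfier of Memory (not part of the
// library), a host-allocated block could be described like so:
//
//	type hostBlock struct {
//		ptr  uintptr // start address of the allocation
//		size uintptr // size of the allocation in bytes
//	}
//
//	func (h hostBlock) Uintptr() uintptr { return h.ptr }
//	func (h hostBlock) MemSize() uintptr { return h.size }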
// Engine is a representation of an execution engine.
// While different execution engines can have different capabilities, all execution engines must be able to allocate and free memory
type Engine interface {
AllocAccessible() bool // AllocAccessible returns true if the engine returns Go-accessible memory pointers
Alloc(size int64) (Memory, error) // Alloc allocates memory
Free(mem Memory, size int64) error // Free frees memory
Memset(mem Memory, val interface{}) error // Memset sets every element of the memory to val
Memclr(mem Memory) // Memclr zeroes the memory
Memcpy(dst, src Memory) error // Memcpy copies src into dst
Accessible(mem Memory) (Memory, error) // Accessible returns Go-accessible memory pointers, or errors if it cannot be done
WorksWith(order DataOrder) bool // WorksWith returns true if the data order can be directly worked with
}
type standardEngine interface {
Engine
Adder
Suber
Muler
Diver
Power
Moder
FMAer
MatMuler
MatVecMuler
OuterProder
Dotter
SVDer
Lter
Lteer
Gter
Gteer
ElEqer
MinBetweener
MaxBetweener
// Anything that returns interface{} cannot be added here because such methods will likely have additional
// optimized versions for specific types.
// For example: Tracer and InnerProder both have optimized interfaces for Float32 and Float64 which return those types specifically.
}
type arrayMaker interface {
makeArray(arr *array, t Dtype, size int)
}
// NonStdEngine are any engines that do not allocate using the default built in allocator
type NonStdEngine interface {
NonStdAlloc() // noop
}
/* Data Agnostic Execution Engine Methods */
// Transposer is any engine that can perform an unsafe transpose of a tensor.
type Transposer interface {
Transpose(t Tensor, expStrides []int) error
}
// Concater is any engine that can concatenate multiple Tensors together
type Concater interface {
Concat(t Tensor, axis int, others ...Tensor) (Tensor, error)
}
// Stacker is any engine that can stack multiple Tensors along an axis
type Stacker interface {
Stack(t Tensor, axis int, others ...Tensor) (Tensor, error)
}
// DenseStacker is any engine that can stack DenseTensors along an axis. This is a specialization of Stacker.
type DenseStacker interface {
StackDense(t DenseTensor, axis int, others ...DenseTensor) (retVal DenseTensor, err error)
}
// Repeater is any engine that can repeat values along the given axis.
type Repeater interface {
Repeat(t Tensor, axis int, repeats ...int) (Tensor, error)
RepeatReuse(t Tensor, reuse Tensor, axis int, repeats ...int) (Tensor, error)
}
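// A hedged illustration of the repeat semantics (assumed to mirror numpy-style
// repeat, which the signature suggests): repeating the vector [1, 2, 3] along
// axis 0 with a single repeat count of 2 yields [1, 1, 2, 2, 3, 3], while
// per-element counts {1, 2, 3} yield [1, 2, 2, 3, 3, 3].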
// Diager is any engine that can return a tensor that only contains the diagonal values of the input
type Diager interface {
Diag(a Tensor) (Tensor, error)
}
/* NUMBER INTERFACES
All these are expected to be unsafe on the first tensor
*/
// Adder is any engine that can perform elementwise addition.
type Adder interface {
// Add performs a + b
Add(a, b Tensor, opts ...FuncOpt) (Tensor, error)
// AddScalar adds a scalar to the tensor. leftTensor indicates if the tensor is the left operand.
// Whether or not the input tensor is clobbered is left to the implementation
AddScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
// Suber is any engine that can perform elementwise subtraction.
type Suber interface {
// Sub performs a - b
Sub(a, b Tensor, opts ...FuncOpt) (Tensor, error)
// SubScalar subtracts a scalar from/to the tensor. leftTensor indicates if the tensor is the left operand.
// Whether or not the input tensor is clobbered is left to the implementation
SubScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
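// A hedged usage sketch of the leftTensor flag, written against the package-level
// helpers that are assumed to dispatch to these engine methods:
//
//	a := New(WithShape(2, 2), WithBacking([]float64{1, 2, 3, 4}))
//	b, _ := Sub(a, 10.0) // tensor on the left:  a - 10
//	c, _ := Sub(10.0, a) // tensor on the right: 10 - a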
// Muler is any engine that can perform elementwise multiplication.
// For matrix multiplication, an engine should implement MatMul() or MatVecMul() or Inner()
type Muler interface {
// Mul performs a * b
Mul(a, b Tensor, opts ...FuncOpt) (Tensor, error)
// MulScalar multiplies a scalar to the tensor. leftTensor indicates if the tensor is the left operand.
// Whether or not the input tensor is clobbered is left to the implementation
MulScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
// Diver is any engine that can perform elementwise division.
type Diver interface {
// Div performs a / b
Div(a, b Tensor, opts ...FuncOpt) (Tensor, error)
// DivScalar divides a scalar from/to the tensor. leftTensor indicates if the tensor is the left operand.
// Whether or not the input tensor is clobbered is left to the implementation
DivScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
// Power is any engine that can perform elementwise Pow()
type Power interface {
// Pow performs a ^ b
Pow(a, b Tensor, opts ...FuncOpt) (Tensor, error)
// PowScalar exponentiates a scalar from/to the tensor. leftTensor indicates if the tensor is the left operand.
// Whether or not the input tensor is clobbered is left to the implementation
PowScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
// Moder is any engine that can perform elementwise Mod()
type Moder interface {
// Mod performs a % b
Mod(a, b Tensor, opts ...FuncOpt) (Tensor, error)
// ModScalar performs a % b where one of the operands is scalar. leftTensor indicates if the tensor is the left operand.
// Whether or not the input tensor is clobbered is left to the implementation
ModScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
// MinBetweener is any engine that can perform an elementwise min-between.
type MinBetweener interface {
MinBetween(a, b Tensor, opts ...FuncOpt) (Tensor, error)
MinBetweenScalar(a Tensor, b interface{}, leftTensor bool, opts ...FuncOpt) (Tensor, error)
}
// MaxBetweener is any engine that can perform an elementwise ma