stats-0.7.1/.github/workflows/go.yml

# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
name: Go
on: [push, pull_request]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: "stable"
      - name: Run test with coverage
        run: go test -race -coverprofile=coverage.txt -covermode=atomic
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
stats-0.7.1/.gitignore

coverage.out
coverage.txt
release-notes.txt
.directory
.chglog
.vscode
.DS_Store

stats-0.7.1/CHANGELOG.md
## [Unreleased]
## [v0.7.1] - 2023-05-11
### Add
- Add describe functions ([#77](https://github.com/montanaflynn/stats/issues/77))
### Update
- Update .gitignore
- Update README.md, LICENSE and DOCUMENTATION.md files
- Update github action go workflow to run on push
## [v0.7.0] - 2023-01-08
### Add
- Add geometric distribution functions ([#75](https://github.com/montanaflynn/stats/issues/75))
- Add GitHub action go workflow
### Remove
- Remove travis CI config
### Update
- Update changelog with v0.7.0 changes
- Update changelog with v0.7.0 changes
- Update github action go workflow
- Update geometric distribution tests
## [v0.6.6] - 2021-04-26
### Add
- Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68))
- Add latest versions of Go to test against
### Update
- Update changelog with v0.6.6 changes
### Use
- Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64))
## [v0.6.5] - 2021-02-21
### Add
- Add Float64Data.Quartiles documentation
- Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60))
### Fix
- Fix make release changelog command and add changelog history
### Update
- Update changelog with v0.6.5 changes
- Update changelog with v0.6.4 changes
- Update README.md links to CHANGELOG.md and DOCUMENTATION.md
- Update README.md and Makefile with new release commands
## [v0.6.4] - 2021-01-13
### Fix
- Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58))
### Update
- Update changelog with v0.6.4 changes
- Update examples directory to include a README.md used for synopsis
- Update go.mod to include go version where modules are enabled by default
- Update changelog with v0.6.3 changes
## [v0.6.3] - 2020-02-18
### Add
- Add creating and committing changelog to Makefile release directive
- Add release-notes.txt and .chglog directory to .gitignore
### Update
- Update exported tests to use import for better example documentation
- Update documentation using godoc2md
- Update changelog with v0.6.2 release
## [v0.6.2] - 2020-02-18
### Fix
- Fix linting errcheck warnings in go benchmarks
### Update
- Update Makefile release directive to use correct release name
## [v0.6.1] - 2020-02-18
### Add
- Add StableSample function signature to readme
### Fix
- Fix linting warnings for normal distribution functions formatting and tests
### Update
- Update documentation links and rename DOC.md to DOCUMENTATION.md
- Update README with link to pkg.go.dev reference and release section
- Update Makefile with new changelog, docs, and release directives
- Update DOC.md links to GitHub source code
- Update doc.go comment and add DOC.md package reference file
- Update changelog using git-chglog
## [v0.6.0] - 2020-02-17
### Add
- Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56))
- Add previous versions of Go to travis CI config
- Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51))
- Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48))
- Add doc.go file to show description and usage on godoc.org
- Add comments to new error and legacy error variables
- Add ExampleRound function to tests
- Add go.mod file for module support
- Add Sigmoid, SoftMax and Entropy methods and tests
- Add Entropy documentation, example and benchmarks
- Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44))
### Fix
- Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47))
- Fix AutoCorrelation name in comments and remove unneeded Sprintf
### Improve
- Improve documentation section with command comments
### Remove
- Remove very old versions of Go in travis CI config
- Remove boolean comparison to get rid of gometalinter warning
### Update
- Update license dates
- Update Distance functions signatures to use Float64Data
- Update Sigmoid examples
- Update error names with backward compatibility
### Use
- Use relative link to examples/main.go
- Use a single var block for exported errors
## [v0.5.0] - 2019-01-16
### Add
- Add Sigmoid and Softmax functions
### Fix
- Fix syntax highlighting and add CumulativeSum func
## [v0.4.0] - 2019-01-14
### Add
- Add goreport badge and documentation section to README.md
- Add Examples to test files
- Add AutoCorrelation and nist tests
- Add String method to statsErr type
- Add Y coordinate error for ExponentialRegression
- Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43))
- Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40))
- Add more tests and rename distance files
- Add coverage and benchmarks to azure pipeline
- Add go tests to azure pipeline
### Change
- Change travis tip alias to master
- Change codecov to coveralls for code coverage
### Fix
- Fix a few lint warnings
- Fix example error
### Improve
- Improve test coverage of distance functions
### Only
- Only run travis on stable and tip versions
- Only check code coverage on tip
### Remove
- Remove azure CI pipeline
- Remove unnecessary type conversions
### Return
- Return EmptyInputErr instead of EmptyInput
### Set
- Set up CI with Azure Pipelines
## [0.3.0] - 2017-12-02
### Add
- Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35))
- Add function for computing chebyshev distance. ([#34](https://github.com/montanaflynn/stats/issues/34))
- Add support for time.Duration
- Add LoadRawData to docs and examples
- Add unit test for edge case that wasn't covered
- Add unit tests for edge cases that weren't covered
- Add pearson alias delegating to correlation
- Add CovariancePopulation to Float64Data
- Add pearson product-moment correlation coefficient
- Add population covariance
- Add random slice benchmarks
- Add all applicable functions as methods to Float64Data type
- Add MIT license badge
- Add link to examples/methods.go
- Add Protips for usage and documentation sections
- Add tests for rounding up
- Add webdoc target and remove linting from test target
- Add example usage and consolidate contributing information
### Added
- Added MedianAbsoluteDeviation
### Annotation
- Annotation spelling error
### Auto
- auto commit
- auto commit
### Calculate
- Calculate correlation with sdev and covp
### Clean
- Clean up README.md and add info for offline docs
### Consolidated
- Consolidated all error values.
### Fix
- Fix Percentile logic
- Fix InterQuartileRange method test
- Fix zero percent bug and add test
- Fix usage example output typos
### Improve
- Improve bounds checking in Percentile
- Improve error log messaging
### Imput
- Imput -> Input
### Include
- Include alternative way to set Float64Data in example
### Make
- Make various changes to README.md
### Merge
- Merge branch 'master' of github.com:montanaflynn/stats
- Merge master
### Mode
- Mode calculation fix and tests
### Realized
- Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued.
### Refactor
- Refactor testing of Round()
- Refactor setting Coordinate y field using Exp in place of Pow
- Refactor Makefile and add docs target
### Remove
- Remove deep links to types and functions
### Rename
- Rename file from types to data
### Retrieve
- Retrieve InterQuartileRange for the Float64Data.
### Split
- Split up stats.go into separate files
### Support
- Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36))
### Switch
- Switch default and check targets
### Update
- Update Readme
- Update example methods and some text
- Update README and include Float64Data type method examples
### Pull Requests
- Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile
- Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test
- Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master
- Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds
- Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1
- Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration
- Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master
- Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master
- Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug
- Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master
- Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master
- Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master
- Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson
- Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD
- Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master
- Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce
- Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite
## [0.2.0] - 2015-10-14
### Add
- Add Makefile with gometalinter, testing, benchmarking and coverage report targets
- Add comments describing functions and structs
- Add Correlation func
- Add Covariance func
- Add tests for new function shortcuts
- Add StandardDeviation function as a shortcut to StandardDeviationPopulation
- Add Float64Data and Series types
### Change
- Change Sample to return a standard []float64 type
### Fix
- Fix broken link to Makefile
- Fix broken link and simplify code coverage reporting command
- Fix go vet warning about printf type placeholder
- Fix failing codecov test coverage reporting
- Fix link to CHANGELOG.md
### Fixed
- Fixed typographical error, changed accomdate to accommodate in README.
### Include
- Include Variance and StandardDeviation shortcuts
### Pass
- Pass gometalinter
### Refactor
- Refactor Variance function to be the same as population variance
### Release
- Release version 0.2.0
### Remove
- Remove unneeded do packages and update cover URL
- Remove sudo from pip install
### Reorder
- Reorder functions and sections
### Revert
- Revert to legacy containers to preserve go1.1 testing
### Switch
- Switch from legacy to container-based CI infrastructure
### Update
- Update contributing instructions and mention Makefile
### Pull Requests
- Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate
## [0.1.0] - 2015-08-19
### Add
- Add CONTRIBUTING.md
### Rename
- Rename functions while preserving backwards compatibility
## 0.0.9 - 2015-08-18
### Add
- Add HarmonicMean func
- Add GeometricMean func
- Add .gitignore to avoid committing test coverage report
- Add Outliers struct and QuantileOutliers func
- Add Interquartile Range, Midhinge and Trimean examples
- Add Trimean
- Add Midhinge
- Add Inter Quartile Range
- Add a unit test to check for an empty slice error
- Add Quantiles struct and Quantile func
- Add more tests and fix a typo
- Add Golang 1.5 to build tests
- Add a standard MIT license file
- Add basic benchmarking
- Add regression models
- Add codecov token
- Add codecov
- Add check for slices with a single item
- Add coverage tests
- Add back previous Go versions to Travis CI
- Add Travis CI
- Add GoDoc badge
- Add Percentile and Float64ToInt functions
- Add another rounding test for whole numbers
- Add build status badge
- Add code coverage badge
- Add test for NaN, achieving 100% code coverage
- Add round function
- Add standard deviation function
- Add sum function
### Add
- add tests for sample
- add sample
### Added
- Added sample and population variance and deviation functions
- Added README
### Adjust
- Adjust API ordering
### Avoid
- Avoid unintended consequence of using sort
### Better
- Better performing min/max
- Better description
### Change
- Change package path to potentially fix a bug in earlier versions of Go
### Clean
- Clean up README and add some more information
- Clean up test error
### Consistent
- Consistent empty slice error messages
- Consistent var naming
- Consistent func declaration
### Convert
- Convert ints to floats
### Duplicate
- Duplicate packages for all versions
### Export
- Export Coordinate struct fields
### First
- First commit
### Fix
- Fix copy pasta mistake testing the wrong function
- Fix error message
- Fix usage output and edit API doc section
- Fix testing edgecase where map was in wrong order
- Fix usage example
- Fix usage examples
### Include
- Include the Nearest Rank method of calculating percentiles
### More
- More commenting
### Move
- Move GoDoc link to top
### Redirect
- Redirect kills newer versions of Go
### Refactor
- Refactor code and error checking
### Remove
- Remove unnecessary typecasting in sum func
- Remove cover since it doesn't work for later versions of go
- Remove golint and gocoveralls
### Rename
- Rename StandardDev to StdDev
- Rename StandardDev to StdDev
### Return
- Return errors for all functions
### Run
- Run go fmt to clean up formatting
### Simplify
- Simplify min/max function
### Start
- Start with minimal tests
### Switch
- Switch wercker to travis and update todos
### Table
- table testing style
### Update
- Update README and move the example main.go into its own file
- Update TODO list
- Update README
- Update usage examples and todos
### Use
- Use codecov the recommended way
- Use correct string formatting types
### Pull Requests
- Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample
[Unreleased]: https://github.com/montanaflynn/stats/compare/v0.7.1...HEAD
[v0.7.1]: https://github.com/montanaflynn/stats/compare/v0.7.0...v0.7.1
[v0.7.0]: https://github.com/montanaflynn/stats/compare/v0.6.6...v0.7.0
[v0.6.6]: https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6
[v0.6.5]: https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5
[v0.6.4]: https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4
[v0.6.3]: https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3
[v0.6.2]: https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2
[v0.6.1]: https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1
[v0.6.0]: https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0
[v0.5.0]: https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0
[v0.4.0]: https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0
[0.3.0]: https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0
[0.2.0]: https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0
[0.1.0]: https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0
stats-0.7.1/DOCUMENTATION.md
# stats
`import "github.com/montanaflynn/stats"`
* [Overview](#pkg-overview)
* [Index](#pkg-index)
* [Examples](#pkg-examples)
* [Subdirectories](#pkg-subdirectories)
## Overview
Package stats is a well tested and comprehensive
statistics library package with no dependencies.
Example Usage:

	// start with some source data to use
	data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}

	// you could also use different types like this
	// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
	// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
	// etc...

	median, _ := stats.Median(data)
	fmt.Println(median) // 3.65

	roundedMedian, _ := stats.Round(median, 0)
	fmt.Println(roundedMedian) // 4
MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com)
## Index
* [Variables](#pkg-variables)
* [func AutoCorrelation(data Float64Data, lags int) (float64, error)](#AutoCorrelation)
* [func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ChebyshevDistance)
* [func Correlation(data1, data2 Float64Data) (float64, error)](#Correlation)
* [func Covariance(data1, data2 Float64Data) (float64, error)](#Covariance)
* [func CovariancePopulation(data1, data2 Float64Data) (float64, error)](#CovariancePopulation)
* [func CumulativeSum(input Float64Data) ([]float64, error)](#CumulativeSum)
* [func Entropy(input Float64Data) (float64, error)](#Entropy)
* [func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#EuclideanDistance)
* [func ExpGeom(p float64) (exp float64, err error)](#ExpGeom)
* [func GeometricMean(input Float64Data) (float64, error)](#GeometricMean)
* [func HarmonicMean(input Float64Data) (float64, error)](#HarmonicMean)
* [func InterQuartileRange(input Float64Data) (float64, error)](#InterQuartileRange)
* [func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ManhattanDistance)
* [func Max(input Float64Data) (max float64, err error)](#Max)
* [func Mean(input Float64Data) (float64, error)](#Mean)
* [func Median(input Float64Data) (median float64, err error)](#Median)
* [func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviation)
* [func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviationPopulation)
* [func Midhinge(input Float64Data) (float64, error)](#Midhinge)
* [func Min(input Float64Data) (min float64, err error)](#Min)
* [func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)](#MinkowskiDistance)
* [func Mode(input Float64Data) (mode []float64, err error)](#Mode)
* [func Ncr(n, r int) int](#Ncr)
* [func NormBoxMullerRvs(loc float64, scale float64, size int) []float64](#NormBoxMullerRvs)
* [func NormCdf(x float64, loc float64, scale float64) float64](#NormCdf)
* [func NormEntropy(loc float64, scale float64) float64](#NormEntropy)
* [func NormFit(data []float64) [2]float64](#NormFit)
* [func NormInterval(alpha float64, loc float64, scale float64) [2]float64](#NormInterval)
* [func NormIsf(p float64, loc float64, scale float64) (x float64)](#NormIsf)
* [func NormLogCdf(x float64, loc float64, scale float64) float64](#NormLogCdf)
* [func NormLogPdf(x float64, loc float64, scale float64) float64](#NormLogPdf)
* [func NormLogSf(x float64, loc float64, scale float64) float64](#NormLogSf)
* [func NormMean(loc float64, scale float64) float64](#NormMean)
* [func NormMedian(loc float64, scale float64) float64](#NormMedian)
* [func NormMoment(n int, loc float64, scale float64) float64](#NormMoment)
* [func NormPdf(x float64, loc float64, scale float64) float64](#NormPdf)
* [func NormPpf(p float64, loc float64, scale float64) (x float64)](#NormPpf)
* [func NormPpfRvs(loc float64, scale float64, size int) []float64](#NormPpfRvs)
* [func NormSf(x float64, loc float64, scale float64) float64](#NormSf)
* [func NormStats(loc float64, scale float64, moments string) []float64](#NormStats)
* [func NormStd(loc float64, scale float64) float64](#NormStd)
* [func NormVar(loc float64, scale float64) float64](#NormVar)
* [func Pearson(data1, data2 Float64Data) (float64, error)](#Pearson)
* [func Percentile(input Float64Data, percent float64) (percentile float64, err error)](#Percentile)
* [func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)](#PercentileNearestRank)
* [func PopulationVariance(input Float64Data) (pvar float64, err error)](#PopulationVariance)
* [func ProbGeom(a int, b int, p float64) (prob float64, err error)](#ProbGeom)
* [func Round(input float64, places int) (rounded float64, err error)](#Round)
* [func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)](#Sample)
* [func SampleVariance(input Float64Data) (svar float64, err error)](#SampleVariance)
* [func Sigmoid(input Float64Data) ([]float64, error)](#Sigmoid)
* [func SoftMax(input Float64Data) ([]float64, error)](#SoftMax)
* [func StableSample(input Float64Data, takenum int) ([]float64, error)](#StableSample)
* [func StandardDeviation(input Float64Data) (sdev float64, err error)](#StandardDeviation)
* [func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)](#StandardDeviationPopulation)
* [func StandardDeviationSample(input Float64Data) (sdev float64, err error)](#StandardDeviationSample)
* [func StdDevP(input Float64Data) (sdev float64, err error)](#StdDevP)
* [func StdDevS(input Float64Data) (sdev float64, err error)](#StdDevS)
* [func Sum(input Float64Data) (sum float64, err error)](#Sum)
* [func Trimean(input Float64Data) (float64, error)](#Trimean)
* [func VarGeom(p float64) (exp float64, err error)](#VarGeom)
* [func VarP(input Float64Data) (sdev float64, err error)](#VarP)
* [func VarS(input Float64Data) (sdev float64, err error)](#VarS)
* [func Variance(input Float64Data) (sdev float64, err error)](#Variance)
* [type Coordinate](#Coordinate)
* [func ExpReg(s []Coordinate) (regressions []Coordinate, err error)](#ExpReg)
* [func LinReg(s []Coordinate) (regressions []Coordinate, err error)](#LinReg)
* [func LogReg(s []Coordinate) (regressions []Coordinate, err error)](#LogReg)
* [type Float64Data](#Float64Data)
* [func LoadRawData(raw interface{}) (f Float64Data)](#LoadRawData)
* [func (f Float64Data) AutoCorrelation(lags int) (float64, error)](#Float64Data.AutoCorrelation)
* [func (f Float64Data) Correlation(d Float64Data) (float64, error)](#Float64Data.Correlation)
* [func (f Float64Data) Covariance(d Float64Data) (float64, error)](#Float64Data.Covariance)
* [func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)](#Float64Data.CovariancePopulation)
* [func (f Float64Data) CumulativeSum() ([]float64, error)](#Float64Data.CumulativeSum)
* [func (f Float64Data) Entropy() (float64, error)](#Float64Data.Entropy)
* [func (f Float64Data) GeometricMean() (float64, error)](#Float64Data.GeometricMean)
* [func (f Float64Data) Get(i int) float64](#Float64Data.Get)
* [func (f Float64Data) HarmonicMean() (float64, error)](#Float64Data.HarmonicMean)
* [func (f Float64Data) InterQuartileRange() (float64, error)](#Float64Data.InterQuartileRange)
* [func (f Float64Data) Len() int](#Float64Data.Len)
* [func (f Float64Data) Less(i, j int) bool](#Float64Data.Less)
* [func (f Float64Data) Max() (float64, error)](#Float64Data.Max)
* [func (f Float64Data) Mean() (float64, error)](#Float64Data.Mean)
* [func (f Float64Data) Median() (float64, error)](#Float64Data.Median)
* [func (f Float64Data) MedianAbsoluteDeviation() (float64, error)](#Float64Data.MedianAbsoluteDeviation)
* [func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)](#Float64Data.MedianAbsoluteDeviationPopulation)
* [func (f Float64Data) Midhinge(d Float64Data) (float64, error)](#Float64Data.Midhinge)
* [func (f Float64Data) Min() (float64, error)](#Float64Data.Min)
* [func (f Float64Data) Mode() ([]float64, error)](#Float64Data.Mode)
* [func (f Float64Data) Pearson(d Float64Data) (float64, error)](#Float64Data.Pearson)
* [func (f Float64Data) Percentile(p float64) (float64, error)](#Float64Data.Percentile)
* [func (f Float64Data) PercentileNearestRank(p float64) (float64, error)](#Float64Data.PercentileNearestRank)
* [func (f Float64Data) PopulationVariance() (float64, error)](#Float64Data.PopulationVariance)
* [func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)](#Float64Data.Quartile)
* [func (f Float64Data) QuartileOutliers() (Outliers, error)](#Float64Data.QuartileOutliers)
* [func (f Float64Data) Quartiles() (Quartiles, error)](#Float64Data.Quartiles)
* [func (f Float64Data) Sample(n int, r bool) ([]float64, error)](#Float64Data.Sample)
* [func (f Float64Data) SampleVariance() (float64, error)](#Float64Data.SampleVariance)
* [func (f Float64Data) Sigmoid() ([]float64, error)](#Float64Data.Sigmoid)
* [func (f Float64Data) SoftMax() ([]float64, error)](#Float64Data.SoftMax)
* [func (f Float64Data) StandardDeviation() (float64, error)](#Float64Data.StandardDeviation)
* [func (f Float64Data) StandardDeviationPopulation() (float64, error)](#Float64Data.StandardDeviationPopulation)
* [func (f Float64Data) StandardDeviationSample() (float64, error)](#Float64Data.StandardDeviationSample)
* [func (f Float64Data) Sum() (float64, error)](#Float64Data.Sum)
* [func (f Float64Data) Swap(i, j int)](#Float64Data.Swap)
* [func (f Float64Data) Trimean(d Float64Data) (float64, error)](#Float64Data.Trimean)
* [func (f Float64Data) Variance() (float64, error)](#Float64Data.Variance)
* [type Outliers](#Outliers)
* [func QuartileOutliers(input Float64Data) (Outliers, error)](#QuartileOutliers)
* [type Quartiles](#Quartiles)
* [func Quartile(input Float64Data) (Quartiles, error)](#Quartile)
* [type Series](#Series)
* [func ExponentialRegression(s Series) (regressions Series, err error)](#ExponentialRegression)
* [func LinearRegression(s Series) (regressions Series, err error)](#LinearRegression)
* [func LogarithmicRegression(s Series) (regressions Series, err error)](#LogarithmicRegression)
#### Examples
* [AutoCorrelation](#example_AutoCorrelation)
* [ChebyshevDistance](#example_ChebyshevDistance)
* [Correlation](#example_Correlation)
* [CumulativeSum](#example_CumulativeSum)
* [Entropy](#example_Entropy)
* [ExpGeom](#example_ExpGeom)
* [LinearRegression](#example_LinearRegression)
* [LoadRawData](#example_LoadRawData)
* [Max](#example_Max)
* [Median](#example_Median)
* [Min](#example_Min)
* [ProbGeom](#example_ProbGeom)
* [Round](#example_Round)
* [Sigmoid](#example_Sigmoid)
* [SoftMax](#example_SoftMax)
* [Sum](#example_Sum)
* [VarGeom](#example_VarGeom)
#### Package files
[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [geometric_distribution.go](/src/github.com/montanaflynn/stats/geometric_distribution.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go)
## Variables
``` go
var (
// ErrEmptyInput Input must not be empty
ErrEmptyInput = statsError{"Input must not be empty."}
// ErrNaN Not a number
ErrNaN = statsError{"Not a number."}
// ErrNegative Must not contain negative values
ErrNegative = statsError{"Must not contain negative values."}
// ErrZero Must not contain zero values
ErrZero = statsError{"Must not contain zero values."}
// ErrBounds Input is outside of range
ErrBounds = statsError{"Input is outside of range."}
// ErrSize Must be the same length
ErrSize = statsError{"Must be the same length."}
// ErrInfValue Value is infinite
ErrInfValue = statsError{"Value is infinite."}
// ErrYCoord Y Value must be greater than zero
ErrYCoord = statsError{"Y Value must be greater than zero."}
)
```
These are the package-wide error values.
All error identification should use these values.
https://github.com/golang/go/wiki/Errors#naming
``` go
var (
EmptyInputErr = ErrEmptyInput
NaNErr = ErrNaN
NegativeErr = ErrNegative
ZeroErr = ErrZero
BoundsErr = ErrBounds
SizeErr = ErrSize
InfValue = ErrInfValue
YCoordErr = ErrYCoord
EmptyInput = ErrEmptyInput
)
```
Legacy error names that didn't start with Err
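The errors above are sentinel values, so callers can compare a returned error against them directly. A minimal sketch, added here for illustration and not part of the generated docs; it assumes Median reports the empty-input sentinel when given an empty slice:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	_, err := stats.Median([]float64{})
	// Compare against the exported sentinel value to identify the failure.
	if err == stats.ErrEmptyInput {
		fmt.Println("input slice was empty:", err)
	}
}
```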
## func [AutoCorrelation](/correlation.go?s=853:918#L38)
``` go
func AutoCorrelation(data Float64Data, lags int) (float64, error)
```
AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
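The generated reference carries no inline example here, so the following is a minimal usage sketch; the series and lag value are arbitrary:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Correlate the series with a copy of itself delayed by one position.
	series := []float64{1, 2, 3, 4, 5, 4, 3, 2, 1}
	ac, err := stats.AutoCorrelation(series, 1)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ac)
}
```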
## func [ChebyshevDistance](/distances.go?s=368:456#L20)
``` go
func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
```
ChebyshevDistance computes the Chebyshev distance between two data sets
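A minimal usage sketch, not from the generated docs; the two slices are arbitrary:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	x := []float64{2, 4, 6}
	y := []float64{1, 2, 3}
	// The Chebyshev distance is the largest coordinate-wise
	// difference, which is 3 for these slices.
	d, err := stats.ChebyshevDistance(x, y)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(d)
}
```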
## func [Correlation](/correlation.go?s=112:171#L8)
``` go
func Correlation(data1, data2 Float64Data) (float64, error)
```
Correlation describes the degree of relationship between two sets of data
## func [Covariance](/variance.go?s=1284:1342#L53)
``` go
func Covariance(data1, data2 Float64Data) (float64, error)
```
Covariance is a measure of how much two sets of data change
## func [CovariancePopulation](/variance.go?s=1864:1932#L81)
``` go
func CovariancePopulation(data1, data2 Float64Data) (float64, error)
```
CovariancePopulation computes covariance for entire population between two variables.
## func [CumulativeSum](/cumulative_sum.go?s=81:137#L4)
``` go
func CumulativeSum(input Float64Data) ([]float64, error)
```
CumulativeSum calculates the cumulative sum of the input slice
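A minimal usage sketch with illustrative values:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Each element is the sum of everything up to and including it.
	cs, err := stats.CumulativeSum([]float64{1, 2, 3})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cs) // [1 3 6]
}
```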
## func [Entropy](/entropy.go?s=77:125#L6)
``` go
func Entropy(input Float64Data) (float64, error)
```
Entropy provides calculation of the entropy
## func [EuclideanDistance](/distances.go?s=836:924#L36)
``` go
func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
```
EuclideanDistance computes the Euclidean distance between two data sets
## func [ExpGeom](/geometric_distribution.go?s=652:700#L27)
``` go
func ExpGeom(p float64) (exp float64, err error)
```
ExpGeom generates the expectation or average number of trials
for a geometric random variable with parameter p
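A minimal usage sketch; the success probability is arbitrary:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Expected number of trials for a geometric random
	// variable with success probability p = 0.5.
	exp, err := stats.ExpGeom(0.5)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(exp)
}
```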
## func [GeometricMean](/mean.go?s=319:373#L18)
``` go
func GeometricMean(input Float64Data) (float64, error)
```
GeometricMean gets the geometric mean for a slice of numbers
## func [HarmonicMean](/mean.go?s=717:770#L40)
``` go
func HarmonicMean(input Float64Data) (float64, error)
```
HarmonicMean gets the harmonic mean for a slice of numbers
## func [InterQuartileRange](/quartile.go?s=821:880#L45)
``` go
func InterQuartileRange(input Float64Data) (float64, error)
```
InterQuartileRange finds the range between Q1 and Q3
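A minimal usage sketch with arbitrary data:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := []float64{1, 2, 3, 4, 5, 6, 7, 8}
	// The interquartile range is Q3 minus Q1 for the slice.
	iqr, err := stats.InterQuartileRange(data)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(iqr)
}
```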
## func [ManhattanDistance](/distances.go?s=1277:1365#L50)
``` go
func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
```
ManhattanDistance computes the Manhattan distance between two data sets
## func [Max](/max.go?s=78:130#L8)
``` go
func Max(input Float64Data) (max float64, err error)
```
Max finds the highest number in a slice
## func [Mean](/mean.go?s=77:122#L6)
``` go
func Mean(input Float64Data) (float64, error)
```
Mean gets the average of a slice of numbers
## func [Median](/median.go?s=85:143#L6)
``` go
func Median(input Float64Data) (median float64, err error)
```
Median gets the median number in a slice of numbers
## func [MedianAbsoluteDeviation](/deviation.go?s=125:197#L6)
``` go
func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)
```
MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
## func [MedianAbsoluteDeviationPopulation](/deviation.go?s=360:442#L11)
``` go
func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)
```
MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
## func [Midhinge](/quartile.go?s=1075:1124#L55)
``` go
func Midhinge(input Float64Data) (float64, error)
```
Midhinge finds the average of the first and third quartiles
## func [Min](/min.go?s=78:130#L6)
``` go
func Min(input Float64Data) (min float64, err error)
```
Min finds the lowest number in a set of data
## func [MinkowskiDistance](/distances.go?s=2152:2256#L75)
``` go
func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)
```
MinkowskiDistance computes the Minkowski distance between two data sets
Arguments:

	dataPointX: First set of data points.
	dataPointY: Second set of data points. The lengths of both data
	            sets must be equal.
	lambda:     Also known as p or "city blocks". With lambda = 1 the
	            returned distance is the Manhattan distance, with
	            lambda = 2 it is the Euclidean distance, and as lambda
	            approaches infinity it approaches the Chebyshev
	            distance.

Return:

	Distance or error
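A minimal usage sketch showing the effect of lambda; the values are arbitrary:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	x := []float64{1, 2, 3}
	y := []float64{2, 4, 6}

	// lambda = 1 gives the Manhattan distance, lambda = 2 the Euclidean distance.
	manhattan, err := stats.MinkowskiDistance(x, y, 1)
	if err != nil {
		fmt.Println(err)
		return
	}
	euclidean, err := stats.MinkowskiDistance(x, y, 2)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(manhattan, euclidean)
}
```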
## func [Mode](/mode.go?s=85:141#L4)
``` go
func Mode(input Float64Data) (mode []float64, err error)
```
Mode gets the mode [most frequent value(s)] of a slice of float64s
## func [Ncr](/norm.go?s=7384:7406#L239)
``` go
func Ncr(n, r int) int
```
Ncr is an N choose R algorithm (Aaron Cannon's algorithm).
## func [NormBoxMullerRvs](/norm.go?s=667:736#L23)
``` go
func NormBoxMullerRvs(loc float64, scale float64, size int) []float64
```
NormBoxMullerRvs generates random variates using the Box–Muller transform.
For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html
## func [NormCdf](/norm.go?s=1826:1885#L52)
``` go
func NormCdf(x float64, loc float64, scale float64) float64
```
NormCdf is the cumulative distribution function.
## func [NormEntropy](/norm.go?s=5773:5825#L180)
``` go
func NormEntropy(loc float64, scale float64) float64
```
NormEntropy is the differential entropy of the RV.
## func [NormFit](/norm.go?s=6058:6097#L187)
``` go
func NormFit(data []float64) [2]float64
```
NormFit returns the maximum likelihood estimators for the Normal Distribution.
Takes array of float64 values.
Returns array of Mean followed by Standard Deviation.
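A minimal usage sketch with an arbitrary sample; for this data the estimators should come out to a mean of 5 and a standard deviation of 2:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	params := stats.NormFit([]float64{2, 4, 4, 4, 5, 5, 7, 9})
	fmt.Println("mean:", params[0], "stdev:", params[1])
}
```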
## func [NormInterval](/norm.go?s=6976:7047#L221)
``` go
func NormInterval(alpha float64, loc float64, scale float64) [2]float64
```
NormInterval finds endpoints of the range that contains alpha percent of the distribution.
## func [NormIsf](/norm.go?s=4330:4393#L137)
``` go
func NormIsf(p float64, loc float64, scale float64) (x float64)
```
NormIsf is the inverse survival function (inverse of sf).
## func [NormLogCdf](/norm.go?s=2016:2078#L57)
``` go
func NormLogCdf(x float64, loc float64, scale float64) float64
```
NormLogCdf is the log of the cumulative distribution function.
## func [NormLogPdf](/norm.go?s=1590:1652#L47)
``` go
func NormLogPdf(x float64, loc float64, scale float64) float64
```
NormLogPdf is the log of the probability density function.
## func [NormLogSf](/norm.go?s=2423:2484#L67)
``` go
func NormLogSf(x float64, loc float64, scale float64) float64
```
NormLogSf is the log of the survival function.
## func [NormMean](/norm.go?s=6560:6609#L206)
``` go
func NormMean(loc float64, scale float64) float64
```
NormMean is the mean/expected value of the distribution.
## func [NormMedian](/norm.go?s=6431:6482#L201)
``` go
func NormMedian(loc float64, scale float64) float64
```
NormMedian is the median of the distribution.
## func [NormMoment](/norm.go?s=4694:4752#L146)
``` go
func NormMoment(n int, loc float64, scale float64) float64
```
NormMoment approximates the non-central (raw) moment of order n.
For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution
## func [NormPdf](/norm.go?s=1357:1416#L42)
``` go
func NormPdf(x float64, loc float64, scale float64) float64
```
NormPdf is the probability density function.
## func [NormPpf](/norm.go?s=2854:2917#L75)
``` go
func NormPpf(p float64, loc float64, scale float64) (x float64)
```
NormPpf is the point percentile function (the inverse of the CDF).
It is based on Peter John Acklam's inverse normal CDF
algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer available).
For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/
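A minimal usage sketch; the 97.5th percentile of the standard normal distribution is roughly 1.96:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Inverse CDF of the standard normal distribution at p = 0.975.
	x := stats.NormPpf(0.975, 0, 1)
	fmt.Println(x) // approximately 1.96
}
```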
## func [NormPpfRvs](/norm.go?s=247:310#L12)
``` go
func NormPpfRvs(loc float64, scale float64, size int) []float64
```
NormPpfRvs generates random variates using the Point Percentile Function.
For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/
## func [NormSf](/norm.go?s=2250:2308#L62)
``` go
func NormSf(x float64, loc float64, scale float64) float64
```
NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate).
## func [NormStats](/norm.go?s=5277:5345#L162)
``` go
func NormStats(loc float64, scale float64, moments string) []float64
```
NormStats returns the mean, variance, skew, and/or kurtosis.
Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’).
Takes string containing any of 'mvsk'.
Returns array of m v s k in that order.
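A minimal usage sketch requesting all four moments of a standard normal distribution:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// "mvsk" asks for mean, variance, skew and kurtosis, in that order.
	mvsk := stats.NormStats(0, 1, "mvsk")
	fmt.Println(mvsk)
}
```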
## func [NormStd](/norm.go?s=6814:6862#L216)
``` go
func NormStd(loc float64, scale float64) float64
```
NormStd is the standard deviation of the distribution.
## func [NormVar](/norm.go?s=6675:6723#L211)
``` go
func NormVar(loc float64, scale float64) float64
```
NormVar is the variance of the distribution.
## func [Pearson](/correlation.go?s=655:710#L33)
``` go
func Pearson(data1, data2 Float64Data) (float64, error)
```
Pearson calculates the Pearson product-moment correlation coefficient between two variables
## func [Percentile](/percentile.go?s=98:181#L8)
``` go
func Percentile(input Float64Data, percent float64) (percentile float64, err error)
```
Percentile finds the relative standing in a slice of floats
## func [PercentileNearestRank](/percentile.go?s=1079:1173#L54)
``` go
func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)
```
PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
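A minimal usage sketch with arbitrary data:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	// The value at the 90th percentile using the Nearest Rank method.
	p, err := stats.PercentileNearestRank(data, 90)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(p)
}
```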
## func [PopulationVariance](/variance.go?s=828:896#L31)
``` go
func PopulationVariance(input Float64Data) (pvar float64, err error)
```
PopulationVariance finds the amount of variance within a population
## func [ProbGeom](/geometric_distribution.go?s=258:322#L10)
``` go
func ProbGeom(a int, b int, p float64) (prob float64, err error)
```
ProbGeom generates the probability for a geometric random variable
with parameter p to achieve success in the interval of [a, b] trials
See https://en.wikipedia.org/wiki/Geometric_distribution for more information
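A minimal usage sketch; the trial interval and success probability are arbitrary:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Probability that the first success lands in trials [1, 3]
	// when each trial succeeds with probability 0.5.
	prob, err := stats.ProbGeom(1, 3, 0.5)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(prob)
}
```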
## func [Round](/round.go?s=88:154#L6)
``` go
func Round(input float64, places int) (rounded float64, err error)
```
Round a float to a specific decimal place or precision
## func [Sample](/sample.go?s=112:192#L9)
``` go
func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)
```
Sample returns a sample from the input, with or without replacement
## func [SampleVariance](/variance.go?s=1058:1122#L42)
``` go
func SampleVariance(input Float64Data) (svar float64, err error)
```
SampleVariance finds the amount of variance within a sample
## func [Sigmoid](/sigmoid.go?s=228:278#L9)
``` go
func Sigmoid(input Float64Data) ([]float64, error)
```
Sigmoid returns the input values mapped to the range of 0 to 1
along the sigmoid or s-shaped curve, commonly used in
machine learning while training neural networks as an
activation function.
## func [SoftMax](/softmax.go?s=206:256#L8)
``` go
func SoftMax(input Float64Data) ([]float64, error)
```
SoftMax returns the input values in the range of 0 to 1
with sum of all the probabilities being equal to one. It
is commonly used in machine learning neural networks.
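A minimal usage sketch; the input values are arbitrary:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	sm, err := stats.SoftMax([]float64{3.0, 1.0, 0.2})
	if err != nil {
		fmt.Println(err)
		return
	}
	sum, _ := stats.Sum(sm)
	fmt.Println(sm, sum) // the outputs should sum to 1
}
```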
## func [StableSample](/sample.go?s=974:1042#L50)
``` go
func StableSample(input Float64Data, takenum int) ([]float64, error)
```
StableSample, like stable sort, returns samples from the input while keeping the order of the original data.
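A minimal usage sketch with arbitrary data:
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := []float64{10, 20, 30, 40, 50}
	// Take three samples; unlike Sample, the relative order of the
	// original data is preserved in the result.
	s, err := stats.StableSample(data, 3)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(s)
}
```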
## func [StandardDeviation](/deviation.go?s=695:762#L27)
``` go
func StandardDeviation(input Float64Data) (sdev float64, err error)
```
StandardDeviation finds the amount of variation in the dataset
## func [StandardDeviationPopulation](/deviation.go?s=892:969#L32)
``` go
func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)
```
StandardDeviationPopulation finds the amount of variation from the population
## func [StandardDeviationSample](/deviation.go?s=1250:1323#L46)
``` go
func StandardDeviationSample(input Float64Data) (sdev float64, err error)
```
StandardDeviationSample finds the amount of variation from a sample
## func [StdDevP](/legacy.go?s=339:396#L14)
``` go
func StdDevP(input Float64Data) (sdev float64, err error)
```
StdDevP is a shortcut to StandardDeviationPopulation
## func [StdDevS](/legacy.go?s=497:554#L19)
``` go
func StdDevS(input Float64Data) (sdev float64, err error)
```
StdDevS is a shortcut to StandardDeviationSample
## func [Sum](/sum.go?s=78:130#L6)
``` go
func Sum(input Float64Data) (sum float64, err error)
```
Sum adds all the numbers of a slice together
## func [Trimean](/quartile.go?s=1320:1368#L65)
``` go
func Trimean(input Float64Data) (float64, error)
```
Trimean finds the average of the median and the midhinge
## func [VarGeom](/geometric_distribution.go?s=885:933#L37)
``` go
func VarGeom(p float64) (exp float64, err error)
```
VarGeom generates the variance for a
geometric random variable with parameter p
## func [VarP](/legacy.go?s=59:113#L4)
``` go
func VarP(input Float64Data) (sdev float64, err error)
```
VarP is a shortcut to PopulationVariance
## func [VarS](/legacy.go?s=193:247#L9)
``` go
func VarS(input Float64Data) (sdev float64, err error)
```
VarS is a shortcut to SampleVariance
## func [Variance](/variance.go?s=659:717#L26)
``` go
func Variance(input Float64Data) (sdev float64, err error)
```
Variance finds the amount of variation in the dataset
## type [Coordinate](/regression.go?s=143:183#L9)
``` go
type Coordinate struct {
X, Y float64
}
```
Coordinate holds the data in a series
### func [ExpReg](/legacy.go?s=791:856#L29)
``` go
func ExpReg(s []Coordinate) (regressions []Coordinate, err error)
```
ExpReg is a shortcut to ExponentialRegression
### func [LinReg](/legacy.go?s=643:708#L24)
``` go
func LinReg(s []Coordinate) (regressions []Coordinate, err error)
```
LinReg is a shortcut to LinearRegression
### func [LogReg](/legacy.go?s=944:1009#L34)
``` go
func LogReg(s []Coordinate) (regressions []Coordinate, err error)
```
LogReg is a shortcut to LogarithmicRegression
## type [Float64Data](/data.go?s=80:106#L4)
``` go
type Float64Data []float64
```
Float64Data is a named type for []float64 with helper methods
### func [LoadRawData](/load.go?s=145:194#L12)
``` go
func LoadRawData(raw interface{}) (f Float64Data)
```
LoadRawData parses and converts a slice of mixed data types to floats
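A minimal usage sketch with mixed input types (values are arbitrary):
``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Mixed input types are parsed into a Float64Data value,
	// so the helper methods can be used directly on the result.
	d := stats.LoadRawData([]interface{}{1.1, "2", 3})
	mean, err := d.Mean()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(mean)
}
```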
### func (Float64Data) [AutoCorrelation](/data.go?s=3257:3320#L91)
``` go
func (f Float64Data) AutoCorrelation(lags int) (float64, error)
```
AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
### func (Float64Data) [Correlation](/data.go?s=3058:3122#L86)
``` go
func (f Float64Data) Correlation(d Float64Data) (float64, error)
```
Correlation describes the degree of relationship between two sets of data
### func (Float64Data) [Covariance](/data.go?s=4801:4864#L141)
``` go
func (f Float64Data) Covariance(d Float64Data) (float64, error)
```
Covariance is a measure of how much two sets of data change
### func (Float64Data) [CovariancePopulation](/data.go?s=4983:5056#L146)
``` go
func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)
```
CovariancePopulation computes covariance for entire population between two variables
### func (Float64Data) [CumulativeSum](/data.go?s=883:938#L28)
``` go
func (f Float64Data) CumulativeSum() ([]float64, error)
```
CumulativeSum returns the cumulative sum of the data
### func (Float64Data) [Entropy](/data.go?s=5480:5527#L162)
``` go
func (f Float64Data) Entropy() (float64, error)
```
Entropy provides calculation of the entropy
### func (Float64Data) [GeometricMean](/data.go?s=1332:1385#L40)
``` go
func (f Float64Data) GeometricMean() (float64, error)
```
GeometricMean returns the geometric mean of the data
### func (Float64Data) [Get](/data.go?s=129:168#L7)
``` go
func (f Float64Data) Get(i int) float64
```
Get item in slice
### func (Float64Data) [HarmonicMean](/data.go?s=1460:1512#L43)
``` go
func (f Float64Data) HarmonicMean() (float64, error)
```
HarmonicMean returns the harmonic mean of the data
### func (Float64Data) [InterQuartileRange](/data.go?s=3755:3813#L106)
``` go
func (f Float64Data) InterQuartileRange() (float64, error)
```
InterQuartileRange finds the range between Q1 and Q3
### func (Float64Data) [Len](/data.go?s=217:247#L10)
``` go
func (f Float64Data) Len() int
```
Len returns length of slice
### func (Float64Data) [Less](/data.go?s=318:358#L13)
``` go
func (f Float64Data) Less(i, j int) bool
```
Less returns if one number is less than another
### func (Float64Data) [Max](/data.go?s=645:688#L22)
``` go
func (f Float64Data) Max() (float64, error)
```
Max returns the maximum number in the data
### func (Float64Data) [Mean](/data.go?s=1005:1049#L31)
``` go
func (f Float64Data) Mean() (float64, error)
```
Mean returns the mean of the data
### func (Float64Data) [Median](/data.go?s=1111:1157#L34)
``` go
func (f Float64Data) Median() (float64, error)
```
Median returns the median of the data
### func (Float64Data) [MedianAbsoluteDeviation](/data.go?s=1630:1693#L46)
``` go
func (f Float64Data) MedianAbsoluteDeviation() (float64, error)
```
MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
### func (Float64Data) [MedianAbsoluteDeviationPopulation](/data.go?s=1842:1915#L51)
``` go
func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)
```
MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
### func (Float64Data) [Midhinge](/data.go?s=3912:3973#L111)
``` go
func (f Float64Data) Midhinge(d Float64Data) (float64, error)
```
Midhinge finds the average of the first and third quartiles
### func (Float64Data) [Min](/data.go?s=536:579#L19)
``` go
func (f Float64Data) Min() (float64, error)
```
Min returns the minimum number in the data
### func (Float64Data) [Mode](/data.go?s=1217:1263#L37)
``` go
func (f Float64Data) Mode() ([]float64, error)
```
Mode returns the mode of the data
### func (Float64Data) [Pearson](/data.go?s=3455:3515#L96)
``` go
func (f Float64Data) Pearson(d Float64Data) (float64, error)
```
Pearson calculates the Pearson product-moment correlation coefficient between two variables.
### func (Float64Data) [Percentile](/data.go?s=2696:2755#L76)
``` go
func (f Float64Data) Percentile(p float64) (float64, error)
```
Percentile finds the relative standing in a slice of floats
### func (Float64Data) [PercentileNearestRank](/data.go?s=2869:2939#L81)
``` go
func (f Float64Data) PercentileNearestRank(p float64) (float64, error)
```
PercentileNearestRank finds the relative standing using the Nearest Rank method
### func (Float64Data) [PopulationVariance](/data.go?s=4495:4553#L131)
``` go
func (f Float64Data) PopulationVariance() (float64, error)
```
PopulationVariance finds the amount of variance within a population
### func (Float64Data) [Quartile](/data.go?s=3610:3673#L101)
``` go
func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)
```
Quartile returns the three quartile points from a slice of data
### func (Float64Data) [QuartileOutliers](/data.go?s=2542:2599#L71)
``` go
func (f Float64Data) QuartileOutliers() (Outliers, error)
```
QuartileOutliers finds the mild and extreme outliers
### func (Float64Data) [Quartiles](/data.go?s=5628:5679#L167)
``` go
func (f Float64Data) Quartiles() (Quartiles, error)
```
Quartiles returns the three quartile points from instance of Float64Data
### func (Float64Data) [Sample](/data.go?s=4208:4269#L121)
``` go
func (f Float64Data) Sample(n int, r bool) ([]float64, error)
```
Sample returns a sample from the input, with or without replacement
### func (Float64Data) [SampleVariance](/data.go?s=4652:4706#L136)
``` go
func (f Float64Data) SampleVariance() (float64, error)
```
SampleVariance finds the amount of variance within a sample
### func (Float64Data) [Sigmoid](/data.go?s=5169:5218#L151)
``` go
func (f Float64Data) Sigmoid() ([]float64, error)
```
Sigmoid returns the input values along the sigmoid or s-shaped curve
### func (Float64Data) [SoftMax](/data.go?s=5359:5408#L157)
``` go
func (f Float64Data) SoftMax() ([]float64, error)
```
SoftMax returns the input values in the range of 0 to 1
with sum of all the probabilities being equal to one.
### func (Float64Data) [StandardDeviation](/data.go?s=2026:2083#L56)
``` go
func (f Float64Data) StandardDeviation() (float64, error)
```
StandardDeviation finds the amount of variation in the dataset
### func (Float64Data) [StandardDeviationPopulation](/data.go?s=2199:2266#L61)
``` go
func (f Float64Data) StandardDeviationPopulation() (float64, error)
```
StandardDeviationPopulation finds the amount of variation from the population
### func (Float64Data) [StandardDeviationSample](/data.go?s=2382:2445#L66)
``` go
func (f Float64Data) StandardDeviationSample() (float64, error)
```
StandardDeviationSample finds the amount of variation from a sample
### func (Float64Data) [Sum](/data.go?s=764:807#L25)
``` go
func (f Float64Data) Sum() (float64, error)
```
Sum returns the total of all the numbers in the data
### func (Float64Data) [Swap](/data.go?s=425:460#L16)
``` go
func (f Float64Data) Swap(i, j int)
```
Swap switches out two numbers in slice
### func (Float64Data) [Trimean](/data.go?s=4059:4119#L116)
``` go
func (f Float64Data) Trimean(d Float64Data) (float64, error)
```
Trimean finds the average of the median and the midhinge
### func (Float64Data) [Variance](/data.go?s=4350:4398#L126)
``` go
func (f Float64Data) Variance() (float64, error)
```
Variance finds the amount of variation in the dataset
## type [Outliers](/outlier.go?s=73:139#L4)
``` go
type Outliers struct {
Mild Float64Data
Extreme Float64Data
}
```
Outliers holds mild and extreme outliers found in data
### func [QuartileOutliers](/outlier.go?s=197:255#L10)
``` go
func QuartileOutliers(input Float64Data) (Outliers, error)
```
QuartileOutliers finds the mild and extreme outliers
## type [Quartiles](/quartile.go?s=75:136#L6)
``` go
type Quartiles struct {
Q1 float64
Q2 float64
Q3 float64
}
```
Quartiles holds the three quartile points
### func [Quartile](/quartile.go?s=205:256#L13)
``` go
func Quartile(input Float64Data) (Quartiles, error)
```
Quartile returns the three quartile points from a slice of data
## type [Series](/regression.go?s=76:100#L6)
``` go
type Series []Coordinate
```
Series is a container for a series of data
### func [ExponentialRegression](/regression.go?s=1089:1157#L50)
``` go
func ExponentialRegression(s Series) (regressions Series, err error)
```
ExponentialRegression returns an exponential regression on data series
### func [LinearRegression](/regression.go?s=262:325#L14)
``` go
func LinearRegression(s Series) (regressions Series, err error)
```
LinearRegression finds the least squares linear regression on data series
### func [LogarithmicRegression](/regression.go?s=1903:1971#L85)
``` go
func LogarithmicRegression(s Series) (regressions Series, err error)
```
LogarithmicRegression returns a logarithmic regression on data series
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
stats-0.7.1/LICENSE

The MIT License (MIT)
Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
stats-0.7.1/Makefile

.PHONY: all
default: test lint

format:
	go fmt .

test:
	go test -race

check: format test

benchmark:
	go test -bench=. -benchmem

coverage:
	go test -coverprofile=coverage.out
	go tool cover -html="coverage.out"

lint: format
	golangci-lint run .

docs:
	godoc2md github.com/montanaflynn/stats | sed -e s#src/target/##g > DOCUMENTATION.md

release:
	git-chglog --output CHANGELOG.md --next-tag ${TAG}
	git add CHANGELOG.md
	git commit -m "Update changelog with ${TAG} changes"
	git tag ${TAG}
	git-chglog $(TAG) | tail -n +4 | gsed '1s/^/$(TAG)\n/gm' > release-notes.txt
	git push origin master ${TAG}
	hub release create --copy -F release-notes.txt ${TAG}
stats-0.7.1/README.md

# Stats - Golang Statistics Package
[![][action-svg]][action-url] [![][codecov-svg]][codecov-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url]
A well tested and comprehensive Golang statistics library / package / module with no dependencies.
If you have any suggestions, problems or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition, simply starring the repo would show your support for the project and be very much appreciated!
## Installation
```
go get github.com/montanaflynn/stats
```
## Example Usage
All the functions can be seen in [examples/main.go](examples/main.go) but here's a little taste:
```go
// start with some source data to use
data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
// you could also use different types like this
// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
// etc...
median, _ := stats.Median(data)
fmt.Println(median) // 3.65
roundedMedian, _ := stats.Round(median, 0)
fmt.Println(roundedMedian) // 4
```
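Here is one more small, hypothetical snippet (not taken from examples/main.go) showing the same kind of data accessed through the Float64Data helper methods:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Mixed raw input is converted to a Float64Data value.
	d := stats.LoadRawData([]interface{}{1.1, "2", 3.0, 4, "5"})

	mean, _ := d.Mean()
	fmt.Println(mean)

	quartiles, _ := d.Quartiles()
	fmt.Println(quartiles.Q1, quartiles.Q2, quartiles.Q3)
}
```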
## Documentation
The entire API documentation is available on [GoDoc.org](http://godoc.org/github.com/montanaflynn/stats) or [pkg.go.dev](https://pkg.go.dev/github.com/montanaflynn/stats).
You can also view docs offline with the following commands:
```
# Command line
godoc . # show all exported apis
godoc . Median # show a single function
godoc -ex . Round # show function with example
godoc . Float64Data # show the type and methods
# Local website
godoc -http=:4444 # start the godoc server on port 4444
open http://localhost:4444/pkg/github.com/montanaflynn/stats/
```
The exported API is as follows:
```go
var (
ErrEmptyInput = statsError{"Input must not be empty."}
ErrNaN = statsError{"Not a number."}
ErrNegative = statsError{"Must not contain negative values."}
ErrZero = statsError{"Must not contain zero values."}
ErrBounds = statsError{"Input is outside of range."}
ErrSize = statsError{"Must be the same length."}
ErrInfValue = statsError{"Value is infinite."}
ErrYCoord = statsError{"Y Value must be greater than zero."}
)
func Round(input float64, places int) (rounded float64, err error) {}
type Float64Data []float64
func LoadRawData(raw interface{}) (f Float64Data) {}
func AutoCorrelation(data Float64Data, lags int) (float64, error) {}
func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
func Correlation(data1, data2 Float64Data) (float64, error) {}
func Covariance(data1, data2 Float64Data) (float64, error) {}
func CovariancePopulation(data1, data2 Float64Data) (float64, error) {}
func CumulativeSum(input Float64Data) ([]float64, error) {}
func Describe(input Float64Data, allowNaN bool, percentiles *[]float64) (*Description, error) {}
func DescribePercentileFunc(input Float64Data, allowNaN bool, percentiles *[]float64, percentileFunc func(Float64Data, float64) (float64, error)) (*Description, error) {}
func Entropy(input Float64Data) (float64, error) {}
func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
func GeometricMean(input Float64Data) (float64, error) {}
func HarmonicMean(input Float64Data) (float64, error) {}
func InterQuartileRange(input Float64Data) (float64, error) {}
func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
func Max(input Float64Data) (max float64, err error) {}
func Mean(input Float64Data) (float64, error) {}
func Median(input Float64Data) (median float64, err error) {}
func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {}
func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {}
func Midhinge(input Float64Data) (float64, error) {}
func Min(input Float64Data) (min float64, err error) {}
func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {}
func Mode(input Float64Data) (mode []float64, err error) {}
func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {}
func NormCdf(x float64, loc float64, scale float64) float64 {}
func NormEntropy(loc float64, scale float64) float64 {}
func NormFit(data []float64) [2]float64 {}
func NormInterval(alpha float64, loc float64, scale float64) [2]float64 {}
func NormIsf(p float64, loc float64, scale float64) (x float64) {}
func NormLogCdf(x float64, loc float64, scale float64) float64 {}
func NormLogPdf(x float64, loc float64, scale float64) float64 {}
func NormLogSf(x float64, loc float64, scale float64) float64 {}
func NormMean(loc float64, scale float64) float64 {}
func NormMedian(loc float64, scale float64) float64 {}
func NormMoment(n int, loc float64, scale float64) float64 {}
func NormPdf(x float64, loc float64, scale float64) float64 {}
func NormPpf(p float64, loc float64, scale float64) (x float64) {}
func NormPpfRvs(loc float64, scale float64, size int) []float64 {}
func NormSf(x float64, loc float64, scale float64) float64 {}
func NormStats(loc float64, scale float64, moments string) []float64 {}
func NormStd(loc float64, scale float64) float64 {}
func NormVar(loc float64, scale float64) float64 {}
func Pearson(data1, data2 Float64Data) (float64, error) {}
func Percentile(input Float64Data, percent float64) (percentile float64, err error) {}
func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {}
func PopulationVariance(input Float64Data) (pvar float64, err error) {}
func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {}
func SampleVariance(input Float64Data) (svar float64, err error) {}
func Sigmoid(input Float64Data) ([]float64, error) {}
func SoftMax(input Float64Data) ([]float64, error) {}
func StableSample(input Float64Data, takenum int) ([]float64, error) {}
func StandardDeviation(input Float64Data) (sdev float64, err error) {}
func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {}
func StandardDeviationSample(input Float64Data) (sdev float64, err error) {}
func StdDevP(input Float64Data) (sdev float64, err error) {}
func StdDevS(input Float64Data) (sdev float64, err error) {}
func Sum(input Float64Data) (sum float64, err error) {}
func Trimean(input Float64Data) (float64, error) {}
func VarP(input Float64Data) (sdev float64, err error) {}
func VarS(input Float64Data) (sdev float64, err error) {}
func Variance(input Float64Data) (sdev float64, err error) {}
func ProbGeom(a int, b int, p float64) (prob float64, err error) {}
func ExpGeom(p float64) (exp float64, err error) {}
func VarGeom(p float64) (exp float64, err error) {}
type Coordinate struct {
X, Y float64
}
type Series []Coordinate
func ExponentialRegression(s Series) (regressions Series, err error) {}
func LinearRegression(s Series) (regressions Series, err error) {}
func LogarithmicRegression(s Series) (regressions Series, err error) {}
type Outliers struct {
Mild Float64Data
Extreme Float64Data
}
type Quartiles struct {
Q1 float64
Q2 float64
Q3 float64
}
func Quartile(input Float64Data) (Quartiles, error) {}
func QuartileOutliers(input Float64Data) (Outliers, error) {}
```
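For example, the `Describe` function summarizes a dataset in one call, similar to pandas.describe() in Python; here is a minimal sketch:
```go
// summarize count, mean, std, min, max and selected percentiles
data := []float64{1.0, 2.0, 3.0}
percentiles := []float64{25.0, 50.0, 75.0}
description, _ := stats.Describe(data, false, &percentiles)
fmt.Println(description.String(2)) // prints the summary with two decimal places
```
The `String` method formats the resulting `Description` with the given number of decimal places.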
## Contributing
Pull requests are always welcome no matter how big or small. I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more.
1. Fork the repo and clone your fork
2. Create new branch (`git checkout -b some-thing`)
3. Make the desired changes
4. Ensure tests pass (`go test -cover` or `make test`)
5. Run lint and fix problems (`go vet .` or `make lint`)
6. Commit changes (`git commit -am 'Did something'`)
7. Push branch (`git push origin some-thing`)
8. Submit pull request
To make things as seamless as possible please also consider the following steps:
- Update `examples/main.go` with a simple example of the new feature
- Update `README.md` documentation section with any new exported API
- Keep 100% code coverage (you can check with `make coverage`)
- Squash commits into single units of work with `git rebase -i new-feature`
## Releasing
This is not required of contributors and is mostly here as a reminder to myself as the maintainer of this repo. To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md).
First install the tools used to generate the markdown files and release:
```
go install github.com/davecheney/godoc2md@latest
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
brew tap git-chglog/git-chglog
brew install gnu-sed hub git-chglog
```
Then you can run these `make` directives:
```
# Generate DOCUMENTATION.md
make docs
```
Then we can create a [CHANGELOG.md](/CHANGELOG.md), a new git tag and a GitHub release:
```
make release TAG=v0.x.x
```
To authenticate `hub` for the release you will need to create a personal access token and use it as the password when it's requested.
## MIT License
Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
[action-url]: https://github.com/montanaflynn/stats/actions
[action-svg]: https://img.shields.io/github/actions/workflow/status/montanaflynn/stats/go.yml
[codecov-url]: https://app.codecov.io/gh/montanaflynn/stats
[codecov-svg]: https://img.shields.io/codecov/c/github/montanaflynn/stats?token=wnw8dActnH
[goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats
[goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats
[godoc-url]: https://godoc.org/github.com/montanaflynn/stats
[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg
[pkggodev-url]: https://pkg.go.dev/github.com/montanaflynn/stats
[pkggodev-svg]: https://gistcdn.githack.com/montanaflynn/b02f1d78d8c0de8435895d7e7cd0d473/raw/17f2a5a69f1323ecd42c00e0683655da96d9ecc8/badge.svg
[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE
[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg
stats-0.7.1/correlation.go 0000664 0000000 0000000 00000002446 14427120666 0015543 0 ustar 00root root 0000000 0000000 package stats
import (
"math"
)
// Correlation describes the degree of relationship between two sets of data
func Correlation(data1, data2 Float64Data) (float64, error) {
l1 := data1.Len()
l2 := data2.Len()
if l1 == 0 || l2 == 0 {
return math.NaN(), EmptyInputErr
}
if l1 != l2 {
return math.NaN(), SizeErr
}
sdev1, _ := StandardDeviationPopulation(data1)
sdev2, _ := StandardDeviationPopulation(data2)
if sdev1 == 0 || sdev2 == 0 {
return 0, nil
}
covp, _ := CovariancePopulation(data1, data2)
return covp / (sdev1 * sdev2), nil
}
// Pearson calculates the Pearson product-moment correlation coefficient between two variables
func Pearson(data1, data2 Float64Data) (float64, error) {
return Correlation(data1, data2)
}
// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
func AutoCorrelation(data Float64Data, lags int) (float64, error) {
if len(data) < 1 {
return 0, EmptyInputErr
}
mean, _ := Mean(data)
var result, q float64
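// q and v hold streaming estimates of the lagged autocovariance and the
// variance respectively; their ratio is the autocorrelation returned below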
for i := 0; i < lags; i++ {
v := (data[0] - mean) * (data[0] - mean)
for i := 1; i < len(data); i++ {
delta0 := data[i-1] - mean
delta1 := data[i] - mean
q += (delta0*delta1 - q) / float64(i+1)
v += (delta1*delta1 - v) / float64(i+1)
}
result = q / v
}
return result, nil
}
stats-0.7.1/correlation_test.go 0000664 0000000 0000000 00000003714 14427120666 0016601 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"math"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleCorrelation() {
s1 := []float64{1, 2, 3, 4, 5}
s2 := []float64{1, 2, 3, 5, 6}
a, _ := stats.Correlation(s1, s2)
rounded, _ := stats.Round(a, 5)
fmt.Println(rounded)
// Output: 0.99124
}
func TestCorrelation(t *testing.T) {
s1 := []float64{1, 2, 3, 4, 5}
s2 := []float64{10, -51.2, 8}
s3 := []float64{1, 2, 3, 5, 6}
s4 := []float64{}
s5 := []float64{0, 0, 0}
testCases := []struct {
name string
input [][]float64
output float64
err error
}{
{"Empty Slice Error", [][]float64{s4, s4}, math.NaN(), stats.EmptyInputErr},
{"Different Length Error", [][]float64{s1, s2}, math.NaN(), stats.SizeErr},
{"Correlation Value", [][]float64{s1, s3}, 0.9912407071619302, nil},
{"Same Input Value", [][]float64{s5, s5}, 0.00, nil},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
a, err := stats.Correlation(tc.input[0], tc.input[1])
if err != nil {
if err != tc.err {
t.Errorf("Should have returned error %s", tc.err)
}
} else if !veryclose(a, tc.output) {
t.Errorf("Result %.08f should be %.08f", a, tc.output)
}
a2, err2 := stats.Pearson(tc.input[0], tc.input[1])
if err2 != nil {
if err2 != tc.err {
t.Errorf("Should have returned error %s", tc.err)
}
} else if !veryclose(a2, tc.output) {
t.Errorf("Result %.08f should be %.08f", a2, tc.output)
}
})
}
}
func ExampleAutoCorrelation() {
s1 := []float64{1, 2, 3, 4, 5}
a, _ := stats.AutoCorrelation(s1, 1)
fmt.Println(a)
// Output: 0.4
}
func TestAutoCorrelation(t *testing.T) {
s1 := []float64{1, 2, 3, 4, 5}
s2 := []float64{}
a, err := stats.AutoCorrelation(s1, 1)
if err != nil {
t.Errorf("Should not have returned an error")
}
if a != 0.4 {
t.Errorf("Should have returned 0.4")
}
_, err = stats.AutoCorrelation(s2, 1)
if err != stats.EmptyInputErr {
t.Errorf("Should have returned empty input error")
}
}
stats-0.7.1/cumulative_sum.go 0000664 0000000 0000000 00000000574 14427120666 0016264 0 ustar 00root root 0000000 0000000 package stats
// CumulativeSum calculates the cumulative sum of the input slice
func CumulativeSum(input Float64Data) ([]float64, error) {
if input.Len() == 0 {
return Float64Data{}, EmptyInput
}
cumSum := make([]float64, input.Len())
for i, val := range input {
if i == 0 {
cumSum[i] = val
} else {
cumSum[i] = cumSum[i-1] + val
}
}
return cumSum, nil
}
stats-0.7.1/cumulative_sum_test.go 0000664 0000000 0000000 00000002313 14427120666 0017314 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"reflect"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleCumulativeSum() {
data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
csum, _ := stats.CumulativeSum(data)
fmt.Println(csum)
// Output: [1 3.1 6.300000000000001 11.123000000000001 15.223 21.023]
}
func TestCumulativeSum(t *testing.T) {
for _, c := range []struct {
in []float64
out []float64
}{
{[]float64{1, 2, 3}, []float64{1, 3, 6}},
{[]float64{1.0, 1.1, 1.2, 2.2}, []float64{1.0, 2.1, 3.3, 5.5}},
{[]float64{-1, -1, 2, -3}, []float64{-1, -2, 0, -3}},
} {
got, err := stats.CumulativeSum(c.in)
if err != nil {
t.Errorf("Returned an error")
}
if !reflect.DeepEqual(c.out, got) {
t.Errorf("CumulativeSum(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.CumulativeSum([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func BenchmarkCumulativeSumSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.CumulativeSum(makeFloatSlice(5))
}
}
func BenchmarkCumulativeSumLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.CumulativeSum(lf)
}
}
stats-0.7.1/data.go 0000664 0000000 0000000 00000013110 14427120666 0014121 0 ustar 00root root 0000000 0000000 package stats
// Float64Data is a named type for []float64 with helper methods
type Float64Data []float64
// Get item in slice
func (f Float64Data) Get(i int) float64 { return f[i] }
// Len returns length of slice
func (f Float64Data) Len() int { return len(f) }
// Less returns if one number is less than another
func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] }
// Swap switches out two numbers in slice
func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
// Min returns the minimum number in the data
func (f Float64Data) Min() (float64, error) { return Min(f) }
// Max returns the maximum number in the data
func (f Float64Data) Max() (float64, error) { return Max(f) }
// Sum returns the total of all the numbers in the data
func (f Float64Data) Sum() (float64, error) { return Sum(f) }
// CumulativeSum returns the cumulative sum of the data
func (f Float64Data) CumulativeSum() ([]float64, error) { return CumulativeSum(f) }
// Mean returns the mean of the data
func (f Float64Data) Mean() (float64, error) { return Mean(f) }
// Median returns the median of the data
func (f Float64Data) Median() (float64, error) { return Median(f) }
// Mode returns the mode of the data
func (f Float64Data) Mode() ([]float64, error) { return Mode(f) }
// GeometricMean returns the geometric mean of the data
func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) }
// HarmonicMean returns the harmonic mean of the data
func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) }
// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
func (f Float64Data) MedianAbsoluteDeviation() (float64, error) {
return MedianAbsoluteDeviation(f)
}
// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) {
return MedianAbsoluteDeviationPopulation(f)
}
// StandardDeviation finds the amount of variation in the dataset
func (f Float64Data) StandardDeviation() (float64, error) {
return StandardDeviation(f)
}
// StandardDeviationPopulation finds the amount of variation from the population
func (f Float64Data) StandardDeviationPopulation() (float64, error) {
return StandardDeviationPopulation(f)
}
// StandardDeviationSample finds the amount of variation from a sample
func (f Float64Data) StandardDeviationSample() (float64, error) {
return StandardDeviationSample(f)
}
// QuartileOutliers finds the mild and extreme outliers
func (f Float64Data) QuartileOutliers() (Outliers, error) {
return QuartileOutliers(f)
}
// Percentile finds the relative standing in a slice of floats
func (f Float64Data) Percentile(p float64) (float64, error) {
return Percentile(f, p)
}
// PercentileNearestRank finds the relative standing using the Nearest Rank method
func (f Float64Data) PercentileNearestRank(p float64) (float64, error) {
return PercentileNearestRank(f, p)
}
// Correlation describes the degree of relationship between two sets of data
func (f Float64Data) Correlation(d Float64Data) (float64, error) {
return Correlation(f, d)
}
// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
func (f Float64Data) AutoCorrelation(lags int) (float64, error) {
return AutoCorrelation(f, lags)
}
// Pearson calculates the Pearson product-moment correlation coefficient between two variables.
func (f Float64Data) Pearson(d Float64Data) (float64, error) {
return Pearson(f, d)
}
// Quartile returns the three quartile points from a slice of data
func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) {
return Quartile(d)
}
// InterQuartileRange finds the range between Q1 and Q3
func (f Float64Data) InterQuartileRange() (float64, error) {
return InterQuartileRange(f)
}
// Midhinge finds the average of the first and third quartiles
func (f Float64Data) Midhinge(d Float64Data) (float64, error) {
return Midhinge(d)
}
// Trimean finds the average of the median and the midhinge
func (f Float64Data) Trimean(d Float64Data) (float64, error) {
return Trimean(d)
}
// Sample returns a sample from the input, with or without replacement
func (f Float64Data) Sample(n int, r bool) ([]float64, error) {
return Sample(f, n, r)
}
// Variance finds the amount of variation in the dataset
func (f Float64Data) Variance() (float64, error) {
return Variance(f)
}
// PopulationVariance finds the amount of variance within a population
func (f Float64Data) PopulationVariance() (float64, error) {
return PopulationVariance(f)
}
// SampleVariance finds the amount of variance within a sample
func (f Float64Data) SampleVariance() (float64, error) {
return SampleVariance(f)
}
// Covariance is a measure of how much two sets of data change together
func (f Float64Data) Covariance(d Float64Data) (float64, error) {
return Covariance(f, d)
}
// CovariancePopulation computes covariance for entire population between two variables
func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) {
return CovariancePopulation(f, d)
}
// Sigmoid returns the input values along the sigmoid or s-shaped curve
func (f Float64Data) Sigmoid() ([]float64, error) {
return Sigmoid(f)
}
// SoftMax returns the input values in the range of 0 to 1
// with sum of all the probabilities being equal to one.
func (f Float64Data) SoftMax() ([]float64, error) {
return SoftMax(f)
}
// Entropy calculates the entropy of the data
func (f Float64Data) Entropy() (float64, error) {
return Entropy(f)
}
// Quartiles returns the three quartile points from instance of Float64Data
func (f Float64Data) Quartiles() (Quartiles, error) {
return Quartile(f)
}
stats-0.7.1/data_test.go 0000664 0000000 0000000 00000015477 14427120666 0015202 0 ustar 00root root 0000000 0000000 package stats_test
import (
"math"
"math/rand"
"reflect"
"runtime"
"testing"
"time"
"github.com/montanaflynn/stats"
)
var data1 = stats.Float64Data{-10, -10.001, 5, 1.1, 2, 3, 4.20, 5}
var data2 = stats.Float64Data{-9, -9.001, 4, .1, 1, 2, 3.20, 5}
func getFunctionName(i interface{}) string {
return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
}
func checkResult(result float64, err error, name string, f float64, t *testing.T) {
if err != nil {
t.Errorf("%s returned an error", name)
}
if !veryclose(result, f) {
t.Errorf("%s() => %v != %v", name, result, f)
}
}
// makeFloatSlice makes a slice of float64s
func makeFloatSlice(c int) []float64 {
lf := make([]float64, 0, c)
for i := 0; i < c; i++ {
f := float64(i * 100)
lf = append(lf, f)
}
return lf
}
func makeRandFloatSlice(c int) []float64 {
lf := make([]float64, 0, c)
rand.Seed(time.Now().UTC().UnixNano())
for i := 0; i < c; i++ {
f := float64(i * 100)
lf = append(lf, f)
}
return lf
}
func TestInterfaceMethods(t *testing.T) {
// Test Get
a := data1.Get(1)
if a != -10.001 {
t.Errorf("Get(1) => %.1f != %.1f", a, -10.001)
}
// Test Len
l := data1.Len()
if l != 8 {
t.Errorf("Len() => %v != %v", l, 8)
}
// Test Less
b := data1.Less(0, 5)
if !b {
t.Errorf("Less() => %v != %v", b, true)
}
// Test Swap
data1.Swap(0, 2)
if data1.Get(0) != 5 {
t.Errorf("Swap() => %v != %v", data1.Get(0), 5)
}
}
func TestHelperMethods(t *testing.T) {
// Test Min
m, _ := data1.Min()
if m != -10.001 {
t.Errorf("Min() => %v != %v", m, -10.001)
}
// Test Max
m, _ = data1.Max()
if m != 5 {
t.Errorf("Max() => %v != %v", m, 5)
}
// Test Sum
m, _ = data1.Sum()
if m != 0.2990000000000004 {
t.Errorf("Sum() => %v != %v", m, 0.2990000000000004)
}
// Test CumulativeSum
cs, _ := data1.CumulativeSum()
want := []float64{5, -5.0009999999999994, -15.001, -13.901, -11.901, -8.901, -4.701, 0.2990000000000004}
if !reflect.DeepEqual(cs, want) {
t.Errorf("CumulativeSum() => %v != %v", cs, want)
}
// Test Mean
m, _ = data1.Mean()
if m != 0.03737500000000005 {
t.Errorf("Mean() => %v != %v", m, 0.03737500000000005)
}
// Test GeometricMean
m, _ = data1.GeometricMean()
if m != 4.028070682618703 {
t.Errorf("GeometricMean() => %v != %v", m, 4.028070682618703)
}
// Test HarmonicMean
m, _ = data1.HarmonicMean()
if !math.IsNaN(m) {
t.Errorf("HarmonicMean() => %v != %v", m, math.NaN())
}
// Test Median
m, _ = data1.Median()
if m != 2.5 {
t.Errorf("Median() => %v != %v", m, 2.5)
}
// Test Mode
mo, _ := data1.Mode()
if !reflect.DeepEqual(mo, []float64{5.0}) {
t.Errorf("Mode() => %.1f != %.1f", mo, []float64{5.0})
}
// Test InterQuartileRange
iqr, _ := data1.InterQuartileRange()
if iqr != 9.05 {
t.Errorf("InterQuartileRange() => %v != %v", iqr, 9.05)
}
}
func assertFloat64(fn func() (float64, error), f float64, t *testing.T) {
res, err := fn()
checkResult(res, err, getFunctionName(fn), f, t)
}
func TestMedianAbsoluteDeviationMethods(t *testing.T) {
assertFloat64(data1.MedianAbsoluteDeviation, 2.1, t)
assertFloat64(data1.MedianAbsoluteDeviationPopulation, 2.1, t)
}
func TestStandardDeviationMethods(t *testing.T) {
assertFloat64(data1.StandardDeviation, 5.935684731720091, t)
assertFloat64(data1.StandardDeviationPopulation, 5.935684731720091, t)
assertFloat64(data1.StandardDeviationSample, 6.345513892000508, t)
}
func TestVarianceMethods(t *testing.T) {
assertFloat64(data1.Variance, 35.232353234375005, t)
assertFloat64(data1.PopulationVariance, 35.232353234375005, t)
assertFloat64(data1.SampleVariance, 40.26554655357143, t)
}
func assertPercentiles(fn func(i float64) (float64, error), i float64, f float64, t *testing.T) {
res, err := fn(i)
checkResult(res, err, getFunctionName(fn), f, t)
}
func TestPercentileMethods(t *testing.T) {
assertPercentiles(data1.Percentile, 75, 4.2, t)
assertPercentiles(data1.PercentileNearestRank, 75, 4.2, t)
}
func assertOtherDataMethods(fn func(d stats.Float64Data) (float64, error), d stats.Float64Data, f float64, t *testing.T) {
res, err := fn(d)
checkResult(res, err, getFunctionName(fn), f, t)
}
func TestOtherDataMethods(t *testing.T) {
assertOtherDataMethods(data1.Correlation, data2, 0.20875473597605448, t)
assertOtherDataMethods(data1.Pearson, data2, 0.20875473597605448, t)
assertOtherDataMethods(data1.Midhinge, data2, -0.42500000000000004, t)
assertOtherDataMethods(data1.Trimean, data2, 0.5375, t)
assertOtherDataMethods(data1.Covariance, data2, 7.3814215535714265, t)
assertOtherDataMethods(data1.CovariancePopulation, data2, 6.458743859374998, t)
}
func TestAutoCorrelationMethod(t *testing.T) {
_, err := data1.AutoCorrelation(1)
if err != nil {
t.Error("stats.Float64Data.AutoCorrelation returned an error")
}
}
func TestSampleMethod(t *testing.T) {
// Test Sample method
_, err := data1.Sample(5, true)
if err != nil {
t.Errorf("%s returned an error", getFunctionName(data1.Sample))
}
}
func TestQuartileMethods(t *testing.T) {
// Test QuartileOutliers method
_, err := data1.QuartileOutliers()
if err != nil {
t.Errorf("%s returned an error", getFunctionName(data1.QuartileOutliers))
}
// Test Quartile method
_, err = data1.Quartile(data2)
if err != nil {
t.Errorf("%s returned an error", getFunctionName(data1.Quartile))
}
}
func TestSigmoidMethod(t *testing.T) {
d := stats.LoadRawData([]float64{3.0, 1.0, 2.1})
a := []float64{0.9525741268224334, 0.7310585786300049, 0.8909031788043871}
s, _ := d.Sigmoid()
if !reflect.DeepEqual(s, a) {
t.Errorf("Sigmoid() => %g != %g", s, a)
}
}
func TestSoftMaxMethod(t *testing.T) {
d := stats.LoadRawData([]float64{3.0, 1.0, 0.2})
a := []float64{0.8360188027814407, 0.11314284146556013, 0.05083835575299916}
s, _ := d.SoftMax()
if !reflect.DeepEqual(s, a) {
t.Errorf("SoftMax() => %g != %g", s, a)
}
}
func TestEntropyMethod(t *testing.T) {
d := stats.LoadRawData([]float64{3.0, 1.0, 0.2})
a := 0.7270013625470586
e, _ := d.Entropy()
if e != a {
t.Errorf("Entropy() => %v != %v", e, a)
}
}
// Here we show the regular way of doing it
// with a plain old slice of float64s
func BenchmarkRegularAPI(b *testing.B) {
for i := 0; i < b.N; i++ {
data := []float64{-10, -7, -3.11, 5, 1.1, 2, 3, 4.20, 5, 18}
_, _ = stats.Min(data)
_, _ = stats.Max(data)
_, _ = stats.Sum(data)
_, _ = stats.Mean(data)
_, _ = stats.Median(data)
_, _ = stats.Mode(data)
}
}
// Here's where things get interesting
// and we start to use the included
// stats.Float64Data type and methods
func BenchmarkMethodsAPI(b *testing.B) {
for i := 0; i < b.N; i++ {
data := stats.Float64Data{-10, -7, -3.11, 5, 1.1, 2, 3, 4.20, 5, 18}
_, _ = data.Min()
_, _ = data.Max()
_, _ = data.Sum()
_, _ = data.Mean()
_, _ = data.Median()
_, _ = data.Mode()
}
}
func TestQuartilesMethods(t *testing.T) {
_, err := data1.Quartiles()
if err != nil {
t.Errorf("%s returned an error", getFunctionName(data1.Quartiles))
}
}
stats-0.7.1/describe.go 0000664 0000000 0000000 00000005063 14427120666 0015000 0 ustar 00root root 0000000 0000000 package stats
import "fmt"
// Description holds information about the dataset provided to Describe
type Description struct {
Count int
Mean float64
Std float64
Max float64
Min float64
DescriptionPercentiles []descriptionPercentile
AllowedNaN bool
}
// descriptionPercentile pairs a requested percentile with its computed value
type descriptionPercentile struct {
Percentile float64
Value float64
}
// Describe generates descriptive statistics about a provided dataset, similar to python's pandas.describe()
func Describe(input Float64Data, allowNaN bool, percentiles *[]float64) (*Description, error) {
return DescribePercentileFunc(input, allowNaN, percentiles, Percentile)
}
// DescribePercentileFunc generates descriptive statistics about a provided dataset, similar to python's pandas.describe().
// It takes in a function to use for percentile calculation.
func DescribePercentileFunc(input Float64Data, allowNaN bool, percentiles *[]float64, percentileFunc func(Float64Data, float64) (float64, error)) (*Description, error) {
var description Description
description.AllowedNaN = allowNaN
description.Count = input.Len()
if description.Count == 0 && !allowNaN {
return &description, ErrEmptyInput
}
// Disregard errors: they cannot occur when Count > 0 and allowNaN is false; otherwise NaN results are accepted
description.Std, _ = StandardDeviation(input)
description.Max, _ = Max(input)
description.Min, _ = Min(input)
description.Mean, _ = Mean(input)
if percentiles != nil {
for _, percentile := range *percentiles {
if value, err := percentileFunc(input, percentile); err == nil || allowNaN {
description.DescriptionPercentiles = append(description.DescriptionPercentiles, descriptionPercentile{Percentile: percentile, Value: value})
}
}
}
return &description, nil
}
/*
String represents the Description instance in a string format with the specified number of decimals, for example:
count 3
mean 2.00
std 0.82
max 3.00
min 1.00
25.00% NaN
50.00% 1.50
75.00% 2.50
NaN OK true
*/
func (d *Description) String(decimals int) string {
var str string
str += fmt.Sprintf("count\t%d\n", d.Count)
str += fmt.Sprintf("mean\t%.*f\n", decimals, d.Mean)
str += fmt.Sprintf("std\t%.*f\n", decimals, d.Std)
str += fmt.Sprintf("max\t%.*f\n", decimals, d.Max)
str += fmt.Sprintf("min\t%.*f\n", decimals, d.Min)
for _, percentile := range d.DescriptionPercentiles {
str += fmt.Sprintf("%.2f%%\t%.*f\n", percentile.Percentile, decimals, percentile.Value)
}
str += fmt.Sprintf("NaN OK\t%t", d.AllowedNaN)
return str
}
stats-0.7.1/describe_test.go 0000664 0000000 0000000 00000004256 14427120666 0016042 0 ustar 00root root 0000000 0000000 package stats_test
import (
"math"
"testing"
"github.com/montanaflynn/stats"
)
func TestDescribeValidDataset(t *testing.T) {
_, err := stats.Describe([]float64{1.0, 2.0, 3.0}, false, &[]float64{25.0, 50.0, 75.0})
if err != nil {
t.Errorf("Returned an error")
}
}
func TestDescribeEmptyDataset(t *testing.T) {
_, err := stats.Describe([]float64{}, false, nil)
if err != stats.ErrEmptyInput {
t.Errorf("Did not return empty input error")
}
}
func TestDescribeEmptyDatasetNaN(t *testing.T) {
describe, err := stats.Describe([]float64{}, true, nil)
if err != nil {
t.Errorf("Returned an error")
}
if !math.IsNaN(describe.Max) || !math.IsNaN(describe.Mean) || !math.IsNaN(describe.Min) || !math.IsNaN(describe.Std) {
t.Errorf("Was not NaN")
}
}
func TestDescribeValidDatasetNaN(t *testing.T) {
describe, err := stats.Describe([]float64{1.0, 2.0, 3.0}, true, &[]float64{25.0, 50.0, 75.0})
if err != nil {
t.Errorf("Returned an error")
}
if math.IsNaN(describe.Max) {
t.Errorf("Was NaN")
}
}
func TestDescribeValues(t *testing.T) {
dataSet := []float64{1.0, 2.0, 3.0}
percentiles := []float64{25.0, 50.0, 75.0}
describe, _ := stats.Describe(dataSet, true, &percentiles)
if describe.Count != len(dataSet) {
t.Errorf("Count was not == length of dataset")
}
if len(describe.DescriptionPercentiles) != len(percentiles) {
t.Errorf("Percentiles length was not == length of input percentiles")
}
max, _ := stats.Max(dataSet)
if max != describe.Max {
t.Errorf("Max was not equal to Max(dataset)")
}
min, _ := stats.Min(dataSet)
if min != describe.Min {
t.Errorf("Min was not equal to Min(dataset)")
}
mean, _ := stats.Mean(dataSet)
if mean != describe.Mean {
t.Errorf("Mean was not equal to Mean(dataset)")
}
std, _ := stats.StandardDeviation(dataSet)
if std != describe.Std {
t.Errorf("Std was not equal to StandardDeviation(dataset)")
}
}
func TestDescribeString(t *testing.T) {
describe, _ := stats.Describe([]float64{1.0, 2.0, 3.0}, true, &[]float64{25.0, 50.0, 75.0})
if describe.String(2) != "count\t3\nmean\t2.00\nstd\t0.82\nmax\t3.00\nmin\t1.00\n25.00%\tNaN\n50.00%\t1.50\n75.00%\t2.50\nNaN OK\ttrue" {
t.Errorf("String output is not correct")
}
}
stats-0.7.1/deviation.go 0000664 0000000 0000000 00000002760 14427120666 0015203 0 ustar 00root root 0000000 0000000 package stats
import "math"
// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {
return MedianAbsoluteDeviationPopulation(input)
}
// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
i := copyslice(input)
m, _ := Median(i)
for key, value := range i {
i[key] = math.Abs(value - m)
}
return Median(i)
}
// StandardDeviation finds the amount of variation in the dataset
func StandardDeviation(input Float64Data) (sdev float64, err error) {
return StandardDeviationPopulation(input)
}
// StandardDeviationPopulation finds the amount of variation from the population
func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
// Get the population variance
vp, _ := PopulationVariance(input)
// Return the population standard deviation
return math.Sqrt(vp), nil
}
// StandardDeviationSample finds the amount of variation from a sample
func StandardDeviationSample(input Float64Data) (sdev float64, err error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
// Get the sample variance
vs, _ := SampleVariance(input)
// Return the sample standard deviation
return math.Sqrt(vs), nil
}
stats-0.7.1/deviation_test.go 0000664 0000000 0000000 00000004122 14427120666 0016234 0 ustar 00root root 0000000 0000000 package stats_test
import (
"math"
"testing"
"github.com/montanaflynn/stats"
)
func TestMedianAbsoluteDeviation(t *testing.T) {
_, err := stats.MedianAbsoluteDeviation([]float64{1, 2, 3})
if err != nil {
t.Errorf("Returned an error")
}
}
func TestMedianAbsoluteDeviationPopulation(t *testing.T) {
s, _ := stats.MedianAbsoluteDeviation([]float64{1, 2, 3})
m, err := stats.Round(s, 2)
if err != nil {
t.Errorf("Returned an error")
}
if m != 1.00 {
t.Errorf("%.10f != %.10f", m, 1.00)
}
s, _ = stats.MedianAbsoluteDeviation([]float64{-2, 0, 4, 5, 7})
m, err = stats.Round(s, 2)
if err != nil {
t.Errorf("Returned an error")
}
if m != 3.00 {
t.Errorf("%.10f != %.10f", m, 3.00)
}
m, _ = stats.MedianAbsoluteDeviation([]float64{})
if !math.IsNaN(m) {
t.Errorf("%.1f != %.1f", m, math.NaN())
}
}
func TestStandardDeviation(t *testing.T) {
_, err := stats.StandardDeviation([]float64{1, 2, 3})
if err != nil {
t.Errorf("Returned an error")
}
}
func TestStandardDeviationPopulation(t *testing.T) {
s, _ := stats.StandardDeviationPopulation([]float64{1, 2, 3})
m, err := stats.Round(s, 2)
if err != nil {
t.Errorf("Returned an error")
}
if m != 0.82 {
t.Errorf("%.10f != %.10f", m, 0.82)
}
s, _ = stats.StandardDeviationPopulation([]float64{-1, -2, -3.3})
m, err = stats.Round(s, 2)
if err != nil {
t.Errorf("Returned an error")
}
if m != 0.94 {
t.Errorf("%.10f != %.10f", m, 0.94)
}
m, _ = stats.StandardDeviationPopulation([]float64{})
if !math.IsNaN(m) {
t.Errorf("%.1f != %.1f", m, math.NaN())
}
}
func TestStandardDeviationSample(t *testing.T) {
s, _ := stats.StandardDeviationSample([]float64{1, 2, 3})
m, err := stats.Round(s, 2)
if err != nil {
t.Errorf("Returned an error")
}
if m != 1.0 {
t.Errorf("%.10f != %.10f", m, 1.0)
}
s, _ = stats.StandardDeviationSample([]float64{-1, -2, -3.3})
m, err = stats.Round(s, 2)
if err != nil {
t.Errorf("Returned an error")
}
if m != 1.15 {
t.Errorf("%.10f != %.10f", m, 1.15)
}
m, _ = stats.StandardDeviationSample([]float64{})
if !math.IsNaN(m) {
t.Errorf("%.1f != %.1f", m, math.NaN())
}
}
stats-0.7.1/distances.go 0000664 0000000 0000000 00000005024 14427120666 0015172 0 ustar 00root root 0000000 0000000 package stats
import (
"math"
)
// Validate data for distance calculation
func validateData(dataPointX, dataPointY Float64Data) error {
if len(dataPointX) == 0 || len(dataPointY) == 0 {
return EmptyInputErr
}
if len(dataPointX) != len(dataPointY) {
return SizeErr
}
return nil
}
// ChebyshevDistance computes the Chebyshev distance between two data sets
func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
err = validateData(dataPointX, dataPointY)
if err != nil {
return math.NaN(), err
}
var tempDistance float64
for i := 0; i < len(dataPointY); i++ {
tempDistance = math.Abs(dataPointX[i] - dataPointY[i])
if distance < tempDistance {
distance = tempDistance
}
}
return distance, nil
}
// EuclideanDistance computes the Euclidean distance between two data sets
func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
err = validateData(dataPointX, dataPointY)
if err != nil {
return math.NaN(), err
}
distance = 0
for i := 0; i < len(dataPointX); i++ {
distance = distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i]))
}
return math.Sqrt(distance), nil
}
// ManhattanDistance computes the Manhattan distance between two data sets
func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
err = validateData(dataPointX, dataPointY)
if err != nil {
return math.NaN(), err
}
distance = 0
for i := 0; i < len(dataPointX); i++ {
distance = distance + math.Abs(dataPointX[i]-dataPointY[i])
}
return distance, nil
}
// MinkowskiDistance computes the Minkowski distance between two data sets
//
// Arguments:
//
// dataPointX: First set of data points
// dataPointY: Second set of data points. Length of both data
// sets must be equal.
// lambda: aka p. With lambda = 1 the returned
// distance is the Manhattan distance, with
// lambda = 2 it is the Euclidean distance, and
// as lambda approaches infinity it approaches
// the Chebyshev distance.
//
// Return:
//
// Distance or error
func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {
err = validateData(dataPointX, dataPointY)
if err != nil {
return math.NaN(), err
}
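// distance = (sum over i of |x_i - y_i|^lambda)^(1/lambda)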
for i := 0; i < len(dataPointY); i++ {
distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda)
}
distance = math.Pow(distance, 1/lambda)
if math.IsInf(distance, 1) {
return math.NaN(), InfValue
}
return distance, nil
}
stats-0.7.1/distances_test.go 0000664 0000000 0000000 00000004675 14427120666 0016244 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
type distanceFunctionType func(stats.Float64Data, stats.Float64Data) (float64, error)
var minkowskiDistanceTestMatrix = []struct {
dataPointX []float64
dataPointY []float64
lambda float64
distance float64
}{
{[]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, 1, 24},
{[]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, 2, 10.583005244258363},
{[]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, 99, 6},
}
var distanceTestMatrix = []struct {
dataPointX []float64
dataPointY []float64
distance float64
distanceFunction distanceFunctionType
}{
{[]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, 6, stats.ChebyshevDistance},
{[]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, 24, stats.ManhattanDistance},
{[]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, 10.583005244258363, stats.EuclideanDistance},
}
func TestDataSetDistances(t *testing.T) {
// Test Minkowski Distance with different lambda values.
for _, testData := range minkowskiDistanceTestMatrix {
distance, err := stats.MinkowskiDistance(testData.dataPointX, testData.dataPointY, testData.lambda)
if err != nil && distance != testData.distance {
t.Errorf("Failed to compute Minkowski distance.")
}
_, err = stats.MinkowskiDistance([]float64{}, []float64{}, 3)
if err == nil {
t.Errorf("Empty slices should have resulted in an error")
}
_, err = stats.MinkowskiDistance([]float64{1, 2, 3}, []float64{1, 4}, 3)
if err == nil {
t.Errorf("Different length slices should have resulted in an error")
}
_, err = stats.MinkowskiDistance([]float64{999, 999, 999}, []float64{1, 1, 1}, 1000)
if err == nil {
t.Errorf("Infinite distance should have resulted in an error")
}
}
// Compute distance with the help of all algorithms.
for _, testSet := range distanceTestMatrix {
distance, err := testSet.distanceFunction(testSet.dataPointX, testSet.dataPointY)
if err != nil && testSet.distance != distance {
t.Errorf("Failed to compute distance.")
}
_, err = testSet.distanceFunction([]float64{}, []float64{})
if err == nil {
t.Errorf("Empty slices should have resulted in an error")
}
}
}
func ExampleChebyshevDistance() {
d1 := []float64{2, 3, 4, 5, 6, 7, 8}
d2 := []float64{8, 7, 6, 5, 4, 3, 2}
cd, _ := stats.ChebyshevDistance(d1, d2)
fmt.Println(cd)
// Output: 6
}
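// Illustrative sketch: MinkowskiDistance with lambda = 1 reduces to the
// Manhattan distance of the same points (24 here, matching distanceTestMatrix).
func ExampleMinkowskiDistance() {
	d1 := []float64{2, 3, 4, 5, 6, 7, 8}
	d2 := []float64{8, 7, 6, 5, 4, 3, 2}
	md, _ := stats.MinkowskiDistance(d1, d2, 1)
	fmt.Println(md)
	// Output: 24
}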
stats-0.7.1/doc.go 0000664 0000000 0000000 00000001147 14427120666 0013764 0 ustar 00root root 0000000 0000000 /*
Package stats is a well tested and comprehensive
statistics library package with no dependencies.
Example Usage:
// start with some source data to use
data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
// you could also use different types like this
// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
// etc...
median, _ := stats.Median(data)
fmt.Println(median) // 3.65
roundedMedian, _ := stats.Round(median, 0)
fmt.Println(roundedMedian) // 4
MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com)
*/
package stats
stats-0.7.1/entropy.go 0000664 0000000 0000000 00000001112 14427120666 0014707 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Entropy calculates the entropy of the input
func Entropy(input Float64Data) (float64, error) {
input, err := normalize(input)
if err != nil {
return math.NaN(), err
}
var result float64
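// entropy is -sum(v * ln(v)) over the normalized values; zero values
// are skipped since they contribute nothing (and ln(0) is undefined)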
for i := 0; i < input.Len(); i++ {
v := input.Get(i)
if v == 0 {
continue
}
result += (v * math.Log(v))
}
return -result, nil
}
func normalize(input Float64Data) (Float64Data, error) {
sum, err := input.Sum()
if err != nil {
return Float64Data{}, err
}
for i := 0; i < input.Len(); i++ {
input[i] = input[i] / sum
}
return input, nil
}
stats-0.7.1/entropy_test.go 0000664 0000000 0000000 00000002071 14427120666 0015753 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleEntropy() {
d := []float64{1.1, 2.2, 3.3}
e, _ := stats.Entropy(d)
fmt.Println(e)
// Output: 1.0114042647073518
}
func TestEntropy(t *testing.T) {
for _, c := range []struct {
in stats.Float64Data
out float64
}{
{stats.Float64Data{4, 8, 5, 1}, 1.2110440167801229},
{stats.Float64Data{0.8, 0.01, 0.4}, 0.6791185708986585},
{stats.Float64Data{0.8, 1.1, 0, 5}, 0.7759393943707658},
} {
got, err := stats.Entropy(c.in)
if err != nil {
t.Errorf("Returned an error")
}
if !veryclose(got, c.out) {
t.Errorf("Max(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Entropy([]float64{})
if err == nil {
t.Errorf("Empty slice didn't return an error")
}
}
func BenchmarkEntropySmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Entropy(makeFloatSlice(5))
}
}
func BenchmarkEntropyLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Entropy(lf)
}
}
stats-0.7.1/errors.go 0000664 0000000 0000000 00000002044 14427120666 0014530 0 ustar 00root root 0000000 0000000 package stats
type statsError struct {
err string
}
func (s statsError) Error() string {
return s.err
}
func (s statsError) String() string {
return s.err
}
// These are the package-wide error values.
// All error identification should use these values.
// https://github.com/golang/go/wiki/Errors#naming
var (
// ErrEmptyInput Input must not be empty
ErrEmptyInput = statsError{"Input must not be empty."}
// ErrNaN Not a number
ErrNaN = statsError{"Not a number."}
// ErrNegative Must not contain negative values
ErrNegative = statsError{"Must not contain negative values."}
// ErrZero Must not contain zero values
ErrZero = statsError{"Must not contain zero values."}
// ErrBounds Input is outside of range
ErrBounds = statsError{"Input is outside of range."}
// ErrSize Must be the same length
ErrSize = statsError{"Must be the same length."}
// ErrInfValue Value is infinite
ErrInfValue = statsError{"Value is infinite."}
// ErrYCoord Y Value must be greater than zero
ErrYCoord = statsError{"Y Value must be greater than zero."}
)
stats-0.7.1/errors_test.go 0000664 0000000 0000000 00000000424 14427120666 0015567 0 ustar 00root root 0000000 0000000 package stats
import (
"testing"
)
func TestError(t *testing.T) {
err := statsError{"test error"}
if err.Error() != "test error" {
t.Errorf("Error method message didn't match")
}
if err.String() != "test error" {
t.Errorf("String method message didn't match")
}
}
stats-0.7.1/examples/ 0000775 0000000 0000000 00000000000 14427120666 0014503 5 ustar 00root root 0000000 0000000 stats-0.7.1/examples/README.md 0000664 0000000 0000000 00000000125 14427120666 0015760 0 ustar 00root root 0000000 0000000 # examples
The examples directory provides some examples of using the stats package. stats-0.7.1/examples/main.go 0000664 0000000 0000000 00000011703 14427120666 0015760 0 ustar 00root root 0000000 0000000 package main
import (
"fmt"
"github.com/montanaflynn/stats"
)
func main() {
// d := stats.LoadRawData([]interface{}{1.1, "2", 3.0, 4, "5"})
d := stats.LoadRawData([]int{1, 2, 3, 4, 5})
a, _ := stats.Min(d)
fmt.Println(a)
// Output: 1
a, _ = stats.Max(d)
fmt.Println(a)
// Output: 5
a, _ = stats.Sum([]float64{1.1, 2.2, 3.3})
fmt.Println(a)
// Output: 6.6
cs, _ := stats.CumulativeSum([]float64{1.1, 2.2, 3.3})
fmt.Println(cs) // [1.1 3.3000000000000003 6.6]
a, _ = stats.Mean([]float64{1, 2, 3, 4, 5})
fmt.Println(a)
// Output: 3
a, _ = stats.Median([]float64{1, 2, 3, 4, 5, 6, 7})
fmt.Println(a)
// Output: 4
m, _ := stats.Mode([]float64{5, 5, 3, 3, 4, 2, 1})
fmt.Println(m)
// Output: [5 3]
a, _ = stats.PopulationVariance([]float64{1, 2, 3, 4, 5})
fmt.Println(a)
// Output: 2
a, _ = stats.SampleVariance([]float64{1, 2, 3, 4, 5})
fmt.Println(a)
// Output: 2.5
a, _ = stats.MedianAbsoluteDeviationPopulation([]float64{1, 2, 3})
fmt.Println(a)
// Output: 1
a, _ = stats.StandardDeviationPopulation([]float64{1, 2, 3})
fmt.Println(a)
// Output: 0.816496580927726
a, _ = stats.StandardDeviationSample([]float64{1, 2, 3})
fmt.Println(a)
// Output: 1
a, _ = stats.Percentile([]float64{1, 2, 3, 4, 5}, 75)
fmt.Println(a)
// Output: 4
a, _ = stats.PercentileNearestRank([]float64{35, 20, 15, 40, 50}, 75)
fmt.Println(a)
// Output: 40
c := []stats.Coordinate{
{1, 2.3},
{2, 3.3},
{3, 3.7},
{4, 4.3},
{5, 5.3},
}
r, _ := stats.LinearRegression(c)
fmt.Println(r)
// Output: [{1 2.3800000000000026} {2 3.0800000000000014} {3 3.7800000000000002} {4 4.479999999999999} {5 5.179999999999998}]
r, _ = stats.ExponentialRegression(c)
fmt.Println(r)
// Output: [{1 2.5150181024736638} {2 3.032084111136781} {3 3.6554544271334493} {4 4.406984298281804} {5 5.313022222665875}]
r, _ = stats.LogarithmicRegression(c)
fmt.Println(r)
// Output: [{1 2.1520822363811702} {2 3.3305559222492214} {3 4.019918836568674} {4 4.509029608117273} {5 4.888413396683663}]
s, _ := stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 3, false)
fmt.Println(s)
// Output: [0.2,0.4,0.3]
s, _ = stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 10, true)
fmt.Println(s)
// Output: [0.2,0.2,0.4,0.1,0.2,0.4,0.3,0.2,0.2,0.1]
q, _ := stats.Quartile([]float64{7, 15, 36, 39, 40, 41})
fmt.Println(q)
// Output: {15 37.5 40}
iqr, _ := stats.InterQuartileRange([]float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118})
fmt.Println(iqr)
// Output: 10
mh, _ := stats.Midhinge([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
fmt.Println(mh)
// Output: 7.5
tr, _ := stats.Trimean([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
fmt.Println(tr)
// Output: 7.25
o, _ := stats.QuartileOutliers([]float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100})
fmt.Printf("%+v\n", o)
// Output: {Mild:[15 18] Extreme:[-1000 100]}
gm, _ := stats.GeometricMean([]float64{10, 51.2, 8})
fmt.Println(gm)
// Output: 15.999999999999991
hm, _ := stats.HarmonicMean([]float64{1, 2, 3, 4, 5})
fmt.Println(hm)
// Output: 2.18978102189781
a, _ = stats.Round(2.18978102189781, 3)
fmt.Println(a)
// Output: 2.189
e, _ := stats.ChebyshevDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2})
fmt.Println(e)
// Output: 6
e, _ = stats.ManhattanDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2})
fmt.Println(e)
// Output: 24
e, _ = stats.EuclideanDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2})
fmt.Println(e)
// Output: 10.583005244258363
e, _ = stats.MinkowskiDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, float64(1))
fmt.Println(e)
// Output: 24
e, _ = stats.MinkowskiDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, float64(2))
fmt.Println(e)
// Output: 10.583005244258363
e, _ = stats.MinkowskiDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, float64(99))
fmt.Println(e)
// Output: 6
cor, _ := stats.Correlation([]float64{1, 2, 3, 4, 5}, []float64{1, 2, 3, 5, 6})
fmt.Println(cor)
// Output: 0.9912407071619302
ac, _ := stats.AutoCorrelation([]float64{1, 2, 3, 4, 5}, 1)
fmt.Println(ac)
// Output: 0.4
sig, _ := stats.Sigmoid([]float64{3.0, 1.0, 2.1})
fmt.Println(sig)
// Output: [0.9525741268224334 0.7310585786300049 0.8909031788043871]
sm, _ := stats.SoftMax([]float64{3.0, 1.0, 0.2})
fmt.Println(sm)
// Output: [0.8360188027814407 0.11314284146556013 0.05083835575299916]
e, _ = stats.Entropy([]float64{1.1, 2.2, 3.3})
fmt.Println(e)
// Output: 1.0114042647073518
p := 0.5
begin := 1
end := 2
chance, _ := stats.ProbGeom(begin, end, p)
fmt.Println(chance)
// Output: 0.25
prob1 := 0.5
exp, _ := stats.ExpGeom(prob1)
fmt.Println(exp)
// Output: 2
prob2 := 0.5
vari, _ := stats.VarGeom(prob2)
fmt.Println(vari)
// Output: 2
description, _ := stats.Describe([]float64{1.0, 2.0, 3.0}, true, &[]float64{25.0, 50.0, 75.0})
fmt.Println(description.String(2))
}
stats-0.7.1/examples/methods.go 0000664 0000000 0000000 00000001007 14427120666 0016473 0 ustar 00root root 0000000 0000000 package main
import (
"fmt"
"github.com/montanaflynn/stats"
)
func main() {
var d stats.Float64Data = []float64{1, 2, 3, 4, 4, 5}
// you could also use arbitrary types like this
// var d = stats.LoadRawData([]interface{}{1.1, "2", 3.0, 4, "5"})
min, _ := d.Min()
fmt.Println(min) // 1
max, _ := d.Max()
fmt.Println(max) // 5
sum, _ := d.Sum()
fmt.Println(sum) // 19
// See https://godoc.org/github.com/montanaflynn/stats#Float64Data
// or run godoc ./ Float64Data to view all available methods
}
stats-0.7.1/examples_test.go 0000664 0000000 0000000 00000011272 14427120666 0016074 0 ustar 00root root 0000000 0000000 package stats_test
// import (
// "fmt"
// "testing"
// "github.com/montanaflynn/stats"
// )
// func Example() {
// // t.Parallel()
// t.Run("LoadRawData", func(t *testing.T) {
// // t.Parallel()
// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
// fmt.Println(data)
// // Output: 1.1, 2.0, 3.0, 4
// })
// }
// // func Example() {
// // // start with some source data to use
// // data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
// // // you could also use different types like this
// // // data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
// // // data := stats.LoadRawData([]interface{}{1.1, "2", 3})
// // // etc...
// // median, _ := Median(data)
// // fmt.Println(median)
// // // Output: 3.65
// // roundedMedian, _ := Round(median, 0)
// // fmt.Println(roundedMedian)
// // // Output: 4
// // a, _ := Mean([]float64{1, 2, 3, 4, 5})
// // fmt.Println(a)
// // // Output: 3
// // a, _ = Median([]float64{1, 2, 3, 4, 5, 6, 7})
// // fmt.Println(a)
// // // Output: 4
// // m, _ := Mode([]float64{5, 5, 3, 3, 4, 2, 1})
// // fmt.Println(m)
// // // Output: [5 3]
// // a, _ = PopulationVariance([]float64{1, 2, 3, 4, 5})
// // fmt.Println(a)
// // // Output: 2
// // a, _ = SampleVariance([]float64{1, 2, 3, 4, 5})
// // fmt.Println(a)
// // // Output: 2.5
// // a, _ = MedianAbsoluteDeviationPopulation([]float64{1, 2, 3})
// // fmt.Println(a)
// // // Output: 1
// // a, _ = StandardDeviationPopulation([]float64{1, 2, 3})
// // fmt.Println(a)
// // // Output: 0.816496580927726
// // a, _ = StandardDeviationSample([]float64{1, 2, 3})
// // fmt.Println(a)
// // // Output: 1
// // a, _ = Percentile([]float64{1, 2, 3, 4, 5}, 75)
// // fmt.Println(a)
// // // Output: 4
// // a, _ = PercentileNearestRank([]float64{35, 20, 15, 40, 50}, 75)
// // fmt.Println(a)
// // // Output: 40
// // c := []Coordinate{
// // {1, 2.3},
// // {2, 3.3},
// // {3, 3.7},
// // {4, 4.3},
// // {5, 5.3},
// // }
// // r, _ := LinearRegression(c)
// // fmt.Println(r)
// // // Output: [{1 2.3800000000000026} {2 3.0800000000000014} {3 3.7800000000000002} {4 4.479999999999999} {5 5.179999999999998}]
// // r, _ = ExponentialRegression(c)
// // fmt.Println(r)
// // // Output: [{1 2.5150181024736638} {2 3.032084111136781} {3 3.6554544271334493} {4 4.406984298281804} {5 5.313022222665875}]
// // r, _ = LogarithmicRegression(c)
// // fmt.Println(r)
// // // Output: [{1 2.1520822363811702} {2 3.3305559222492214} {3 4.019918836568674} {4 4.509029608117273} {5 4.888413396683663}]
// // s, _ := Sample([]float64{0.1, 0.2, 0.3, 0.4}, 3, false)
// // fmt.Println(s)
// // // Output: [0.2,0.4,0.3]
// // s, _ = Sample([]float64{0.1, 0.2, 0.3, 0.4}, 10, true)
// // fmt.Println(s)
// // // Output: [0.2,0.2,0.4,0.1,0.2,0.4,0.3,0.2,0.2,0.1]
// // q, _ := Quartile([]float64{7, 15, 36, 39, 40, 41})
// // fmt.Println(q)
// // // Output: {15 37.5 40}
// // iqr, _ := InterQuartileRange([]float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118})
// // fmt.Println(iqr)
// // // Output: 10
// // mh, _ := Midhinge([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
// // fmt.Println(mh)
// // // Output: 7.5
// // tr, _ := Trimean([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
// // fmt.Println(tr)
// // // Output: 7.25
// // o, _ := QuartileOutliers([]float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100})
// // fmt.Printf("%+v\n", o)
// // // Output: {Mild:[15 18] Extreme:[-1000 100]}
// // gm, _ := GeometricMean([]float64{10, 51.2, 8})
// // fmt.Println(gm)
// // // Output: 15.999999999999991
// // hm, _ := HarmonicMean([]float64{1, 2, 3, 4, 5})
// // fmt.Println(hm)
// // // Output: 2.18978102189781
// // a, _ = Round(2.18978102189781, 3)
// // fmt.Println(a)
// // // Output: 2.189
// // e, _ := ChebyshevDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2})
// // fmt.Println(e)
// // // Output: 6
// // e, _ = ManhattanDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2})
// // fmt.Println(e)
// // // Output: 24
// // e, _ = EuclideanDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2})
// // fmt.Println(e)
// // // Output: 10.583005244258363
// // e, _ = MinkowskiDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, float64(1))
// // fmt.Println(e)
// // // Output: 24
// // e, _ = MinkowskiDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, float64(2))
// // fmt.Println(e)
// // // Output: 10.583005244258363
// // e, _ = MinkowskiDistance([]float64{2, 3, 4, 5, 6, 7, 8}, []float64{8, 7, 6, 5, 4, 3, 2}, float64(99))
// // fmt.Println(e)
// // // Output: 6
// // }
stats-0.7.1/geometric_distribution.go 0000664 0000000 0000000 00000002015 14427120666 0017767 0 ustar 00root root 0000000 0000000 package stats
import (
"math"
)
// ProbGeom generates the probability for a geometric random variable
// with parameter p to achieve success in the interval of [a, b] trials
// See https://en.wikipedia.org/wiki/Geometric_distribution for more information
func ProbGeom(a int, b int, p float64) (prob float64, err error) {
if (a > b) || (a < 1) {
return math.NaN(), ErrBounds
}
prob = 0
q := 1 - p // probability of failure
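// the geometric pmf is P(X = k) = p * q^(k-1); accumulate it over the requested trials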
for k := a + 1; k <= b; k++ {
prob = prob + p*math.Pow(q, float64(k-1))
}
return prob, nil
}
// ExpGeom returns the expectation or average number of trials
// for a geometric random variable with parameter p
func ExpGeom(p float64) (exp float64, err error) {
if (p > 1) || (p < 0) {
return math.NaN(), ErrNegative
}
return 1 / p, nil
}
// VarGeom returns the variance of the number of trials for a
// geometric random variable with parameter p
func VarGeom(p float64) (exp float64, err error) {
if (p > 1) || (p < 0) {
return math.NaN(), ErrNegative
}
return (1 - p) / math.Pow(p, 2), nil
}
stats-0.7.1/geometric_distribution_test.go 0000664 0000000 0000000 00000003544 14427120666 0021036 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"math"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleProbGeom() {
p := 0.5
a := 1
b := 2
chance, _ := stats.ProbGeom(a, b, p)
fmt.Println(chance)
// Output: 0.25
}
func TestProbGeomLarge(t *testing.T) {
p := 0.5
a := 1
b := 10000
chance, err := stats.ProbGeom(a, b, p)
if err != nil {
t.Errorf("Returned an error")
}
if chance != 0.5 {
t.Errorf("ProbGeom(%d, %d, %.01f) => %.1f != %.1f", a, b, p, chance, 0.5)
}
}
func TestErrBoundsProbGeom(t *testing.T) {
p := 0.5
a := -1
b := 4
chance, err := stats.ProbGeom(a, b, p)
if err == nil {
t.Errorf("Did not return an error when expected")
}
if !math.IsNaN(chance) {
t.Errorf("ProbGeom(%d, %d, %.01f) => %.1f != %.1f", a, b, p, chance, math.NaN())
}
}
func ExampleExpGeom() {
p := 0.5
exp, _ := stats.ExpGeom(p)
fmt.Println(exp)
// Output: 2
}
func TestExpGeom(t *testing.T) {
p := 0.5
exp, err := stats.ExpGeom(p)
if err != nil {
t.Errorf("Returned an error when not expected")
}
if exp != 2.0 {
t.Errorf("ExpGeom(%.01f) => %.1f != %.1f", p, exp, 2.0)
}
}
func TestErrExpGeom(t *testing.T) {
p := -1.0
exp, err := stats.ExpGeom(p)
if err == nil {
t.Errorf("Did not return an error")
}
if !math.IsNaN(exp) {
t.Errorf("ExpGeom(%.01f) => %.1f != %.1f", p, exp, math.NaN())
}
}
func ExampleVarGeom() {
p := 0.5
vari, _ := stats.VarGeom(p)
fmt.Println(vari)
// Output: 2
}
func TestVarGeom(t *testing.T) {
p := 0.25
vari, err := stats.VarGeom(p)
if err != nil {
t.Errorf("Returned an error when not expected")
}
if vari != 12.0 {
t.Errorf("VarGeom(%.01f) => %.1f != %.1f", p, vari, 12.0)
}
}
func TestErrVarGeom(t *testing.T) {
p := -1.0
vari, err := stats.VarGeom(p)
if err == nil {
t.Errorf("Did not return an error")
}
if !math.IsNaN(vari) {
t.Errorf("VarGeom(%.01f) => %.1f != %.1f", p, vari, math.NaN())
}
}
stats-0.7.1/go.mod 0000664 0000000 0000000 00000000056 14427120666 0013774 0 ustar 00root root 0000000 0000000 module github.com/montanaflynn/stats
go 1.13
stats-0.7.1/legacy.go 0000664 0000000 0000000 00000002511 14427120666 0014457 0 ustar 00root root 0000000 0000000 package stats
// VarP is a shortcut to PopulationVariance
func VarP(input Float64Data) (sdev float64, err error) {
return PopulationVariance(input)
}
// VarS is a shortcut to SampleVariance
func VarS(input Float64Data) (sdev float64, err error) {
return SampleVariance(input)
}
// StdDevP is a shortcut to StandardDeviationPopulation
func StdDevP(input Float64Data) (sdev float64, err error) {
return StandardDeviationPopulation(input)
}
// StdDevS is a shortcut to StandardDeviationSample
func StdDevS(input Float64Data) (sdev float64, err error) {
return StandardDeviationSample(input)
}
// LinReg is a shortcut to LinearRegression
func LinReg(s []Coordinate) (regressions []Coordinate, err error) {
return LinearRegression(s)
}
// ExpReg is a shortcut to ExponentialRegression
func ExpReg(s []Coordinate) (regressions []Coordinate, err error) {
return ExponentialRegression(s)
}
// LogReg is a shortcut to LogarithmicRegression
func LogReg(s []Coordinate) (regressions []Coordinate, err error) {
return LogarithmicRegression(s)
}
// Legacy error names that didn't start with Err
var (
EmptyInputErr = ErrEmptyInput
NaNErr = ErrNaN
NegativeErr = ErrNegative
ZeroErr = ErrZero
BoundsErr = ErrBounds
SizeErr = ErrSize
InfValue = ErrInfValue
YCoordErr = ErrYCoord
EmptyInput = ErrEmptyInput
)
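// A minimal usage sketch: the old names remain usable as drop-in aliases for
// the renamed functions, and error checks can keep comparing against legacy
// names such as EmptyInput, which alias the Err* values above.
//
// sd, _ := StdDevP(Float64Data{1, 2, 3})            // same result as StandardDeviationPopulation
// lr, _ := LinReg([]Coordinate{{1, 2.3}, {2, 3.3}}) // same result as LinearRegression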
stats-0.7.1/legacy_test.go 0000664 0000000 0000000 00000002642 14427120666 0015523 0 ustar 00root root 0000000 0000000 package stats_test
import (
"testing"
"github.com/montanaflynn/stats"
)
// Create working sample data to test if the legacy
// functions cause a runtime crash or return an error
func TestLegacy(t *testing.T) {
// Slice of data
s := []float64{-10, -10.001, 5, 1.1, 2, 3, 4.20, 5}
// Slice of coordinates
d := []stats.Coordinate{
{1, 2.3},
{2, 3.3},
{3, 3.7},
{4, 4.3},
{5, 5.3},
}
// VarP rename compatibility
_, err := stats.VarP(s)
if err != nil {
t.Errorf("VarP not successfully returning PopulationVariance.")
}
// VarS rename compatibility
_, err = stats.VarS(s)
if err != nil {
t.Errorf("VarS not successfully returning SampleVariance.")
}
// StdDevP rename compatibility
_, err = stats.StdDevP(s)
if err != nil {
t.Errorf("StdDevP not successfully returning StandardDeviationPopulation.")
}
// StdDevS rename compatibility
_, err = stats.StdDevS(s)
if err != nil {
t.Errorf("StdDevS not successfully returning StandardDeviationSample.")
}
// LinReg rename compatibility
_, err = stats.LinReg(d)
if err != nil {
t.Errorf("LinReg not successfully returning LinearRegression.")
}
// ExpReg rename compatibility
_, err = stats.ExpReg(d)
if err != nil {
t.Errorf("ExpReg not successfully returning ExponentialRegression.")
}
// LogReg rename compatibility
_, err = stats.LogReg(d)
if err != nil {
t.Errorf("LogReg not successfully returning LogarithmicRegression.")
}
}
stats-0.7.1/load.go 0000664 0000000 0000000 00000006667 14427120666 0014152 0 ustar 00root root 0000000 0000000 package stats
import (
"bufio"
"io"
"strconv"
"strings"
"time"
)
// LoadRawData parses and converts a slice of mixed data types to floats
func LoadRawData(raw interface{}) (f Float64Data) {
var r []interface{}
var s Float64Data
switch t := raw.(type) {
case []interface{}:
r = t
case []uint:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []uint8:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []uint16:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []uint32:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []uint64:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []bool:
for _, v := range t {
if v {
s = append(s, 1.0)
} else {
s = append(s, 0.0)
}
}
return s
case []float64:
return Float64Data(t)
case []int:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []int8:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []int16:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []int32:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []int64:
for _, v := range t {
s = append(s, float64(v))
}
return s
case []string:
for _, v := range t {
r = append(r, v)
}
case []time.Duration:
for _, v := range t {
r = append(r, v)
}
case map[int]int:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]int8:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]int16:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]int32:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]int64:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]string:
for i := 0; i < len(t); i++ {
r = append(r, t[i])
}
case map[int]uint:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]uint8:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]uint16:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]uint32:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]uint64:
for i := 0; i < len(t); i++ {
s = append(s, float64(t[i]))
}
return s
case map[int]bool:
for i := 0; i < len(t); i++ {
if t[i] {
s = append(s, 1.0)
} else {
s = append(s, 0.0)
}
}
return s
case map[int]float64:
for i := 0; i < len(t); i++ {
s = append(s, t[i])
}
return s
case map[int]time.Duration:
for i := 0; i < len(t); i++ {
r = append(r, t[i])
}
case string:
for _, v := range strings.Fields(t) {
r = append(r, v)
}
case io.Reader:
scanner := bufio.NewScanner(t)
for scanner.Scan() {
l := scanner.Text()
for _, v := range strings.Fields(l) {
r = append(r, v)
}
}
}
for _, v := range r {
switch t := v.(type) {
case int:
a := float64(t)
f = append(f, a)
case uint:
f = append(f, float64(t))
case float64:
f = append(f, t)
case string:
fl, err := strconv.ParseFloat(t, 64)
if err == nil {
f = append(f, fl)
}
case bool:
if t {
f = append(f, 1.0)
} else {
f = append(f, 0.0)
}
case time.Duration:
f = append(f, float64(t))
}
}
return f
}
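// A minimal usage sketch of the string and io.Reader cases above; fields
// that do not parse as floats are silently skipped.
//
// d := LoadRawData("1\n\n2 3.3\n 4.4")                // Float64Data{1, 2, 3.3, 4.4}
// r := LoadRawData(strings.NewReader("5 6.5 oops 7")) // Float64Data{5, 6.5, 7}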
stats-0.7.1/load_test.go 0000664 0000000 0000000 00000011473 14427120666 0015200 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/montanaflynn/stats"
)
func ExampleLoadRawData() {
data := stats.LoadRawData([]interface{}{1.1, "2", 3})
fmt.Println(data)
// Output: [1.1 2 3]
}
var allTestData = []struct {
actual interface{}
expected stats.Float64Data
}{
{
[]interface{}{1.0, "2", 3.0, uint(4), "4.0", 5, time.Duration(6), time.Duration(-7)},
stats.Float64Data{1.0, 2.0, 3.0, 4.0, 4.0, 5.0, 6.0, -7.0},
},
{
[]interface{}{"-345", "223", "-654.4", "194", "898.3"},
stats.Float64Data{-345.0, 223.0, -654.4, 194.0, 898.3},
},
{
[]interface{}{7862, 4234, 9872.1, 8794},
stats.Float64Data{7862.0, 4234.0, 9872.1, 8794.0},
},
{
[]interface{}{true, false, true, false, false},
stats.Float64Data{1.0, 0.0, 1.0, 0.0, 0.0},
},
{
[]interface{}{14.3, 26, 17.7, "shoe"},
stats.Float64Data{14.3, 26.0, 17.7},
},
{
[]bool{true, false, true, true, false},
stats.Float64Data{1.0, 0.0, 1.0, 1.0, 0.0},
},
{
[]float64{10230.9823, 93432.9384, 23443.945, 12374.945},
stats.Float64Data{10230.9823, 93432.9384, 23443.945, 12374.945},
},
{
[]time.Duration{-843, 923, -398, 1000},
stats.Float64Data{-843.0, 923.0, -398.0, 1000.0},
},
{
[]string{"-843.2", "923", "hello", "-398", "1000.5"},
stats.Float64Data{-843.2, 923.0, -398.0, 1000.5},
},
{
[]uint{34, 12, 65, 230, 30},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 30.0},
},
{
[]uint8{34, 12, 65, 23, 255},
stats.Float64Data{34.0, 12.0, 65.0, 23.0, 255.0},
},
{
[]uint16{34, 12, 65, 230, 65535},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 65535.0},
},
{
[]uint32{34, 12, 65, 230, 4294967295},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 4294967295.0},
},
{
[]uint64{34, 12, 65, 230, 18446744073709551615},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 18446744073709552000.0},
},
{
[]int{-843, 923, -398, 1000},
stats.Float64Data{-843.0, 923.0, -398.0, 1000.0},
},
{
[]int8{-43, 23, -128, 127},
stats.Float64Data{-43.0, 23.0, -128.0, 127.0},
},
{
[]int16{-843, 923, -32768, 32767},
stats.Float64Data{-843.0, 923.0, -32768.0, 32767.0},
},
{
[]int32{-843, 923, -2147483648, 2147483647},
stats.Float64Data{-843.0, 923.0, -2147483648.0, 2147483647.0},
},
{
[]int64{-843, 923, -9223372036854775808, 9223372036854775807, 9223372036854775800},
stats.Float64Data{-843.0, 923.0, -9223372036854776000.0, 9223372036854776000.0, 9223372036854776000.0},
},
{
map[int]bool{0: true, 1: true, 2: false, 3: true, 4: false},
stats.Float64Data{1.0, 1.0, 0.0, 1.0, 0.0},
},
{
map[int]float64{0: 68.6, 1: 72.1, 2: -33.3, 3: -99.2},
stats.Float64Data{68.6, 72.1, -33.3, -99.2},
},
{
map[int]time.Duration{0: -843, 1: 923, 2: -398, 3: 1000},
stats.Float64Data{-843.0, 923.0, -398.0, 1000.0},
},
{
map[int]string{0: "456", 1: "758", 2: "-9874", 3: "-1981", 4: "68.6", 5: "72.1", 6: "-33.3", 7: "-99.2"},
stats.Float64Data{456.0, 758.0, -9874.0, -1981.0, 68.6, 72.1, -33.3, -99.2},
},
{
map[int]uint{0: 4567, 1: 7580, 2: 98742, 3: 19817},
stats.Float64Data{4567.0, 7580.0, 98742.0, 19817.0},
},
{
map[int]uint8{0: 34, 1: 12, 2: 65, 3: 23, 4: 255},
stats.Float64Data{34.0, 12.0, 65.0, 23.0, 255.0},
},
{
map[int]uint16{0: 34, 1: 12, 2: 65, 3: 230, 4: 65535},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 65535.0},
},
{
map[int]uint32{0: 34, 1: 12, 2: 65, 3: 230, 4: 4294967295},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 4294967295.0},
},
{
map[int]uint64{0: 34, 1: 12, 2: 65, 3: 230, 4: 18446744073709551615},
stats.Float64Data{34.0, 12.0, 65.0, 230.0, 18446744073709552000.0},
},
{
map[int]int{0: 456, 1: 758, 2: -9874, 3: -1981},
stats.Float64Data{456.0, 758.0, -9874.0, -1981.0},
},
{
map[int]int8{0: -43, 1: 23, 2: -128, 3: 127},
stats.Float64Data{-43.0, 23.0, -128.0, 127.0},
},
{
map[int]int16{0: -843, 1: 923, 2: -32768, 3: 32767},
stats.Float64Data{-843.0, 923.0, -32768.0, 32767.0},
},
{
map[int]int32{0: -843, 1: 923, 2: -2147483648, 3: 2147483647},
stats.Float64Data{-843.0, 923.0, -2147483648.0, 2147483647.0},
},
{
map[int]int64{0: -843, 1: 923, 2: -9223372036854775808, 3: 9223372036854775807, 4: 9223372036854775800},
stats.Float64Data{-843.0, 923.0, -9223372036854776000.0, 9223372036854776000.0, 9223372036854776000.0},
},
{
"1\n\n2 3.3\n 4.4",
stats.Float64Data{1.0, 2, 3.3, 4.4},
},
{
strings.NewReader("1\n\n2 3.3\n 4.4"),
stats.Float64Data{1.0, 2, 3.3, 4.4},
},
}
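// equal reports whether two Float64Data slices have the same length
// and the same values in the same order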
func equal(actual, expected stats.Float64Data) bool {
if len(actual) != len(expected) {
return false
}
for k, actualVal := range actual {
if actualVal != expected[k] {
return false
}
}
return true
}
func TestLoadRawData(t *testing.T) {
for _, data := range allTestData {
actual := stats.LoadRawData(data.actual)
if !equal(actual, data.expected) {
t.Fatalf("Transform(%v). Expected [%v], Actual [%v]", data.actual, data.expected, actual)
}
}
}
stats-0.7.1/max.go 0000664 0000000 0000000 00000000705 14427120666 0014003 0 ustar 00root root 0000000 0000000 package stats
import (
"math"
)
// Max finds the highest number in a slice
func Max(input Float64Data) (max float64, err error) {
// Return an error if there are no numbers
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
// Get the first value as the starting point
max = input.Get(0)
// Loop and replace higher values
for i := 1; i < input.Len(); i++ {
if input.Get(i) > max {
max = input.Get(i)
}
}
return max, nil
}
stats-0.7.1/max_test.go 0000664 0000000 0000000 00000001742 14427120666 0015044 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleMax() {
d := []float64{1.1, 2.3, 3.2, 4.0, 4.01, 5.09}
a, _ := stats.Max(d)
fmt.Println(a)
// Output: 5.09
}
func TestMax(t *testing.T) {
for _, c := range []struct {
in []float64
out float64
}{
{[]float64{1, 2, 3, 4, 5}, 5.0},
{[]float64{10.5, 3, 5, 7, 9}, 10.5},
{[]float64{-20, -1, -5.5}, -1.0},
{[]float64{-1.0}, -1.0},
} {
got, err := stats.Max(c.in)
if err != nil {
t.Errorf("Returned an error")
}
if got != c.out {
t.Errorf("Max(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Max([]float64{})
if err == nil {
t.Errorf("Empty slice didn't return an error")
}
}
func BenchmarkMaxSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Max(makeFloatSlice(5))
}
}
func BenchmarkMaxLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Max(lf)
}
}
stats-0.7.1/mean.go 0000664 0000000 0000000 00000002210 14427120666 0014127 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Mean gets the average of a slice of numbers
func Mean(input Float64Data) (float64, error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
sum, _ := input.Sum()
return sum / float64(input.Len()), nil
}
// GeometricMean gets the geometric mean for a slice of numbers
func GeometricMean(input Float64Data) (float64, error) {
l := input.Len()
if l == 0 {
return math.NaN(), EmptyInputErr
}
// Get the product of all the numbers
var p float64
for _, n := range input {
if p == 0 {
p = n
} else {
p *= n
}
}
// Calculate the geometric mean
return math.Pow(p, 1/float64(l)), nil
}
// HarmonicMean gets the harmonic mean for a slice of numbers
func HarmonicMean(input Float64Data) (float64, error) {
l := input.Len()
if l == 0 {
return math.NaN(), EmptyInputErr
}
// Get the sum of the reciprocals of all the numbers and return an
// error for values that cannot be included in the harmonic mean
var p float64
for _, n := range input {
if n < 0 {
return math.NaN(), NegativeErr
} else if n == 0 {
return math.NaN(), ZeroErr
}
p += (1 / n)
}
return float64(l) / p, nil
}
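// A minimal usage sketch: GeometricMean multiplies the values and takes the
// n-th root, while HarmonicMean divides n by the sum of reciprocals and
// rejects zero or negative values.
//
// gm, _ := GeometricMean([]float64{2, 18})        // sqrt(2*18) = 6
// hm, _ := HarmonicMean([]float64{1, 2, 3, 4, 5}) // ≈ 2.18978102189781
// _, err := HarmonicMean([]float64{1, 0, 2})      // err is the zero input error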
stats-0.7.1/mean_test.go 0000664 0000000 0000000 00000003755 14427120666 0015205 0 ustar 00root root 0000000 0000000 package stats_test
import (
"testing"
"github.com/montanaflynn/stats"
)
func TestMean(t *testing.T) {
for _, c := range []struct {
in []float64
out float64
}{
{[]float64{1, 2, 3, 4, 5}, 3.0},
{[]float64{1, 2, 3, 4, 5, 6}, 3.5},
{[]float64{1}, 1.0},
} {
got, _ := stats.Mean(c.in)
if got != c.out {
t.Errorf("Mean(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Mean([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func BenchmarkMeanSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Mean(makeFloatSlice(5))
}
}
func BenchmarkMeanLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Mean(lf)
}
}
func TestGeometricMean(t *testing.T) {
s1 := []float64{2, 18}
s2 := []float64{10, 51.2, 8}
s3 := []float64{1, 3, 9, 27, 81}
for _, c := range []struct {
in []float64
out float64
}{
{s1, 6},
{s2, 16},
{s3, 9},
} {
gm, err := stats.GeometricMean(c.in)
if err != nil {
t.Errorf("Should not have returned an error")
}
gm, _ = stats.Round(gm, 0)
if gm != c.out {
t.Errorf("Geometric Mean %v != %v", gm, c.out)
}
}
_, err := stats.GeometricMean([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestHarmonicMean(t *testing.T) {
s1 := []float64{1, 2, 3, 4, 5}
s2 := []float64{10, -51.2, 8}
s3 := []float64{1, 0, 9, 27, 81}
hm, err := stats.HarmonicMean(s1)
if err != nil {
t.Errorf("Should not have returned an error")
}
hm, _ = stats.Round(hm, 2)
if hm != 2.19 {
t.Errorf("Geometric Mean %v != %v", hm, 2.19)
}
_, err = stats.HarmonicMean(s2)
if err == nil {
t.Errorf("Should have returned a negative number error")
}
_, err = stats.HarmonicMean(s3)
if err == nil {
t.Errorf("Should have returned a zero number error")
}
_, err = stats.HarmonicMean([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
stats-0.7.1/median.go 0000664 0000000 0000000 00000001112 14427120666 0014444 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Median gets the median number in a slice of numbers
func Median(input Float64Data) (median float64, err error) {
// Start by sorting a copy of the slice
c := sortedCopy(input)
// No math is needed if there are no numbers
// For an even count we average the two middle numbers
// using the Mean function
// For an odd count we just use the middle number
l := len(c)
if l == 0 {
return math.NaN(), EmptyInputErr
} else if l%2 == 0 {
median, _ = Mean(c[l/2-1 : l/2+1])
} else {
median = c[l/2]
}
return median, nil
}
stats-0.7.1/median_test.go 0000664 0000000 0000000 00000002237 14427120666 0015514 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"reflect"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleMedian() {
data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
median, _ := stats.Median(data)
fmt.Println(median)
// Output: 3.65
}
func TestMedian(t *testing.T) {
for _, c := range []struct {
in []float64
out float64
}{
{[]float64{5, 3, 4, 2, 1}, 3.0},
{[]float64{6, 3, 2, 4, 5, 1}, 3.5},
{[]float64{1}, 1.0},
} {
got, _ := stats.Median(c.in)
if got != c.out {
t.Errorf("Median(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Median([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func BenchmarkMedianSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Median(makeFloatSlice(5))
}
}
func BenchmarkMedianLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Median(lf)
}
}
func TestMedianSortSideEffects(t *testing.T) {
s := []float64{0.1, 0.3, 0.2, 0.4, 0.5}
a := []float64{0.1, 0.3, 0.2, 0.4, 0.5}
_, _ = stats.Median(s)
if !reflect.DeepEqual(s, a) {
t.Errorf("%.1f != %.1f", s, a)
}
}
stats-0.7.1/min.go 0000664 0000000 0000000 00000000774 14427120666 0014007 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Min finds the lowest number in a set of data
func Min(input Float64Data) (min float64, err error) {
// Get the count of numbers in the slice
l := input.Len()
// Return an error if there are no numbers
if l == 0 {
return math.NaN(), EmptyInputErr
}
// Get the first value as the starting point
min = input.Get(0)
// Iterate until done checking for a lower value
for i := 1; i < l; i++ {
if input.Get(i) < min {
min = input.Get(i)
}
}
return min, nil
}
stats-0.7.1/min_test.go 0000664 0000000 0000000 00000002534 14427120666 0015042 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleMin() {
d := stats.LoadRawData([]interface{}{1.1, "2", 3.0, 4, "5"})
a, _ := stats.Min(d)
fmt.Println(a)
// Output: 1.1
}
func TestMin(t *testing.T) {
for _, c := range []struct {
in []float64
out float64
}{
{[]float64{1.1, 2, 3, 4, 5}, 1.1},
{[]float64{10.534, 3, 5, 7, 9}, 3.0},
{[]float64{-5, 1, 5}, -5.0},
{[]float64{5}, 5},
} {
got, err := stats.Min(c.in)
if err != nil {
t.Errorf("Returned an error")
}
if got != c.out {
t.Errorf("Min(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Min([]float64{})
if err == nil {
t.Errorf("Empty slice didn't return an error")
}
}
func BenchmarkMinSmallFloatSlice(b *testing.B) {
testData := makeFloatSlice(5)
for i := 0; i < b.N; i++ {
_, _ = stats.Min(testData)
}
}
func BenchmarkMinSmallRandFloatSlice(b *testing.B) {
testData := makeRandFloatSlice(5)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Min(testData)
}
}
func BenchmarkMinLargeFloatSlice(b *testing.B) {
testData := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Min(testData)
}
}
func BenchmarkMinLargeRandFloatSlice(b *testing.B) {
testData := makeRandFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Min(testData)
}
}
stats-0.7.1/mode.go 0000664 0000000 0000000 00000001777 14427120666 0014154 0 ustar 00root root 0000000 0000000 package stats
// Mode gets the mode [most frequent value(s)] of a slice of float64s
func Mode(input Float64Data) (mode []float64, err error) {
// Return the input if there's only one number
l := input.Len()
if l == 1 {
return input, nil
} else if l == 0 {
return nil, EmptyInputErr
}
c := sortedCopyDif(input)
// Traverse sorted array,
// tracking the longest repeating sequence
// Preallocate capacity for a handful of modes without
// inserting placeholder zero values
mode = make([]float64, 0, 5)
cnt, maxCnt := 1, 1
for i := 1; i < l; i++ {
switch {
case c[i] == c[i-1]:
cnt++
case cnt == maxCnt && maxCnt != 1:
mode = append(mode, c[i-1])
cnt = 1
case cnt > maxCnt:
mode = append(mode[:0], c[i-1])
maxCnt, cnt = cnt, 1
default:
cnt = 1
}
}
switch {
case cnt == maxCnt:
mode = append(mode, c[l-1])
case cnt > maxCnt:
mode = append(mode[:0], c[l-1])
maxCnt = cnt
}
// Return an empty slice when every value is distinct (maxCnt == 1)
// or when every distinct value occurs equally often, since in
// either case there is no mode
if maxCnt == 1 || len(mode)*maxCnt == l && maxCnt != l {
return Float64Data{}, nil
}
return mode, nil
}
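// A minimal usage sketch: Mode returns every value that shares the highest
// count, and an empty slice when no value repeats or when all distinct
// values repeat equally often.
//
// m, _ := Mode([]float64{5, 5, 3, 3, 4, 2, 1}) // []float64{3, 5}
// m, _ = Mode([]float64{5, 3, 4, 2, 1})        // []float64{} (no repeats)
// m, _ = Mode([]float64{1, 1, 2, 2})           // []float64{} (all equally common)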
stats-0.7.1/mode_test.go 0000664 0000000 0000000 00000003241 14427120666 0015177 0 ustar 00root root 0000000 0000000 package stats_test
import (
"reflect"
"testing"
"github.com/montanaflynn/stats"
)
func TestMode(t *testing.T) {
for _, c := range []struct {
in []float64
out []float64
}{
{[]float64{2, 2, 2, 2}, []float64{2}},
{[]float64{5, 3, 4, 2, 1}, []float64{}},
{[]float64{5, 5, 3, 3, 4, 4, 2, 2, 1, 1}, []float64{}},
{[]float64{5, 5, 3, 4, 2, 1}, []float64{5}},
{[]float64{5, 5, 3, 3, 4, 2, 1}, []float64{3, 5}},
{[]float64{1}, []float64{1}},
{[]float64{-50, -46.325, -46.325, -.87, 1, 2.1122, 3.20, 5, 15, 15, 15.0001}, []float64{-46.325, 15}},
{[]float64{1, 2, 3, 4, 4, 4, 4, 4, 5, 3, 6, 7, 5, 0, 8, 8, 7, 6, 9, 9}, []float64{4}},
{[]float64{76, 76, 110, 76, 76, 76, 76, 119, 76, 76, 76, 76, 31, 31, 31, 31, 83, 83, 83, 78, 78, 78, 78, 78, 78, 78, 78}, []float64{76}},
} {
got, err := stats.Mode(c.in)
if err != nil {
t.Errorf("Returned an error")
}
if !reflect.DeepEqual(c.out, got) {
t.Errorf("Mode(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Mode([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func BenchmarkModeSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Mode(makeFloatSlice(5))
}
}
func BenchmarkModeSmallRandFloatSlice(b *testing.B) {
lf := makeRandFloatSlice(5)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Mode(lf)
}
}
func BenchmarkModeLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Mode(lf)
}
}
func BenchmarkModeLargeRandFloatSlice(b *testing.B) {
lf := makeRandFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Mode(lf)
}
}
stats-0.7.1/nist_test.go 0000664 0000000 0000000 00000057411 14427120666 0015240 0 ustar 00root root 0000000 0000000 package stats_test
import (
"math"
"testing"
"github.com/montanaflynn/stats"
)
var (
lew = stats.Float64Data{
-213, -564, -35, -15, 141, 115, -420, -360, 203, -338, -431, 194,
-220, -513, 154, -125, -559, 92, -21, -579, -52, 99, -543, -175,
162, -457, -346, 204, -300, -474, 164, -107, -572, -8, 83, -541,
-224, 180, -420, -374, 201, -236, -531, 83, 27, -564, -112, 131,
-507, -254, 199, -311, -495, 143, -46, -579, -90, 136, -472, -338,
202, -287, -477, 169, -124, -568, 17, 48, -568, -135, 162, -430,
-422, 172, -74, -577, -13, 92, -534, -243, 194, -355, -465, 156,
-81, -578, -64, 139, -449, -384, 193, -198, -538, 110, -44, -577,
-6, 66, -552, -164, 161, -460, -344, 205, -281, -504, 134, -28,
-576, -118, 156, -437, -381, 200, -220, -540, 83, 11, -568, -160,
172, -414, -408, 188, -125, -572, -32, 139, -492, -321, 205, -262,
-504, 142, -83, -574, 0, 48, -571, -106, 137, -501, -266, 190,
-391, -406, 194, -186, -553, 83, -13, -577, -49, 103, -515, -280,
201, 300, -506, 131, -45, -578, -80, 138, -462, -361, 201, -211,
-554, 32, 74, -533, -235, 187, -372, -442, 182, -147, -566, 25,
68, -535, -244, 194, -351, -463, 174, -125, -570, 15, 72, -550,
-190, 172, -424, -385, 198, -218, -536, 96}
lottery = stats.Float64Data{
162, 671, 933, 414, 788, 730, 817, 33, 536, 875, 670, 236, 473, 167,
877, 980, 316, 950, 456, 92, 517, 557, 956, 954, 104, 178, 794, 278,
147, 773, 437, 435, 502, 610, 582, 780, 689, 562, 964, 791, 28, 97,
848, 281, 858, 538, 660, 972, 671, 613, 867, 448, 738, 966, 139, 636,
847, 659, 754, 243, 122, 455, 195, 968, 793, 59, 730, 361, 574, 522,
97, 762, 431, 158, 429, 414, 22, 629, 788, 999, 187, 215, 810, 782,
47, 34, 108, 986, 25, 644, 829, 630, 315, 567, 919, 331, 207, 412,
242, 607, 668, 944, 749, 168, 864, 442, 533, 805, 372, 63, 458, 777,
416, 340, 436, 140, 919, 350, 510, 572, 905, 900, 85, 389, 473, 758,
444, 169, 625, 692, 140, 897, 672, 288, 312, 860, 724, 226, 884, 508,
976, 741, 476, 417, 831, 15, 318, 432, 241, 114, 799, 955, 833, 358,
935, 146, 630, 830, 440, 642, 356, 373, 271, 715, 367, 393, 190, 669,
8, 861, 108, 795, 269, 590, 326, 866, 64, 523, 862, 840, 219, 382,
998, 4, 628, 305, 747, 247, 34, 747, 729, 645, 856, 974, 24, 568, 24,
694, 608, 480, 410, 729, 947, 293, 53, 930, 223, 203, 677, 227, 62,
455, 387, 318, 562, 242, 428, 968}
mavro = stats.Float64Data{
2.00180, 2.00170, 2.00180, 2.00190, 2.00180, 2.00170, 2.00150,
2.00140, 2.00150, 2.00150, 2.00170, 2.00180, 2.00180, 2.00190,
2.00190, 2.00210, 2.00200, 2.00160, 2.00140, 2.00130, 2.00130,
2.00150, 2.00150, 2.00160, 2.00150, 2.00140, 2.00130, 2.00140,
2.00150, 2.00140, 2.00150, 2.00160, 2.00150, 2.00160, 2.00190,
2.00200, 2.00200, 2.00210, 2.00220, 2.00230, 2.00240, 2.00250,
2.00270, 2.00260, 2.00260, 2.00260, 2.00270, 2.00260, 2.00250,
2.00240}
michelson = stats.Float64Data{
299.85, 299.74, 299.90, 300.07, 299.93, 299.85, 299.95, 299.98,
299.98, 299.88, 300.00, 299.98, 299.93, 299.65, 299.76, 299.81,
300.00, 300.00, 299.96, 299.96, 299.96, 299.94, 299.96, 299.94,
299.88, 299.80, 299.85, 299.88, 299.90, 299.84, 299.83, 299.79,
299.81, 299.88, 299.88, 299.83, 299.80, 299.79, 299.76, 299.80,
299.88, 299.88, 299.88, 299.86, 299.72, 299.72, 299.62, 299.86,
299.97, 299.95, 299.88, 299.91, 299.85, 299.87, 299.84, 299.84,
299.85, 299.84, 299.84, 299.84, 299.89, 299.81, 299.81, 299.82,
299.80, 299.77, 299.76, 299.74, 299.75, 299.76, 299.91, 299.92,
299.89, 299.86, 299.88, 299.72, 299.84, 299.85, 299.85, 299.78,
299.89, 299.84, 299.78, 299.81, 299.76, 299.81, 299.79, 299.81,
299.82, 299.85, 299.87, 299.87, 299.81, 299.74, 299.81, 299.94,
299.95, 299.80, 299.81, 299.87}
pidigits = stats.Float64Data{
3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8,
9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3, 3, 8, 3, 2, 7, 9, 5, 0, 2,
8, 8, 4, 1, 9, 7, 1, 6, 9, 3, 9, 9, 3, 7, 5, 1, 0, 5, 8, 2, 0, 9,
7, 4, 9, 4, 4, 5, 9, 2, 3, 0, 7, 8, 1, 6, 4, 0, 6, 2, 8, 6, 2, 0,
8, 9, 9, 8, 6, 2, 8, 0, 3, 4, 8, 2, 5, 3, 4, 2, 1, 1, 7, 0, 6, 7,
9, 8, 2, 1, 4, 8, 0, 8, 6, 5, 1, 3, 2, 8, 2, 3, 0, 6, 6, 4, 7, 0,
9, 3, 8, 4, 4, 6, 0, 9, 5, 5, 0, 5, 8, 2, 2, 3, 1, 7, 2, 5, 3, 5,
9, 4, 0, 8, 1, 2, 8, 4, 8, 1, 1, 1, 7, 4, 5, 0, 2, 8, 4, 1, 0, 2,
7, 0, 1, 9, 3, 8, 5, 2, 1, 1, 0, 5, 5, 5, 9, 6, 4, 4, 6, 2, 2, 9,
4, 8, 9, 5, 4, 9, 3, 0, 3, 8, 1, 9, 6, 4, 4, 2, 8, 8, 1, 0, 9, 7,
5, 6, 6, 5, 9, 3, 3, 4, 4, 6, 1, 2, 8, 4, 7, 5, 6, 4, 8, 2, 3, 3,
7, 8, 6, 7, 8, 3, 1, 6, 5, 2, 7, 1, 2, 0, 1, 9, 0, 9, 1, 4, 5, 6,
4, 8, 5, 6, 6, 9, 2, 3, 4, 6, 0, 3, 4, 8, 6, 1, 0, 4, 5, 4, 3, 2,
6, 6, 4, 8, 2, 1, 3, 3, 9, 3, 6, 0, 7, 2, 6, 0, 2, 4, 9, 1, 4, 1,
2, 7, 3, 7, 2, 4, 5, 8, 7, 0, 0, 6, 6, 0, 6, 3, 1, 5, 5, 8, 8, 1,
7, 4, 8, 8, 1, 5, 2, 0, 9, 2, 0, 9, 6, 2, 8, 2, 9, 2, 5, 4, 0, 9,
1, 7, 1, 5, 3, 6, 4, 3, 6, 7, 8, 9, 2, 5, 9, 0, 3, 6, 0, 0, 1, 1,
3, 3, 0, 5, 3, 0, 5, 4, 8, 8, 2, 0, 4, 6, 6, 5, 2, 1, 3, 8, 4, 1,
4, 6, 9, 5, 1, 9, 4, 1, 5, 1, 1, 6, 0, 9, 4, 3, 3, 0, 5, 7, 2, 7,
0, 3, 6, 5, 7, 5, 9, 5, 9, 1, 9, 5, 3, 0, 9, 2, 1, 8, 6, 1, 1, 7,
3, 8, 1, 9, 3, 2, 6, 1, 1, 7, 9, 3, 1, 0, 5, 1, 1, 8, 5, 4, 8, 0,
7, 4, 4, 6, 2, 3, 7, 9, 9, 6, 2, 7, 4, 9, 5, 6, 7, 3, 5, 1, 8, 8,
5, 7, 5, 2, 7, 2, 4, 8, 9, 1, 2, 2, 7, 9, 3, 8, 1, 8, 3, 0, 1, 1,
9, 4, 9, 1, 2, 9, 8, 3, 3, 6, 7, 3, 3, 6, 2, 4, 4, 0, 6, 5, 6, 6,
4, 3, 0, 8, 6, 0, 2, 1, 3, 9, 4, 9, 4, 6, 3, 9, 5, 2, 2, 4, 7, 3,
7, 1, 9, 0, 7, 0, 2, 1, 7, 9, 8, 6, 0, 9, 4, 3, 7, 0, 2, 7, 7, 0,
5, 3, 9, 2, 1, 7, 1, 7, 6, 2, 9, 3, 1, 7, 6, 7, 5, 2, 3, 8, 4, 6,
7, 4, 8, 1, 8, 4, 6, 7, 6, 6, 9, 4, 0, 5, 1, 3, 2, 0, 0, 0, 5, 6,
8, 1, 2, 7, 1, 4, 5, 2, 6, 3, 5, 6, 0, 8, 2, 7, 7, 8, 5, 7, 7, 1,
3, 4, 2, 7, 5, 7, 7, 8, 9, 6, 0, 9, 1, 7, 3, 6, 3, 7, 1, 7, 8, 7,
2, 1, 4, 6, 8, 4, 4, 0, 9, 0, 1, 2, 2, 4, 9, 5, 3, 4, 3, 0, 1, 4,
6, 5, 4, 9, 5, 8, 5, 3, 7, 1, 0, 5, 0, 7, 9, 2, 2, 7, 9, 6, 8, 9,
2, 5, 8, 9, 2, 3, 5, 4, 2, 0, 1, 9, 9, 5, 6, 1, 1, 2, 1, 2, 9, 0,
2, 1, 9, 6, 0, 8, 6, 4, 0, 3, 4, 4, 1, 8, 1, 5, 9, 8, 1, 3, 6, 2,
9, 7, 7, 4, 7, 7, 1, 3, 0, 9, 9, 6, 0, 5, 1, 8, 7, 0, 7, 2, 1, 1,
3, 4, 9, 9, 9, 9, 9, 9, 8, 3, 7, 2, 9, 7, 8, 0, 4, 9, 9, 5, 1, 0,
5, 9, 7, 3, 1, 7, 3, 2, 8, 1, 6, 0, 9, 6, 3, 1, 8, 5, 9, 5, 0, 2,
4, 4, 5, 9, 4, 5, 5, 3, 4, 6, 9, 0, 8, 3, 0, 2, 6, 4, 2, 5, 2, 2,
3, 0, 8, 2, 5, 3, 3, 4, 4, 6, 8, 5, 0, 3, 5, 2, 6, 1, 9, 3, 1, 1,
8, 8, 1, 7, 1, 0, 1, 0, 0, 0, 3, 1, 3, 7, 8, 3, 8, 7, 5, 2, 8, 8,
6, 5, 8, 7, 5, 3, 3, 2, 0, 8, 3, 8, 1, 4, 2, 0, 6, 1, 7, 1, 7, 7,
6, 6, 9, 1, 4, 7, 3, 0, 3, 5, 9, 8, 2, 5, 3, 4, 9, 0, 4, 2, 8, 7,
5, 5, 4, 6, 8, 7, 3, 1, 1, 5, 9, 5, 6, 2, 8, 6, 3, 8, 8, 2, 3, 5,
3, 7, 8, 7, 5, 9, 3, 7, 5, 1, 9, 5, 7, 7, 8, 1, 8, 5, 7, 7, 3, 0,
5, 3, 2, 1, 7, 1, 2, 2, 6, 8, 0, 6, 6, 1, 3, 0, 0, 1, 9, 2, 7, 8,
7, 6, 6, 1, 1, 1, 9, 5, 9, 0, 9, 2, 1, 6, 4, 2, 0, 1, 9, 8, 9, 3,
8, 0, 9, 5, 2, 5, 7, 2, 0, 1, 0, 6, 5, 4, 8, 5, 8, 6, 3, 2, 7, 8,
8, 6, 5, 9, 3, 6, 1, 5, 3, 3, 8, 1, 8, 2, 7, 9, 6, 8, 2, 3, 0, 3,
0, 1, 9, 5, 2, 0, 3, 5, 3, 0, 1, 8, 5, 2, 9, 6, 8, 9, 9, 5, 7, 7,
3, 6, 2, 2, 5, 9, 9, 4, 1, 3, 8, 9, 1, 2, 4, 9, 7, 2, 1, 7, 7, 5,
2, 8, 3, 4, 7, 9, 1, 3, 1, 5, 1, 5, 5, 7, 4, 8, 5, 7, 2, 4, 2, 4,
5, 4, 1, 5, 0, 6, 9, 5, 9, 5, 0, 8, 2, 9, 5, 3, 3, 1, 1, 6, 8, 6,
1, 7, 2, 7, 8, 5, 5, 8, 8, 9, 0, 7, 5, 0, 9, 8, 3, 8, 1, 7, 5, 4,
6, 3, 7, 4, 6, 4, 9, 3, 9, 3, 1, 9, 2, 5, 5, 0, 6, 0, 4, 0, 0, 9,
2, 7, 7, 0, 1, 6, 7, 1, 1, 3, 9, 0, 0, 9, 8, 4, 8, 8, 2, 4, 0, 1,
2, 8, 5, 8, 3, 6, 1, 6, 0, 3, 5, 6, 3, 7, 0, 7, 6, 6, 0, 1, 0, 4,
7, 1, 0, 1, 8, 1, 9, 4, 2, 9, 5, 5, 5, 9, 6, 1, 9, 8, 9, 4, 6, 7,
6, 7, 8, 3, 7, 4, 4, 9, 4, 4, 8, 2, 5, 5, 3, 7, 9, 7, 7, 4, 7, 2,
6, 8, 4, 7, 1, 0, 4, 0, 4, 7, 5, 3, 4, 6, 4, 6, 2, 0, 8, 0, 4, 6,
6, 8, 4, 2, 5, 9, 0, 6, 9, 4, 9, 1, 2, 9, 3, 3, 1, 3, 6, 7, 7, 0,
2, 8, 9, 8, 9, 1, 5, 2, 1, 0, 4, 7, 5, 2, 1, 6, 2, 0, 5, 6, 9, 6,
6, 0, 2, 4, 0, 5, 8, 0, 3, 8, 1, 5, 0, 1, 9, 3, 5, 1, 1, 2, 5, 3,
3, 8, 2, 4, 3, 0, 0, 3, 5, 5, 8, 7, 6, 4, 0, 2, 4, 7, 4, 9, 6, 4,
7, 3, 2, 6, 3, 9, 1, 4, 1, 9, 9, 2, 7, 2, 6, 0, 4, 2, 6, 9, 9, 2,
2, 7, 9, 6, 7, 8, 2, 3, 5, 4, 7, 8, 1, 6, 3, 6, 0, 0, 9, 3, 4, 1,
7, 2, 1, 6, 4, 1, 2, 1, 9, 9, 2, 4, 5, 8, 6, 3, 1, 5, 0, 3, 0, 2,
8, 6, 1, 8, 2, 9, 7, 4, 5, 5, 5, 7, 0, 6, 7, 4, 9, 8, 3, 8, 5, 0,
5, 4, 9, 4, 5, 8, 8, 5, 8, 6, 9, 2, 6, 9, 9, 5, 6, 9, 0, 9, 2, 7,
2, 1, 0, 7, 9, 7, 5, 0, 9, 3, 0, 2, 9, 5, 5, 3, 2, 1, 1, 6, 5, 3,
4, 4, 9, 8, 7, 2, 0, 2, 7, 5, 5, 9, 6, 0, 2, 3, 6, 4, 8, 0, 6, 6,
5, 4, 9, 9, 1, 1, 9, 8, 8, 1, 8, 3, 4, 7, 9, 7, 7, 5, 3, 5, 6, 6,
3, 6, 9, 8, 0, 7, 4, 2, 6, 5, 4, 2, 5, 2, 7, 8, 6, 2, 5, 5, 1, 8,
1, 8, 4, 1, 7, 5, 7, 4, 6, 7, 2, 8, 9, 0, 9, 7, 7, 7, 7, 2, 7, 9,
3, 8, 0, 0, 0, 8, 1, 6, 4, 7, 0, 6, 0, 0, 1, 6, 1, 4, 5, 2, 4, 9,
1, 9, 2, 1, 7, 3, 2, 1, 7, 2, 1, 4, 7, 7, 2, 3, 5, 0, 1, 4, 1, 4,
4, 1, 9, 7, 3, 5, 6, 8, 5, 4, 8, 1, 6, 1, 3, 6, 1, 1, 5, 7, 3, 5,
2, 5, 5, 2, 1, 3, 3, 4, 7, 5, 7, 4, 1, 8, 4, 9, 4, 6, 8, 4, 3, 8,
5, 2, 3, 3, 2, 3, 9, 0, 7, 3, 9, 4, 1, 4, 3, 3, 3, 4, 5, 4, 7, 7,
6, 2, 4, 1, 6, 8, 6, 2, 5, 1, 8, 9, 8, 3, 5, 6, 9, 4, 8, 5, 5, 6,
2, 0, 9, 9, 2, 1, 9, 2, 2, 2, 1, 8, 4, 2, 7, 2, 5, 5, 0, 2, 5, 4,
2, 5, 6, 8, 8, 7, 6, 7, 1, 7, 9, 0, 4, 9, 4, 6, 0, 1, 6, 5, 3, 4,
6, 6, 8, 0, 4, 9, 8, 8, 6, 2, 7, 2, 3, 2, 7, 9, 1, 7, 8, 6, 0, 8,
5, 7, 8, 4, 3, 8, 3, 8, 2, 7, 9, 6, 7, 9, 7, 6, 6, 8, 1, 4, 5, 4,
1, 0, 0, 9, 5, 3, 8, 8, 3, 7, 8, 6, 3, 6, 0, 9, 5, 0, 6, 8, 0, 0,
6, 4, 2, 2, 5, 1, 2, 5, 2, 0, 5, 1, 1, 7, 3, 9, 2, 9, 8, 4, 8, 9,
6, 0, 8, 4, 1, 2, 8, 4, 8, 8, 6, 2, 6, 9, 4, 5, 6, 0, 4, 2, 4, 1,
9, 6, 5, 2, 8, 5, 0, 2, 2, 2, 1, 0, 6, 6, 1, 1, 8, 6, 3, 0, 6, 7,
4, 4, 2, 7, 8, 6, 2, 2, 0, 3, 9, 1, 9, 4, 9, 4, 5, 0, 4, 7, 1, 2,
3, 7, 1, 3, 7, 8, 6, 9, 6, 0, 9, 5, 6, 3, 6, 4, 3, 7, 1, 9, 1, 7,
2, 8, 7, 4, 6, 7, 7, 6, 4, 6, 5, 7, 5, 7, 3, 9, 6, 2, 4, 1, 3, 8,
9, 0, 8, 6, 5, 8, 3, 2, 6, 4, 5, 9, 9, 5, 8, 1, 3, 3, 9, 0, 4, 7,
8, 0, 2, 7, 5, 9, 0, 0, 9, 9, 4, 6, 5, 7, 6, 4, 0, 7, 8, 9, 5, 1,
2, 6, 9, 4, 6, 8, 3, 9, 8, 3, 5, 2, 5, 9, 5, 7, 0, 9, 8, 2, 5, 8,
2, 2, 6, 2, 0, 5, 2, 2, 4, 8, 9, 4, 0, 7, 7, 2, 6, 7, 1, 9, 4, 7,
8, 2, 6, 8, 4, 8, 2, 6, 0, 1, 4, 7, 6, 9, 9, 0, 9, 0, 2, 6, 4, 0,
1, 3, 6, 3, 9, 4, 4, 3, 7, 4, 5, 5, 3, 0, 5, 0, 6, 8, 2, 0, 3, 4,
9, 6, 2, 5, 2, 4, 5, 1, 7, 4, 9, 3, 9, 9, 6, 5, 1, 4, 3, 1, 4, 2,
9, 8, 0, 9, 1, 9, 0, 6, 5, 9, 2, 5, 0, 9, 3, 7, 2, 2, 1, 6, 9, 6,
4, 6, 1, 5, 1, 5, 7, 0, 9, 8, 5, 8, 3, 8, 7, 4, 1, 0, 5, 9, 7, 8,
8, 5, 9, 5, 9, 7, 7, 2, 9, 7, 5, 4, 9, 8, 9, 3, 0, 1, 6, 1, 7, 5,
3, 9, 2, 8, 4, 6, 8, 1, 3, 8, 2, 6, 8, 6, 8, 3, 8, 6, 8, 9, 4, 2,
7, 7, 4, 1, 5, 5, 9, 9, 1, 8, 5, 5, 9, 2, 5, 2, 4, 5, 9, 5, 3, 9,
5, 9, 4, 3, 1, 0, 4, 9, 9, 7, 2, 5, 2, 4, 6, 8, 0, 8, 4, 5, 9, 8,
7, 2, 7, 3, 6, 4, 4, 6, 9, 5, 8, 4, 8, 6, 5, 3, 8, 3, 6, 7, 3, 6,
2, 2, 2, 6, 2, 6, 0, 9, 9, 1, 2, 4, 6, 0, 8, 0, 5, 1, 2, 4, 3, 8,
8, 4, 3, 9, 0, 4, 5, 1, 2, 4, 4, 1, 3, 6, 5, 4, 9, 7, 6, 2, 7, 8,
0, 7, 9, 7, 7, 1, 5, 6, 9, 1, 4, 3, 5, 9, 9, 7, 7, 0, 0, 1, 2, 9,
6, 1, 6, 0, 8, 9, 4, 4, 1, 6, 9, 4, 8, 6, 8, 5, 5, 5, 8, 4, 8, 4,
0, 6, 3, 5, 3, 4, 2, 2, 0, 7, 2, 2, 2, 5, 8, 2, 8, 4, 8, 8, 6, 4,
8, 1, 5, 8, 4, 5, 6, 0, 2, 8, 5, 0, 6, 0, 1, 6, 8, 4, 2, 7, 3, 9,
4, 5, 2, 2, 6, 7, 4, 6, 7, 6, 7, 8, 8, 9, 5, 2, 5, 2, 1, 3, 8, 5,
2, 2, 5, 4, 9, 9, 5, 4, 6, 6, 6, 7, 2, 7, 8, 2, 3, 9, 8, 6, 4, 5,
6, 5, 9, 6, 1, 1, 6, 3, 5, 4, 8, 8, 6, 2, 3, 0, 5, 7, 7, 4, 5, 6,
4, 9, 8, 0, 3, 5, 5, 9, 3, 6, 3, 4, 5, 6, 8, 1, 7, 4, 3, 2, 4, 1,
1, 2, 5, 1, 5, 0, 7, 6, 0, 6, 9, 4, 7, 9, 4, 5, 1, 0, 9, 6, 5, 9,
6, 0, 9, 4, 0, 2, 5, 2, 2, 8, 8, 7, 9, 7, 1, 0, 8, 9, 3, 1, 4, 5,
6, 6, 9, 1, 3, 6, 8, 6, 7, 2, 2, 8, 7, 4, 8, 9, 4, 0, 5, 6, 0, 1,
0, 1, 5, 0, 3, 3, 0, 8, 6, 1, 7, 9, 2, 8, 6, 8, 0, 9, 2, 0, 8, 7,
4, 7, 6, 0, 9, 1, 7, 8, 2, 4, 9, 3, 8, 5, 8, 9, 0, 0, 9, 7, 1, 4,
9, 0, 9, 6, 7, 5, 9, 8, 5, 2, 6, 1, 3, 6, 5, 5, 4, 9, 7, 8, 1, 8,
9, 3, 1, 2, 9, 7, 8, 4, 8, 2, 1, 6, 8, 2, 9, 9, 8, 9, 4, 8, 7, 2,
2, 6, 5, 8, 8, 0, 4, 8, 5, 7, 5, 6, 4, 0, 1, 4, 2, 7, 0, 4, 7, 7,
5, 5, 5, 1, 3, 2, 3, 7, 9, 6, 4, 1, 4, 5, 1, 5, 2, 3, 7, 4, 6, 2,
3, 4, 3, 6, 4, 5, 4, 2, 8, 5, 8, 4, 4, 4, 7, 9, 5, 2, 6, 5, 8, 6,
7, 8, 2, 1, 0, 5, 1, 1, 4, 1, 3, 5, 4, 7, 3, 5, 7, 3, 9, 5, 2, 3,
1, 1, 3, 4, 2, 7, 1, 6, 6, 1, 0, 2, 1, 3, 5, 9, 6, 9, 5, 3, 6, 2,
3, 1, 4, 4, 2, 9, 5, 2, 4, 8, 4, 9, 3, 7, 1, 8, 7, 1, 1, 0, 1, 4,
5, 7, 6, 5, 4, 0, 3, 5, 9, 0, 2, 7, 9, 9, 3, 4, 4, 0, 3, 7, 4, 2,
0, 0, 7, 3, 1, 0, 5, 7, 8, 5, 3, 9, 0, 6, 2, 1, 9, 8, 3, 8, 7, 4,
4, 7, 8, 0, 8, 4, 7, 8, 4, 8, 9, 6, 8, 3, 3, 2, 1, 4, 4, 5, 7, 1,
3, 8, 6, 8, 7, 5, 1, 9, 4, 3, 5, 0, 6, 4, 3, 0, 2, 1, 8, 4, 5, 3,
1, 9, 1, 0, 4, 8, 4, 8, 1, 0, 0, 5, 3, 7, 0, 6, 1, 4, 6, 8, 0, 6,
7, 4, 9, 1, 9, 2, 7, 8, 1, 9, 1, 1, 9, 7, 9, 3, 9, 9, 5, 2, 0, 6,
1, 4, 1, 9, 6, 6, 3, 4, 2, 8, 7, 5, 4, 4, 4, 0, 6, 4, 3, 7, 4, 5,
1, 2, 3, 7, 1, 8, 1, 9, 2, 1, 7, 9, 9, 9, 8, 3, 9, 1, 0, 1, 5, 9,
1, 9, 5, 6, 1, 8, 1, 4, 6, 7, 5, 1, 4, 2, 6, 9, 1, 2, 3, 9, 7, 4,
8, 9, 4, 0, 9, 0, 7, 1, 8, 6, 4, 9, 4, 2, 3, 1, 9, 6, 1, 5, 6, 7,
9, 4, 5, 2, 0, 8, 0, 9, 5, 1, 4, 6, 5, 5, 0, 2, 2, 5, 2, 3, 1, 6,
0, 3, 8, 8, 1, 9, 3, 0, 1, 4, 2, 0, 9, 3, 7, 6, 2, 1, 3, 7, 8, 5,
5, 9, 5, 6, 6, 3, 8, 9, 3, 7, 7, 8, 7, 0, 8, 3, 0, 3, 9, 0, 6, 9,
7, 9, 2, 0, 7, 7, 3, 4, 6, 7, 2, 2, 1, 8, 2, 5, 6, 2, 5, 9, 9, 6,
6, 1, 5, 0, 1, 4, 2, 1, 5, 0, 3, 0, 6, 8, 0, 3, 8, 4, 4, 7, 7, 3,
4, 5, 4, 9, 2, 0, 2, 6, 0, 5, 4, 1, 4, 6, 6, 5, 9, 2, 5, 2, 0, 1,
4, 9, 7, 4, 4, 2, 8, 5, 0, 7, 3, 2, 5, 1, 8, 6, 6, 6, 0, 0, 2, 1,
3, 2, 4, 3, 4, 0, 8, 8, 1, 9, 0, 7, 1, 0, 4, 8, 6, 3, 3, 1, 7, 3,
4, 6, 4, 9, 6, 5, 1, 4, 5, 3, 9, 0, 5, 7, 9, 6, 2, 6, 8, 5, 6, 1,
0, 0, 5, 5, 0, 8, 1, 0, 6, 6, 5, 8, 7, 9, 6, 9, 9, 8, 1, 6, 3, 5,
7, 4, 7, 3, 6, 3, 8, 4, 0, 5, 2, 5, 7, 1, 4, 5, 9, 1, 0, 2, 8, 9,
7, 0, 6, 4, 1, 4, 0, 1, 1, 0, 9, 7, 1, 2, 0, 6, 2, 8, 0, 4, 3, 9,
0, 3, 9, 7, 5, 9, 5, 1, 5, 6, 7, 7, 1, 5, 7, 7, 0, 0, 4, 2, 0, 3,
3, 7, 8, 6, 9, 9, 3, 6, 0, 0, 7, 2, 3, 0, 5, 5, 8, 7, 6, 3, 1, 7,
6, 3, 5, 9, 4, 2, 1, 8, 7, 3, 1, 2, 5, 1, 4, 7, 1, 2, 0, 5, 3, 2,
9, 2, 8, 1, 9, 1, 8, 2, 6, 1, 8, 6, 1, 2, 5, 8, 6, 7, 3, 2, 1, 5,
7, 9, 1, 9, 8, 4, 1, 4, 8, 4, 8, 8, 2, 9, 1, 6, 4, 4, 7, 0, 6, 0,
9, 5, 7, 5, 2, 7, 0, 6, 9, 5, 7, 2, 2, 0, 9, 1, 7, 5, 6, 7, 1, 1,
6, 7, 2, 2, 9, 1, 0, 9, 8, 1, 6, 9, 0, 9, 1, 5, 2, 8, 0, 1, 7, 3,
5, 0, 6, 7, 1, 2, 7, 4, 8, 5, 8, 3, 2, 2, 2, 8, 7, 1, 8, 3, 5, 2,
0, 9, 3, 5, 3, 9, 6, 5, 7, 2, 5, 1, 2, 1, 0, 8, 3, 5, 7, 9, 1, 5,
1, 3, 6, 9, 8, 8, 2, 0, 9, 1, 4, 4, 4, 2, 1, 0, 0, 6, 7, 5, 1, 0,
3, 3, 4, 6, 7, 1, 1, 0, 3, 1, 4, 1, 2, 6, 7, 1, 1, 1, 3, 6, 9, 9,
0, 8, 6, 5, 8, 5, 1, 6, 3, 9, 8, 3, 1, 5, 0, 1, 9, 7, 0, 1, 6, 5,
1, 5, 1, 1, 6, 8, 5, 1, 7, 1, 4, 3, 7, 6, 5, 7, 6, 1, 8, 3, 5, 1,
5, 5, 6, 5, 0, 8, 8, 4, 9, 0, 9, 9, 8, 9, 8, 5, 9, 9, 8, 2, 3, 8,
7, 3, 4, 5, 5, 2, 8, 3, 3, 1, 6, 3, 5, 5, 0, 7, 6, 4, 7, 9, 1, 8,
5, 3, 5, 8, 9, 3, 2, 2, 6, 1, 8, 5, 4, 8, 9, 6, 3, 2, 1, 3, 2, 9,
3, 3, 0, 8, 9, 8, 5, 7, 0, 6, 4, 2, 0, 4, 6, 7, 5, 2, 5, 9, 0, 7,
0, 9, 1, 5, 4, 8, 1, 4, 1, 6, 5, 4, 9, 8, 5, 9, 4, 6, 1, 6, 3, 7,
1, 8, 0, 2, 7, 0, 9, 8, 1, 9, 9, 4, 3, 0, 9, 9, 2, 4, 4, 8, 8, 9,
5, 7, 5, 7, 1, 2, 8, 2, 8, 9, 0, 5, 9, 2, 3, 2, 3, 3, 2, 6, 0, 9,
7, 2, 9, 9, 7, 1, 2, 0, 8, 4, 4, 3, 3, 5, 7, 3, 2, 6, 5, 4, 8, 9,
3, 8, 2, 3, 9, 1, 1, 9, 3, 2, 5, 9, 7, 4, 6, 3, 6, 6, 7, 3, 0, 5,
8, 3, 6, 0, 4, 1, 4, 2, 8, 1, 3, 8, 8, 3, 0, 3, 2, 0, 3, 8, 2, 4,
9, 0, 3, 7, 5, 8, 9, 8, 5, 2, 4, 3, 7, 4, 4, 1, 7, 0, 2, 9, 1, 3,
2, 7, 6, 5, 6, 1, 8, 0, 9, 3, 7, 7, 3, 4, 4, 4, 0, 3, 0, 7, 0, 7,
4, 6, 9, 2, 1, 1, 2, 0, 1, 9, 1, 3, 0, 2, 0, 3, 3, 0, 3, 8, 0, 1,
9, 7, 6, 2, 1, 1, 0, 1, 1, 0, 0, 4, 4, 9, 2, 9, 3, 2, 1, 5, 1, 6,
0, 8, 4, 2, 4, 4, 4, 8, 5, 9, 6, 3, 7, 6, 6, 9, 8, 3, 8, 9, 5, 2,
2, 8, 6, 8, 4, 7, 8, 3, 1, 2, 3, 5, 5, 2, 6, 5, 8, 2, 1, 3, 1, 4,
4, 9, 5, 7, 6, 8, 5, 7, 2, 6, 2, 4, 3, 3, 4, 4, 1, 8, 9, 3, 0, 3,
9, 6, 8, 6, 4, 2, 6, 2, 4, 3, 4, 1, 0, 7, 7, 3, 2, 2, 6, 9, 7, 8,
0, 2, 8, 0, 7, 3, 1, 8, 9, 1, 5, 4, 4, 1, 1, 0, 1, 0, 4, 4, 6, 8,
2, 3, 2, 5, 2, 7, 1, 6, 2, 0, 1, 0, 5, 2, 6, 5, 2, 2, 7, 2, 1, 1,
1, 6, 6, 0, 3, 9, 6, 6, 6, 5, 5, 7, 3, 0, 9, 2, 5, 4, 7, 1, 1, 0,
5, 5, 7, 8, 5, 3, 7, 6, 3, 4, 6, 6, 8, 2, 0, 6, 5, 3, 1, 0, 9, 8,
9, 6, 5, 2, 6, 9, 1, 8, 6, 2, 0, 5, 6, 4, 7, 6, 9, 3, 1, 2, 5, 7,
0, 5, 8, 6, 3, 5, 6, 6, 2, 0, 1, 8, 5, 5, 8, 1, 0, 0, 7, 2, 9, 3,
6, 0, 6, 5, 9, 8, 7, 6, 4, 8, 6, 1, 1, 7, 9, 1, 0, 4, 5, 3, 3, 4,
8, 8, 5, 0, 3, 4, 6, 1, 1, 3, 6, 5, 7, 6, 8, 6, 7, 5, 3, 2, 4, 9,
4, 4, 1, 6, 6, 8, 0, 3, 9, 6, 2, 6, 5, 7, 9, 7, 8, 7, 7, 1, 8, 5,
5, 6, 0, 8, 4, 5, 5, 2, 9, 6, 5, 4, 1, 2, 6, 6, 5, 4, 0, 8, 5, 3,
0, 6, 1, 4, 3, 4, 4, 4, 3, 1, 8, 5, 8, 6, 7, 6, 9, 7, 5, 1, 4, 5,
6, 6, 1, 4, 0, 6, 8, 0, 0, 7, 0, 0, 2, 3, 7, 8, 7, 7, 6, 5, 9, 1,
3, 4, 4, 0, 1, 7, 1, 2, 7, 4, 9, 4, 7, 0, 4, 2, 0, 5, 6, 2, 2, 3,
0, 5, 3, 8, 9, 9, 4, 5, 6, 1, 3, 1, 4, 0, 7, 1, 1, 2, 7, 0, 0, 0,
4, 0, 7, 8, 5, 4, 7, 3, 3, 2, 6, 9, 9, 3, 9, 0, 8, 1, 4, 5, 4, 6,
6, 4, 6, 4, 5, 8, 8, 0, 7, 9, 7, 2, 7, 0, 8, 2, 6, 6, 8, 3, 0, 6,
3, 4, 3, 2, 8, 5, 8, 7, 8, 5, 6, 9, 8, 3, 0, 5, 2, 3, 5, 8, 0, 8,
9, 3, 3, 0, 6, 5, 7, 5, 7, 4, 0, 6, 7, 9, 5, 4, 5, 7, 1, 6, 3, 7,
7, 5, 2, 5, 4, 2, 0, 2, 1, 1, 4, 9, 5, 5, 7, 6, 1, 5, 8, 1, 4, 0,
0, 2, 5, 0, 1, 2, 6, 2, 2, 8, 5, 9, 4, 1, 3, 0, 2, 1, 6, 4, 7, 1,
5, 5, 0, 9, 7, 9, 2, 5, 9, 2, 3, 0, 9, 9, 0, 7, 9, 6, 5, 4, 7, 3,
7, 6, 1, 2, 5, 5, 1, 7, 6, 5, 6, 7, 5, 1, 3, 5, 7, 5, 1, 7, 8, 2,
9, 6, 6, 6, 4, 5, 4, 7, 7, 9, 1, 7, 4, 5, 0, 1, 1, 2, 9, 9, 6, 1,
4, 8, 9, 0, 3, 0, 4, 6, 3, 9, 9, 4, 7, 1, 3, 2, 9, 6, 2, 1, 0, 7,
3, 4, 0, 4, 3, 7, 5, 1, 8, 9, 5, 7, 3, 5, 9, 6, 1, 4, 5, 8, 9, 0,
1, 9, 3, 8, 9, 7, 1, 3, 1, 1, 1, 7, 9, 0, 4, 2, 9, 7, 8, 2, 8, 5,
6, 4, 7, 5, 0, 3, 2, 0, 3, 1, 9, 8, 6, 9, 1, 5, 1, 4, 0, 2, 8, 7,
0, 8, 0, 8, 5, 9, 9, 0, 4, 8, 0, 1, 0, 9, 4, 1, 2, 1, 4, 7, 2, 2,
1, 3, 1, 7, 9, 4, 7, 6, 4, 7, 7, 7, 2, 6, 2, 2, 4, 1, 4, 2, 5, 4,
8, 5, 4, 5, 4, 0, 3, 3, 2, 1, 5, 7, 1, 8, 5, 3, 0, 6, 1, 4, 2, 2,
8, 8, 1, 3, 7, 5, 8, 5, 0, 4, 3, 0, 6, 3, 3, 2, 1, 7, 5, 1, 8, 2,
9, 7, 9, 8, 6, 6, 2, 2, 3, 7, 1, 7, 2, 1, 5, 9, 1, 6, 0, 7, 7, 1,
6, 6, 9, 2, 5, 4, 7, 4, 8, 7, 3, 8, 9, 8, 6, 6, 5, 4, 9, 4, 9, 4,
5, 0, 1, 1, 4, 6, 5, 4, 0, 6, 2, 8, 4, 3, 3, 6, 6, 3, 9, 3, 7, 9,
0, 0, 3, 9, 7, 6, 9, 2, 6, 5, 6, 7, 2, 1, 4, 6, 3, 8, 5, 3, 0, 6,
7, 3, 6, 0, 9, 6, 5, 7, 1, 2, 0, 9, 1, 8, 0, 7, 6, 3, 8, 3, 2, 7,
1, 6, 6, 4, 1, 6, 2, 7, 4, 8, 8, 8, 8, 0, 0, 7, 8, 6, 9, 2, 5, 6,
0, 2, 9, 0, 2, 2, 8, 4, 7, 2, 1, 0, 4, 0, 3, 1, 7, 2, 1, 1, 8, 6,
0, 8, 2, 0, 4, 1, 9, 0, 0, 0, 4, 2, 2, 9, 6, 6, 1, 7, 1, 1, 9, 6,
3, 7, 7, 9, 2, 1, 3, 3, 7, 5, 7, 5, 1, 1, 4, 9, 5, 9, 5, 0, 1, 5,
6, 6, 0, 4, 9, 6, 3, 1, 8, 6, 2, 9, 4, 7, 2, 6, 5, 4, 7, 3, 6, 4,
2, 5, 2, 3, 0, 8, 1, 7, 7, 0, 3, 6, 7, 5, 1, 5, 9, 0, 6, 7, 3, 5,
0, 2, 3, 5, 0, 7, 2, 8, 3, 5, 4, 0, 5, 6, 7, 0, 4, 0, 3, 8, 6, 7,
4, 3, 5, 1, 3, 6, 2, 2, 2, 2, 4, 7, 7, 1, 5, 8, 9, 1, 5, 0, 4, 9,
5, 3, 0, 9, 8, 4, 4, 4, 8, 9, 3, 3, 3, 0, 9, 6, 3, 4, 0, 8, 7, 8,
0, 7, 6, 9, 3, 2, 5, 9, 9, 3, 9, 7, 8, 0, 5, 4, 1, 9, 3, 4, 1, 4,
4, 7, 3, 7, 7, 4, 4, 1, 8, 4, 2, 6, 3, 1, 2, 9, 8, 6, 0, 8, 0, 9,
9, 8, 8, 8, 6, 8, 7, 4, 1, 3, 2, 6, 0, 4, 7, 2}
numacc1 = stats.Float64Data{10000001, 10000003, 10000002}
numacc2 = make(stats.Float64Data, 1001)
numacc3 = make(stats.Float64Data, 1001)
numacc4 = make(stats.Float64Data, 1001)
)
func init() {
numacc2[0] = 1.2
numacc3[0] = 1000000.2
numacc4[0] = 10000000.2
for i := 1; i < 1000; i += 2 {
numacc2[i] = 1.1
numacc2[i+1] = 1.3
numacc3[i] = 1000000.1
numacc3[i+1] = 1000000.3
numacc4[i] = 10000000.1
numacc4[i+1] = 10000000.3
}
}
func TestLewData(t *testing.T) {
r, e := stats.Mean(lew)
test("Lew Mean", r, -177.435000000000, 1e-15, e, t)
r, e = stats.StandardDeviationSample(lew)
test("Lew Standard Deviation", r, 277.332168044316, 1e-15, e, t)
r, e = stats.AutoCorrelation(lew, 1)
test("Lew AutoCorrelate1", r, -0.307304800605679, 1e-14, e, t)
}
func TestLotteryData(t *testing.T) {
r, e := stats.Mean(lottery)
test("Lottery Mean", r, 518.958715596330, 1e-15, e, t)
r, e = stats.StandardDeviationSample(lottery)
test("Lottery Standard Deviation", r, 291.699727470969, 1e-15, e, t)
r, e = stats.AutoCorrelation(lottery, 1)
test("Lottery AutoCorrelate1", r, -0.120948622967393, 1e-14, e, t)
}
func TestMavroData(t *testing.T) {
r, e := stats.Mean(mavro)
test("Mavro Mean", r, 2.00185600000000, 1e-15, e, t)
r, e = stats.StandardDeviationSample(mavro)
test("Mavro Standard Deviation", r, 0.000429123454003053, 1e-13, e, t)
r, e = stats.AutoCorrelation(mavro, 1)
test("Mavro AutoCorrelate1", r, 0.937989183438248, 1e-13, e, t)
}
func TestMichelsonData(t *testing.T) {
r, e := stats.Mean(michelson)
test("Michelson Mean", r, 299.852400000000, 1e-15, e, t)
r, e = stats.StandardDeviationSample(michelson)
test("Michelson Standard Deviation", r, 0.0790105478190518, 1e-13, e, t)
r, e = stats.AutoCorrelation(michelson, 1)
test("Michelson AutoCorrelate1", r, 0.535199668621283, 1e-13, e, t)
}
func TestPidigitsData(t *testing.T) {
r, e := stats.Mean(pidigits)
test("Pidigits Mean", r, 4.53480000000000, 1e-14, e, t)
r, e = stats.StandardDeviationSample(pidigits)
test("Pidigits Standard Deviation", r, 2.86733906028871, 1e-14, e, t)
r, e = stats.AutoCorrelation(pidigits, 1)
test("Pidigits AutoCorrelate1", r, -0.00355099287237972, 1e-13, e, t)
}
func TestNumacc1Data(t *testing.T) {
r, e := stats.Mean(numacc1)
test("numacc1 Mean", r, 10000002.0, 1e-14, e, t)
r, e = stats.StandardDeviationSample(numacc1)
test("numacc1 Standard Deviation", r, 1.0, 1e-13, e, t)
r, e = stats.AutoCorrelation(numacc1, 1)
test("Lew AutoCorrelateNumacc1", r, -0.5, 1e-15, e, t)
}
func TestNumacc2Data(t *testing.T) {
r, e := stats.Mean(numacc2)
test("numacc2 Mean", r, 1.2, 1e-10, e, t)
r, e = stats.StandardDeviationSample(numacc2)
test("numacc2 Standard Deviation", r, 0.1, 1e-10, e, t)
r, e = stats.AutoCorrelation(numacc2, 1)
test("Lew AutoCorrelateNumacc2", r, -0.999, 1e-10, e, t)
}
func TestNumacc3Data(t *testing.T) {
r, e := stats.Mean(numacc3)
test("numacc3 Mean", r, 1000000.2, 1e-15, e, t)
r, e = stats.StandardDeviationSample(numacc3)
test("numacc3 Standard Deviation", r, 0.1, 1e-9, e, t)
r, e = stats.AutoCorrelation(numacc3, 1)
test("Lew AutoCorrelateNumacc3", r, -0.999, 1e-10, e, t)
}
func TestNumacc4Data(t *testing.T) {
r, e := stats.Mean(numacc4)
test("numacc4 Mean", r, 10000000.2, 1e-10, e, t)
r, e = stats.StandardDeviationSample(numacc4)
test("numacc4 Standard Deviation", r, 0.1, 1e-7, e, t)
r, e = stats.AutoCorrelation(numacc4, 1)
test("Lew AutoCorrelateNumacc4", r, -0.999, 1e-7, e, t)
}
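// bench runs the mean, sample standard deviation and lag-1 autocorrelation
// for a dataset so the NIST benchmarks below share a single code path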
func bench(d stats.Float64Data) {
_, _ = stats.Mean(d)
_, _ = stats.StdDevS(d)
_, _ = stats.AutoCorrelation(d, 1)
}
func BenchmarkNistLew(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(lew)
}
}
func BenchmarkNistLottery(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(lottery)
}
}
func BenchmarkNistMavro(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(mavro)
}
}
func BenchmarkNistMichelson(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(michelson)
}
}
func BenchmarkNistPidigits(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(pidigits)
}
}
func BenchmarkNistNumacc1(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(numacc1)
}
}
func BenchmarkNistNumacc2(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(numacc2)
}
}
func BenchmarkNistNumacc3(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(numacc3)
}
}
func BenchmarkNistNumacc4(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(numacc4)
}
}
func BenchmarkNistAll(b *testing.B) {
for i := 0; i < b.N; i++ {
bench(lew)
bench(lottery)
bench(mavro)
bench(michelson)
bench(pidigits)
bench(numacc1)
bench(numacc2)
bench(numacc3)
bench(numacc4)
}
}
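// test fails the test when result r differs from the certified value a by
// more than the tolerance v, comparing relative error when a is non-zero
// and requiring NaN or Inf results to match NaN or Inf expectations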
func test(d string, r, a, v float64, e error, t *testing.T) {
if e != nil {
t.Error(e)
}
var failure bool
if math.IsNaN(r) || math.IsNaN(a) {
failure = math.IsNaN(r) != math.IsNaN(a)
} else if math.IsInf(r, 0) || math.IsInf(a, 0) {
failure = math.IsInf(r, 0) != math.IsInf(a, 0)
} else if a != 0 {
failure = math.Abs(r-a)/math.Abs(a) > v
} else {
failure = math.Abs(r) > v
}
if failure {
t.Errorf("%s => %v != %v", d, r, a)
}
}
stats-0.7.1/norm.go 0000664 0000000 0000000 00000016731 14427120666 0014177 0 ustar 00root root 0000000 0000000 package stats
import (
"math"
"math/rand"
"strings"
"time"
)
// NormPpfRvs generates random variates using the Point Percentile Function.
// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/
func NormPpfRvs(loc float64, scale float64, size int) []float64 {
rand.Seed(time.Now().UnixNano())
var toReturn []float64
for i := 0; i < size; i++ {
toReturn = append(toReturn, NormPpf(rand.Float64(), loc, scale))
}
return toReturn
}
// NormBoxMullerRvs generates random variates using the Box–Muller transform.
// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html
func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {
rand.Seed(time.Now().UnixNano())
var toReturn []float64
for i := 0; i < int(float64(size/2)+float64(size%2)); i++ {
// u1 and u2 are uniformly distributed random numbers between 0 and 1.
u1 := rand.Float64()
u2 := rand.Float64()
// x1 and x2 are normally distributed random numbers.
x1 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2)))
toReturn = append(toReturn, x1)
if (i+1)*2 <= size {
x2 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Sin(2*math.Pi*u2)))
toReturn = append(toReturn, x2)
}
}
return toReturn
}
// NormPdf is the probability density function.
func NormPdf(x float64, loc float64, scale float64) float64 {
return (math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi))
}
// NormLogPdf is the log of the probability density function.
func NormLogPdf(x float64, loc float64, scale float64) float64 {
return math.Log((math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi)))
}
// NormCdf is the cumulative distribution function.
func NormCdf(x float64, loc float64, scale float64) float64 {
return 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))
}
// NormLogCdf is the log of the cumulative distribution function.
func NormLogCdf(x float64, loc float64, scale float64) float64 {
return math.Log(0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2)))))
}
// NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate).
func NormSf(x float64, loc float64, scale float64) float64 {
return 1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2))))
}
// NormLogSf is the log of the survival function.
func NormLogSf(x float64, loc float64, scale float64) float64 {
return math.Log(1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2)))))
}
// NormPpf is the point percentile function.
// This is based on Peter John Acklam's inverse normal CDF.
// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible).
// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/
func NormPpf(p float64, loc float64, scale float64) (x float64) {
const (
a1 = -3.969683028665376e+01
a2 = 2.209460984245205e+02
a3 = -2.759285104469687e+02
a4 = 1.383577518672690e+02
a5 = -3.066479806614716e+01
a6 = 2.506628277459239e+00
b1 = -5.447609879822406e+01
b2 = 1.615858368580409e+02
b3 = -1.556989798598866e+02
b4 = 6.680131188771972e+01
b5 = -1.328068155288572e+01
c1 = -7.784894002430293e-03
c2 = -3.223964580411365e-01
c3 = -2.400758277161838e+00
c4 = -2.549732539343734e+00
c5 = 4.374664141464968e+00
c6 = 2.938163982698783e+00
d1 = 7.784695709041462e-03
d2 = 3.224671290700398e-01
d3 = 2.445134137142996e+00
d4 = 3.754408661907416e+00
plow = 0.02425
phigh = 1 - plow
)
if p < 0 || p > 1 {
return math.NaN()
} else if p == 0 {
return -math.Inf(0)
} else if p == 1 {
return math.Inf(0)
}
if p < plow {
q := math.Sqrt(-2 * math.Log(p))
x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
((((d1*q+d2)*q+d3)*q+d4)*q + 1)
} else if phigh < p {
q := math.Sqrt(-2 * math.Log(1-p))
x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
((((d1*q+d2)*q+d3)*q+d4)*q + 1)
} else {
q := p - 0.5
r := q * q
x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /
(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)
}
e := 0.5*math.Erfc(-x/math.Sqrt2) - p
u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)
x = x - u/(1+x*u/2)
return x*scale + loc
}
// NormIsf is the inverse survival function (inverse of sf).
func NormIsf(p float64, loc float64, scale float64) (x float64) {
if -NormPpf(p, loc, scale) == 0 {
return 0
}
return -NormPpf(p, loc, scale)
}
// NormMoment approximates the non-central (raw) moment of order n.
// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution
func NormMoment(n int, loc float64, scale float64) float64 {
toReturn := 0.0
for i := 0; i < n+1; i++ {
if (n-i)%2 == 0 {
toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) *
(float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) *
float64(factorial((n-i)/2))))
}
}
return toReturn
}
// NormStats returns the mean, variance, skew, and/or kurtosis.
// Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’).
// Takes string containing any of 'mvsk'.
// Returns array of m v s k in that order.
func NormStats(loc float64, scale float64, moments string) []float64 {
var toReturn []float64
if strings.ContainsAny(moments, "m") {
toReturn = append(toReturn, loc)
}
if strings.ContainsAny(moments, "v") {
toReturn = append(toReturn, math.Pow(scale, 2))
}
if strings.ContainsAny(moments, "s") {
toReturn = append(toReturn, 0.0)
}
if strings.ContainsAny(moments, "k") {
toReturn = append(toReturn, 0.0)
}
return toReturn
}
// NormEntropy is the differential entropy of the RV.
func NormEntropy(loc float64, scale float64) float64 {
return math.Log(scale * math.Sqrt(2*math.Pi*math.E))
}
// NormFit returns the maximum likelihood estimators for the Normal Distribution.
// Takes array of float64 values.
// Returns array of Mean followed by Standard Deviation.
func NormFit(data []float64) [2]float64 {
sum := 0.00
for i := 0; i < len(data); i++ {
sum += data[i]
}
mean := sum / float64(len(data))
stdNumerator := 0.00
for i := 0; i < len(data); i++ {
stdNumerator += math.Pow(data[i]-mean, 2)
}
return [2]float64{mean, math.Sqrt((stdNumerator) / (float64(len(data))))}
}
// NormMedian is the median of the distribution.
func NormMedian(loc float64, scale float64) float64 {
return loc
}
// NormMean is the mean/expected value of the distribution.
func NormMean(loc float64, scale float64) float64 {
return loc
}
// NormVar is the variance of the distribution.
func NormVar(loc float64, scale float64) float64 {
return math.Pow(scale, 2)
}
// NormStd is the standard deviation of the distribution.
func NormStd(loc float64, scale float64) float64 {
return scale
}
// NormInterval finds endpoints of the range that contains alpha percent of the distribution.
func NormInterval(alpha float64, loc float64, scale float64) [2]float64 {
q1 := (1.0 - alpha) / 2
q2 := (1.0 + alpha) / 2
a := NormPpf(q1, loc, scale)
b := NormPpf(q2, loc, scale)
return [2]float64{a, b}
}
// factorial is the naive factorial algorithm.
func factorial(x int) int {
if x == 0 {
return 1
}
return x * factorial(x-1)
}
// Ncr is an N choose R algorithm.
// Aaron Cannon's algorithm.
func Ncr(n, r int) int {
if n <= 1 || r == 0 || n == r {
return 1
}
if newR := n - r; newR < r {
r = newR
}
if r == 1 {
return n
}
ret := int(n - r + 1)
for i, j := ret+1, int(2); j <= r; i, j = i+1, j+1 {
ret = ret * i / j
}
return ret
}
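// A minimal usage sketch: NormPpf inverts NormCdf, so feeding a quantile back
// through the CDF recovers the original probability, and NormInterval wraps
// the same pair of quantile calls.
//
// x := NormPpf(0.1, 0, 1)              // ≈ -1.2815515655446004
// p := NormCdf(x, 0, 1)                // ≈ 0.1
// interval := NormInterval(0.95, 0, 1) // ≈ [2]float64{-1.96, 1.96}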
stats-0.7.1/norm_test.go 0000664 0000000 0000000 00000011226 14427120666 0015230 0 ustar 00root root 0000000 0000000 package stats_test
import (
"math"
"reflect"
"testing"
"github.com/montanaflynn/stats"
)
func TestNormPpf(t *testing.T) {
if stats.NormPpf(0.5, 0, 1) != 0 {
t.Error("Input 0.5, Expected 0")
}
if !veryclose(stats.NormPpf(0.1, 0, 1), -1.2815515655446004) {
t.Error("Input 0.1, Expected -1.2815515655446004")
}
if stats.NormPpf(0.002423, 0, 1) != -2.817096255323953 {
t.Error("Input 0.002423, Expected -2.817096255323953")
}
if !close(stats.NormPpf(1-0.002423, 0, 1), 2.817096255323956) {
t.Error("Input 1 - 0.002423, Expected 2.817096255323956")
}
if !math.IsNaN(stats.NormPpf(1.1, 0, 1)) {
t.Error("Input 1.1, Expected NaN")
}
if !math.IsNaN(stats.NormPpf(-1.1, 0, 1)) {
t.Error("Input -0.1, Expected Nan")
}
if stats.NormPpf(0, 0, 1) != -math.Inf(1) {
t.Error("Input 0, Expected -Inf")
}
if stats.NormPpf(1, 0, 1) != math.Inf(1) {
t.Error("Input 1, Expected Inf")
}
}
func TestNormCdf(t *testing.T) {
if stats.NormCdf(0, 0, 1) != 0.5 {
t.Error("Input 0, Expected 0.5")
}
if stats.NormCdf(0.5, 0, 1) != 0.6914624612740131 {
t.Error("Input 0.5, Expected 0.6914624612740131")
}
if stats.NormCdf(-0.5, 0, 1) != 0.3085375387259869 {
t.Error("Input -0.5, Expected 0.3085375387259869")
}
}
func TestNormPdf(t *testing.T) {
if stats.NormPdf(0.5, 0, 1) != 0.35206532676429947 {
t.Error("Input 0.5, Expected 0.35206532676429947")
}
if stats.NormPdf(0, 0, 1) != 0.3989422804014327 {
t.Error("Input 0, Expected 0.3989422804014327")
}
if stats.NormPdf(-0.5, 0, 1) != 0.35206532676429947 {
t.Error("Input -0.5, Expected 0.35206532676429947")
}
}
func TestNormLogPdf(t *testing.T) {
if stats.NormLogPdf(0, 0, 1) != -0.9189385332046727 {
t.Error("Input 0, Expected -0.9189385332046727")
}
if stats.NormPdf(0, 0, 1) != 0.3989422804014327 {
t.Error("Input 0, Expected 0.3989422804014327")
}
if stats.NormPdf(-0.5, 0, 1) != 0.35206532676429947 {
t.Error("Input -0.5, Expected 0.35206532676429947")
}
}
func TestNormLogCdf(t *testing.T) {
if stats.NormLogCdf(0.5, 0, 1) != -0.36894641528865635 {
t.Error("Input 0.5, Expected -0.36894641528865635")
}
}
func TestNormIsf(t *testing.T) {
if stats.NormIsf(0.5, 0, 1) != 0 {
t.Error("Input 0.5, Expected 0")
}
if !veryclose(stats.NormIsf(0.1, 0, 1), 1.2815515655446004) {
t.Error("Input 0.1, Expected 1.2815515655446004")
}
}
func TestNormSf(t *testing.T) {
if stats.NormSf(0.5, 0, 1) != 0.3085375387259869 {
t.Error("Input 0.5, Expected 0.3085375387259869")
}
}
func TestNormLogSf(t *testing.T) {
if stats.NormLogSf(0.5, 0, 1) != -1.1759117615936185 {
t.Error("Input 0.5, Expected -1.1759117615936185")
}
}
func TestNormMoment(t *testing.T) {
if stats.NormMoment(4, 0, 1) != 3 {
t.Error("Input 3, Expected 3")
}
if stats.NormMoment(4, 0, 1) != 3 {
t.Error("Input 3, Expected 3")
}
}
func TestNormStats(t *testing.T) {
if !reflect.DeepEqual(stats.NormStats(0, 1, "m"), []float64{0}) {
t.Error("Input 'm' , Expected 0")
}
if !reflect.DeepEqual(stats.NormStats(0, 1, "v"), []float64{1}) {
t.Error("Input 'v' , Expected 1")
}
if !reflect.DeepEqual(stats.NormStats(0, 1, "s"), []float64{0}) {
t.Error("Input 's' , Expected 0")
}
if !reflect.DeepEqual(stats.NormStats(0, 1, "k"), []float64{0}) {
t.Error("Input 'k' , Expected 0")
}
}
func TestNormEntropy(t *testing.T) {
if stats.NormEntropy(0, 1) != 1.4189385332046727 {
t.Error("Input ( 0 , 1 ), Expected 1.4189385332046727")
}
}
func TestNormFit(t *testing.T) {
if !reflect.DeepEqual(stats.NormFit([]float64{0, 2, 3, 4}), [2]float64{2.25, 1.479019945774904}) {
t.Error("Input (0,2,3,4), Expected {2.25, 1.479019945774904}")
}
}
func TestNormInterval(t *testing.T) {
if !reflect.DeepEqual(stats.NormInterval(0.5, 0, 1), [2]float64{-0.6744897501960818, 0.674489750196082}) {
t.Error("Input (50 % ), Expected {-0.6744897501960818, 0.674489750196082}")
}
}
func TestNormMean(t *testing.T) {
if stats.NormMean(0, 1) != 0 {
t.Error("Input (0, 1), Expected 0")
}
}
func TestNormMedian(t *testing.T) {
if stats.NormMedian(0, 1) != 0 {
t.Error("Input (0, 1), Expected 0")
}
}
func TestNormVar(t *testing.T) {
if stats.NormVar(0, 1) != 1 {
t.Error("Input (0, 1), Expected 1")
}
}
func TestNormStd(t *testing.T) {
if stats.NormStd(0, 1) != 1 {
t.Error("Input (0, 1), Expected 1")
}
}
func TestNormPpfRvs(t *testing.T) {
if len(stats.NormPpfRvs(0, 1, 101)) != 101 {
t.Error("Input size=101, Expected 101")
}
}
func TestNormBoxMullerRvs(t *testing.T) {
if len(stats.NormBoxMullerRvs(0, 1, 101)) != 101 {
t.Error("Input size=101, Expected 101")
}
}
func TestNcr(t *testing.T) {
if stats.Ncr(4, 1) != 4 {
t.Error("Input 4 choose 1, Expected 4")
}
if stats.Ncr(4, 3) != 4 {
t.Error("Input 4 choose 3, Expected 4")
}
}
stats-0.7.1/outlier.go 0000664 0000000 0000000 00000002051 14427120666 0014675 0 ustar 00root root 0000000 0000000 package stats
// Outliers holds mild and extreme outliers found in data
type Outliers struct {
Mild Float64Data
Extreme Float64Data
}
// QuartileOutliers finds the mild and extreme outliers
func QuartileOutliers(input Float64Data) (Outliers, error) {
if input.Len() == 0 {
return Outliers{}, EmptyInputErr
}
// Start by sorting a copy of the slice
copy := sortedCopy(input)
// Calculate the quartiles and interquartile range
qs, _ := Quartile(copy)
iqr, _ := InterQuartileRange(copy)
// Calculate the lower and upper inner and outer fences
lif := qs.Q1 - (1.5 * iqr)
uif := qs.Q3 + (1.5 * iqr)
lof := qs.Q1 - (3 * iqr)
uof := qs.Q3 + (3 * iqr)
// Find the data points that are outside of the
// inner and outer fences and add them to mild
// and extreme outlier slices
var mild Float64Data
var extreme Float64Data
for _, v := range copy {
if v < lof || v > uof {
extreme = append(extreme, v)
} else if v < lif || v > uif {
mild = append(mild, v)
}
}
// Wrap them into our struct
return Outliers{mild, extreme}, nil
}
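// exampleQuartileOutliersSketch is a hypothetical editorial helper, not part
// of the library API, illustrating the fence rule above: values beyond
// Q1-1.5*IQR or Q3+1.5*IQR are mild outliers, and values beyond the 3*IQR
// outer fences are extreme. For the data below, 15 and 18 fall outside the
// inner fences while -1000 and 100 fall outside the outer fences.
func exampleQuartileOutliersSketch() Outliers {
	data := Float64Data{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100}
	o, _ := QuartileOutliers(data)
	return o // Mild: {15, 18}, Extreme: {-1000, 100}
}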
stats-0.7.1/outlier_test.go 0000664 0000000 0000000 00000001264 14427120666 0015741 0 ustar 00root root 0000000 0000000 package stats_test
import (
"testing"
"github.com/montanaflynn/stats"
)
func TestQuartileOutliers(t *testing.T) {
s1 := []float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100}
o, _ := stats.QuartileOutliers(s1)
if o.Mild[0] != 15 {
t.Errorf("First Mild Outlier %v != 15", o.Mild[0])
}
if o.Mild[1] != 18 {
t.Errorf("Second Mild Outlier %v != 18", o.Mild[1])
}
if o.Extreme[0] != -1000 {
t.Errorf("First Extreme Outlier %v != -1000", o.Extreme[0])
}
if o.Extreme[1] != 100 {
t.Errorf("Second Extreme Outlier %v != 100", o.Extreme[1])
}
_, err := stats.QuartileOutliers([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
stats-0.7.1/percentile.go 0000664 0000000 0000000 00000003412 14427120666 0015346 0 ustar 00root root 0000000 0000000 package stats
import (
"math"
)
// Percentile finds the relative standing in a slice of floats
func Percentile(input Float64Data, percent float64) (percentile float64, err error) {
length := input.Len()
if length == 0 {
return math.NaN(), EmptyInputErr
}
if length == 1 {
return input[0], nil
}
if percent <= 0 || percent > 100 {
return math.NaN(), BoundsErr
}
// Start by sorting a copy of the slice
c := sortedCopy(input)
// Multiply percent by length of input
index := (percent / 100) * float64(len(c))
// Check if the index is a whole number
if index == float64(int64(index)) {
// Convert float to int
i := int(index)
// Find the value at the index
percentile = c[i-1]
} else if index > 1 {
// Convert float to int via truncation
i := int(index)
// Find the average of the index and following values
percentile, _ = Mean(Float64Data{c[i-1], c[i]})
} else {
return math.NaN(), BoundsErr
}
return percentile, nil
}
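// examplePercentileSketch is a hypothetical editorial helper, not part of the
// library API, walking through the interpolation above: for ten sorted values
// and percent 50 the index is exactly 5, so the 5th value is returned; for
// percent 25 the index is 2.5, so the 2nd and 3rd values are averaged.
func examplePercentileSketch() (float64, float64) {
	data := Float64Data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	p50, _ := Percentile(data, 50) // index 5 is whole, returns 5
	p25, _ := Percentile(data, 25) // index 2.5, returns (2+3)/2 = 2.5
	return p50, p25
}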
// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {
// Find the length of items in the slice
il := input.Len()
// Return an error for empty slices
if il == 0 {
return math.NaN(), EmptyInputErr
}
// Return error for less than 0 or greater than 100 percentages
if percent < 0 || percent > 100 {
return math.NaN(), BoundsErr
}
// Start by sorting a copy of the slice
c := sortedCopy(input)
// Return the last item
if percent == 100.0 {
return c[il-1], nil
}
// Find ordinal ranking
or := int(math.Ceil(float64(il) * percent / 100))
// Return the item that is in the place of the ordinal rank
if or == 0 {
return c[0], nil
}
return c[or-1], nil
}
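// examplePercentileNearestRankSketch is a hypothetical editorial helper, not
// part of the library API. The nearest-rank method never interpolates: the
// ordinal rank is ceil(n * percent / 100) and that element of the sorted copy
// is returned, so the result is always a member of the input.
func examplePercentileNearestRankSketch() float64 {
	data := Float64Data{15, 20, 35, 40, 50}
	p, _ := PercentileNearestRank(data, 30) // rank ceil(5 * 0.3) = 2, so 20
	return p
}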
stats-0.7.1/percentile_test.go 0000664 0000000 0000000 00000006562 14427120666 0016416 0 ustar 00root root 0000000 0000000 package stats_test
import (
"reflect"
"testing"
"github.com/montanaflynn/stats"
)
func TestPercentile(t *testing.T) {
m, _ := stats.Percentile([]float64{43, 54, 56, 61, 62, 66}, 90)
if m != 64.0 {
t.Errorf("%.1f != %.1f", m, 64.0)
}
m, _ = stats.Percentile([]float64{43}, 90)
if m != 43.0 {
t.Errorf("%.1f != %.1f", m, 43.0)
}
m, _ = stats.Percentile([]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 50)
if m != 5.0 {
t.Errorf("%.1f != %.1f", m, 5.0)
}
m, _ = stats.Percentile([]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 99.9)
if m != 9.5 {
t.Errorf("%.1f != %.1f", m, 9.5)
}
m, _ = stats.Percentile([]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 100)
if m != 10.0 {
t.Errorf("%.1f != %.1f", m, 10.0)
}
_, err := stats.Percentile([]float64{}, 99.9)
if err != stats.EmptyInputErr {
t.Errorf("Empty slice didn't return expected error; got %v", err)
}
_, err = stats.Percentile([]float64{1, 2, 3, 4, 5}, 0)
if err != stats.BoundsErr {
t.Errorf("Zero percent didn't return expected error; got %v", err)
}
_, err = stats.Percentile([]float64{1, 2, 3, 4, 5}, 0.13)
if err != stats.BoundsErr {
t.Errorf("Too low percent didn't return expected error; got %v", err)
}
_, err = stats.Percentile([]float64{1, 2, 3, 4, 5}, 101)
if err != stats.BoundsErr {
t.Errorf("Too high percent didn't return expected error; got %v", err)
}
}
func TestPercentileSortSideEffects(t *testing.T) {
s := []float64{43, 54, 56, 44, 62, 66}
a := []float64{43, 54, 56, 44, 62, 66}
_, _ = stats.Percentile(s, 90)
if !reflect.DeepEqual(s, a) {
t.Errorf("%.1f != %.1f", s, a)
}
}
func BenchmarkPercentileSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Percentile(makeFloatSlice(5), 50)
}
}
func BenchmarkPercentileLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Percentile(lf, 50)
}
}
func TestPercentileNearestRank(t *testing.T) {
f1 := []float64{35, 20, 15, 40, 50}
f2 := []float64{20, 6, 7, 8, 8, 10, 13, 15, 16, 3}
f3 := makeFloatSlice(101)
for _, c := range []struct {
sample []float64
percent float64
result float64
}{
{f1, 30, 20},
{f1, 40, 20},
{f1, 50, 35},
{f1, 75, 40},
{f1, 95, 50},
{f1, 99, 50},
{f1, 99.9, 50},
{f1, 100, 50},
{f2, 25, 7},
{f2, 50, 8},
{f2, 75, 15},
{f2, 100, 20},
{f3, 1, 100},
{f3, 99, 9900},
{f3, 100, 10000},
{f3, 0, 0},
} {
got, err := stats.PercentileNearestRank(c.sample, c.percent)
if err != nil {
t.Errorf("Should not have returned an error")
}
if got != c.result {
t.Errorf("%v != %v", got, c.result)
}
}
_, err := stats.PercentileNearestRank([]float64{}, 50)
if err == nil {
t.Errorf("Should have returned an empty slice error")
}
_, err = stats.PercentileNearestRank([]float64{1, 2, 3, 4, 5}, -0.01)
if err == nil {
t.Errorf("Should have returned an percentage must be above 0 error")
}
_, err = stats.PercentileNearestRank([]float64{1, 2, 3, 4, 5}, 110)
if err == nil {
t.Errorf("Should have returned an percentage must not be above 100 error")
}
}
func BenchmarkPercentileNearestRankSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.PercentileNearestRank(makeFloatSlice(5), 50)
}
}
func BenchmarkPercentileNearestRankLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.PercentileNearestRank(lf, 50)
}
}
stats-0.7.1/quartile.go 0000664 0000000 0000000 00000002765 14427120666 0015054 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Quartiles holds the three quartile points
type Quartiles struct {
Q1 float64
Q2 float64
Q3 float64
}
// Quartile returns the three quartile points from a slice of data
func Quartile(input Float64Data) (Quartiles, error) {
il := input.Len()
if il == 0 {
return Quartiles{}, EmptyInputErr
}
// Start by sorting a copy of the slice
copy := sortedCopy(input)
// Find the cutoff places depending on whether
// the input slice length is even or odd
var c1 int
var c2 int
if il%2 == 0 {
c1 = il / 2
c2 = il / 2
} else {
c1 = (il - 1) / 2
c2 = c1 + 1
}
// Find the Medians with the cutoff points
Q1, _ := Median(copy[:c1])
Q2, _ := Median(copy)
Q3, _ := Median(copy[c2:])
return Quartiles{Q1, Q2, Q3}, nil
}
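// exampleQuartileSketch is a hypothetical editorial helper, not part of the
// library API, showing the cutoff logic above: with an even-length input the
// sorted slice is split in half, Q1 and Q3 are the medians of the halves and
// Q2 is the median of the whole slice.
func exampleQuartileSketch() Quartiles {
	q, _ := Quartile([]float64{7, 15, 36, 39, 40, 41})
	return q // Quartiles{Q1: 15, Q2: 37.5, Q3: 40}
}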
// InterQuartileRange finds the range between Q1 and Q3
func InterQuartileRange(input Float64Data) (float64, error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
qs, _ := Quartile(input)
iqr := qs.Q3 - qs.Q1
return iqr, nil
}
// Midhinge finds the average of the first and third quartiles
func Midhinge(input Float64Data) (float64, error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
qs, _ := Quartile(input)
mh := (qs.Q1 + qs.Q3) / 2
return mh, nil
}
// Trimean finds the average of the median and the midhinge
func Trimean(input Float64Data) (float64, error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
c := sortedCopy(input)
q, _ := Quartile(c)
return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil
}
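// exampleTrimeanSketch is a hypothetical editorial helper, not part of the
// library API, relating the two summaries above: the midhinge is (Q1+Q3)/2
// and the trimean is (Q1 + 2*Q2 + Q3)/4, i.e. the average of the median and
// the midhinge.
func exampleTrimeanSketch() (float64, float64) {
	data := Float64Data{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13}
	mh, _ := Midhinge(data) // 7.5
	tr, _ := Trimean(data)  // 7.25
	return mh, tr
}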
stats-0.7.1/quartile_test.go 0000664 0000000 0000000 00000003372 14427120666 0016106 0 ustar 00root root 0000000 0000000 package stats_test
import (
"testing"
"github.com/montanaflynn/stats"
)
func TestQuartile(t *testing.T) {
s1 := []float64{6, 7, 15, 36, 39, 40, 41, 42, 43, 47, 49}
s2 := []float64{7, 15, 36, 39, 40, 41}
for _, c := range []struct {
in []float64
Q1 float64
Q2 float64
Q3 float64
}{
{s1, 15, 40, 43},
{s2, 15, 37.5, 40},
} {
quartiles, err := stats.Quartile(c.in)
if err != nil {
t.Errorf("Should not have returned an error")
}
if quartiles.Q1 != c.Q1 {
t.Errorf("Q1 %v != %v", quartiles.Q1, c.Q1)
}
if quartiles.Q2 != c.Q2 {
t.Errorf("Q2 %v != %v", quartiles.Q2, c.Q2)
}
if quartiles.Q3 != c.Q3 {
t.Errorf("Q3 %v != %v", quartiles.Q3, c.Q3)
}
}
_, err := stats.Quartile([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestInterQuartileRange(t *testing.T) {
s1 := []float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118}
iqr, _ := stats.InterQuartileRange(s1)
if iqr != 10 {
t.Errorf("IQR %v != 10", iqr)
}
_, err := stats.InterQuartileRange([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestMidhinge(t *testing.T) {
s1 := []float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13}
mh, _ := stats.Midhinge(s1)
if mh != 7.5 {
t.Errorf("Midhinge %v != 7.5", mh)
}
_, err := stats.Midhinge([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestTrimean(t *testing.T) {
s1 := []float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13}
tr, _ := stats.Trimean(s1)
if tr != 7.25 {
t.Errorf("Trimean %v != 7.25", tr)
}
_, err := stats.Trimean([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
stats-0.7.1/ranksum.go 0000664 0000000 0000000 00000012625 14427120666 0014702 0 ustar 00root root 0000000 0000000 package stats
// import "math"
//
// // WilcoxonRankSum tests the null hypothesis that two sets
// // of data are drawn from the same distribution. It does
// // not handle ties between measurements in x and y.
// //
// // Parameters:
// // data1 Float64Data: First set of data points.
// // data2 Float64Data: Second set of data points.
// // Length of both data samples must be equal.
// //
// // Return:
// // statistic float64: The test statistic under the
// // large-sample approximation that the
// // rank sum statistic is normally distributed.
// // pvalue float64: The two-sided p-value of the test
// // err error: Any error from the input data parameters
// //
// // https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
// func WilcoxonRankSum(data1, data2 Float64Data) (float64, float64, error) {
//
// l1 := data1.Len()
// l2 := data2.Len()
//
// if l1 == 0 || l2 == 0 {
// return math.NaN(), math.NaN(), EmptyInputErr
// }
//
// if l1 != l2 {
// return math.NaN(), math.NaN(), SizeErr
// }
//
// alldata := Float64Data{}
// alldata = append(alldata, data1...)
// alldata = append(alldata, data2...)
//
// // ranked :=
//
// return 0.0, 0.0, nil
// }
//
// // x, y = map(np.asarray, (x, y))
// // n1 = len(x)
// // n2 = len(y)
// // alldata = np.concatenate((x, y))
// // ranked = rankdata(alldata)
// // x = ranked[:n1]
// // s = np.sum(x, axis=0)
// // expected = n1 * (n1+n2+1) / 2.0
// // z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
// // prob = 2 * distributions.norm.sf(abs(z))
// //
// // return RanksumsResult(z, prob)
//
// // def rankdata(a, method='average'):
// // """
// // Assign ranks to data, dealing with ties appropriately.
// // Ranks begin at 1. The `method` argument controls how ranks are assigned
// // to equal values. See [1]_ for further discussion of ranking methods.
// // Parameters
// // ----------
// // a : array_like
// // The array of values to be ranked. The array is first flattened.
// // method : str, optional
// // The method used to assign ranks to tied elements.
// // The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
// // 'average':
// // The average of the ranks that would have been assigned to
// // all the tied values is assigned to each value.
// // 'min':
// // The minimum of the ranks that would have been assigned to all
// // the tied values is assigned to each value. (This is also
// // referred to as "competition" ranking.)
// // 'max':
// // The maximum of the ranks that would have been assigned to all
// // the tied values is assigned to each value.
// // 'dense':
// // Like 'min', but the rank of the next highest element is assigned
// // the rank immediately after those assigned to the tied elements.
// // 'ordinal':
// // All values are given a distinct rank, corresponding to the order
// // that the values occur in `a`.
// // The default is 'average'.
// // Returns
// // -------
// // ranks : ndarray
// // An array of length equal to the size of `a`, containing rank
// // scores.
// // References
// // ----------
// // .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
// // Examples
// // --------
// // >>> from scipy.stats import rankdata
// // >>> rankdata([0, 2, 3, 2])
// // array([ 1. , 2.5, 4. , 2.5])
// // """
// //
// // arr = np.ravel(np.asarray(a))
// // algo = 'quicksort'
// // sorter = np.argsort(arr, kind=algo)
// //
// // inv = np.empty(sorter.size, dtype=np.intp)
// // inv[sorter] = np.arange(sorter.size, dtype=np.intp)
// //
// //
// // arr = arr[sorter]
// // obs = np.r_[True, arr[1:] != arr[:-1]]
// // dense = obs.cumsum()[inv]
// //
// //
// // # cumulative counts of each unique value
// // count = np.r_[np.nonzero(obs)[0], len(obs)]
// //
// // # average method
// // return .5 * (count[dense] + count[dense - 1] + 1)
//
// type rankable interface {
// Len() int
// RankEqual(int, int) bool
// }
//
// func StandardRank(d rankable) []float64 {
// r := make([]float64, d.Len())
// var k int
// for i := range r {
// if i == 0 || !d.RankEqual(i, i-1) {
// k = i + 1
// }
// r[i] = float64(k)
// }
// return r
// }
//
// func ModifiedRank(d rankable) []float64 {
// r := make([]float64, d.Len())
// for i := range r {
// k := i + 1
// for j := i + 1; j < len(r) && d.RankEqual(i, j); j++ {
// k = j + 1
// }
// r[i] = float64(k)
// }
// return r
// }
//
// func DenseRank(d rankable) []float64 {
// r := make([]float64, d.Len())
// var k int
// for i := range r {
// if i == 0 || !d.RankEqual(i, i-1) {
// k++
// }
// r[i] = float64(k)
// }
// return r
// }
//
// func OrdinalRank(d rankable) []float64 {
// r := make([]float64, d.Len())
// for i := range r {
// r[i] = float64(i + 1)
// }
// return r
// }
//
// func FractionalRank(d rankable) []float64 {
// r := make([]float64, d.Len())
// for i := 0; i < len(r); {
// var j int
// f := float64(i + 1)
// for j = i + 1; j < len(r) && d.RankEqual(i, j); j++ {
// f += float64(j + 1)
// }
// f /= float64(j - i)
// for ; i < j; i++ {
// r[i] = f
// }
// }
// return r
// }
stats-0.7.1/ranksum_test.go 0000664 0000000 0000000 00000001750 14427120666 0015736 0 ustar 00root root 0000000 0000000 package stats_test
// import (
// "testing"
// )
//
// // >>> y1=[125,115,130,140,140,115,140,125,140,135]
// // >>> y2=[110,122,125,120,140,124,123,137,135,145]
// // >>> ss.wilcoxon(y1, y2)
// // (18.0, 0.5936305914425295)
//
// // func ExampleWilcoxonRankSum() {
// // t, p, err := WilcoxonRankSum([]float64{3.0, 1.0, 0.2}, []float64{3.1, 1.2, 1.2})
// // fmt.Println(t, p, err)
// // // Output: 18.0, 0.5936305914425295, nil
// //
// // }
//
// func TestRanked(t *testing.T) {
//
// var data = []float64{0.1, 3.2, 3.2}
//
// StandardRank(data)
// // show := func(name string, fn func(rankable) []float64) {
// // fmt.Println(name, "Ranking:")
// // r := fn(data)
// // for i, d := range data {
// // fmt.Printf("%4v\n", r[i])
// // }
// // }
// //
// // sort.Sort(data)
// // show("Standard", StandardRank)
// // show("\nModified", ModifiedRank)
// // show("\nDense", DenseRank)
// // show("\nOrdinal", OrdinalRank)
// // show("\nFractional", FractionalRank)
//
// }
stats-0.7.1/regression.go 0000664 0000000 0000000 00000004660 14427120666 0015402 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Series is a container for a series of data
type Series []Coordinate
// Coordinate holds the data in a series
type Coordinate struct {
X, Y float64
}
// LinearRegression finds the least squares linear regression on data series
func LinearRegression(s Series) (regressions Series, err error) {
if len(s) == 0 {
return nil, EmptyInputErr
}
// Placeholder for the math to be done
var sum [5]float64
// Loop over data keeping index in place
i := 0
for ; i < len(s); i++ {
sum[0] += s[i].X
sum[1] += s[i].Y
sum[2] += s[i].X * s[i].X
sum[3] += s[i].X * s[i].Y
sum[4] += s[i].Y * s[i].Y
}
// Find gradient and intercept
f := float64(i)
gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0])
intercept := (sum[1] / f) - (gradient * sum[0] / f)
// Create the new regression series
for j := 0; j < len(s); j++ {
regressions = append(regressions, Coordinate{
X: s[j].X,
Y: s[j].X*gradient + intercept,
})
}
return regressions, nil
}
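// exampleLinearRegressionSketch is a hypothetical editorial helper, not part
// of the library API. The sums accumulated above feed the usual least squares
// formulas: gradient = (n*Sxy - Sx*Sy) / (n*Sxx - Sx*Sx) and
// intercept = mean(y) - gradient*mean(x); the returned series holds the
// fitted Y for each original X.
func exampleLinearRegressionSketch() Series {
	fitted, _ := LinearRegression(Series{{1, 2.3}, {2, 3.3}, {3, 3.7}})
	return fitted // fitted Y values are approximately 2.4, 3.1 and 3.8
}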
// ExponentialRegression returns an exponential regression on data series
func ExponentialRegression(s Series) (regressions Series, err error) {
if len(s) == 0 {
return nil, EmptyInputErr
}
var sum [6]float64
for i := 0; i < len(s); i++ {
if s[i].Y < 0 {
return nil, YCoordErr
}
sum[0] += s[i].X
sum[1] += s[i].Y
sum[2] += s[i].X * s[i].X * s[i].Y
sum[3] += s[i].Y * math.Log(s[i].Y)
sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y)
sum[5] += s[i].X * s[i].Y
}
denominator := (sum[1]*sum[2] - sum[5]*sum[5])
a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator)
b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator
for j := 0; j < len(s); j++ {
regressions = append(regressions, Coordinate{
X: s[j].X,
Y: a * math.Exp(b*s[j].X),
})
}
return regressions, nil
}
// LogarithmicRegression returns a logarithmic regression on data series
func LogarithmicRegression(s Series) (regressions Series, err error) {
if len(s) == 0 {
return nil, EmptyInputErr
}
var sum [4]float64
i := 0
for ; i < len(s); i++ {
sum[0] += math.Log(s[i].X)
sum[1] += s[i].Y * math.Log(s[i].X)
sum[2] += s[i].Y
sum[3] += math.Pow(math.Log(s[i].X), 2)
}
f := float64(i)
a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0])
b := (sum[2] - a*sum[0]) / f
for j := 0; j < len(s); j++ {
regressions = append(regressions, Coordinate{
X: s[j].X,
Y: b + a*math.Log(s[j].X),
})
}
return regressions, nil
}
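// exampleRegressionComparisonSketch is a hypothetical editorial helper, not
// part of the library API. LogarithmicRegression fits Y = b + a*ln(X), so
// every X should be positive, while ExponentialRegression above fits
// Y = a*e^(b*X) and rejects negative Y values with YCoordErr.
func exampleRegressionComparisonSketch() (Series, Series) {
	data := Series{{1, 2.3}, {2, 3.3}, {3, 3.7}, {4, 4.3}, {5, 5.3}}
	logFit, _ := LogarithmicRegression(data)
	expFit, _ := ExponentialRegression(data)
	return logFit, expFit
}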
stats-0.7.1/regression_test.go 0000664 0000000 0000000 00000005345 14427120666 0016442 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleLinearRegression() {
data := []stats.Coordinate{
{1, 2.3},
{2, 3.3},
{3, 3.7},
}
r, _ := stats.LinearRegression(data)
fmt.Println(r)
// Output: [{1 2.400000000000001} {2 3.1} {3 3.7999999999999994}]
}
func TestLinearRegression(t *testing.T) {
data := []stats.Coordinate{
{1, 2.3},
{2, 3.3},
{3, 3.7},
{4, 4.3},
{5, 5.3},
}
r, _ := stats.LinearRegression(data)
a := 2.3800000000000026
if !close(r[0].Y, a) {
t.Errorf("%v != %v", r[0].Y, a)
}
a = 3.0800000000000014
if !veryclose(r[1].Y, a) {
t.Errorf("%v != %v", r[1].Y, a)
}
a = 3.7800000000000002
if r[2].Y != a {
t.Errorf("%v != %v", r[2].Y, a)
}
a = 4.479999999999999
if !veryclose(r[3].Y, a) {
t.Errorf("%v != %v", r[3].Y, a)
}
a = 5.179999999999998
if !veryclose(r[4].Y, a) {
t.Errorf("%v != %v", r[4].Y, a)
}
_, err := stats.LinearRegression([]stats.Coordinate{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestExponentialRegression(t *testing.T) {
data := []stats.Coordinate{
{1, 2.3},
{2, 3.3},
{3, 3.7},
{4, 4.3},
{5, 5.3},
}
r, _ := stats.ExponentialRegression(data)
a, _ := stats.Round(r[0].Y, 3)
if a != 2.515 {
t.Errorf("%v != %v", r[0].Y, 2.515)
}
a, _ = stats.Round(r[1].Y, 3)
if a != 3.032 {
t.Errorf("%v != %v", r[1].Y, 3.032)
}
a, _ = stats.Round(r[2].Y, 3)
if a != 3.655 {
t.Errorf("%v != %v", r[2].Y, 3.655)
}
a, _ = stats.Round(r[3].Y, 3)
if a != 4.407 {
t.Errorf("%v != %v", r[3].Y, 4.407)
}
a, _ = stats.Round(r[4].Y, 3)
if a != 5.313 {
t.Errorf("%v != %v", r[4].Y, 5.313)
}
_, err := stats.ExponentialRegression([]stats.Coordinate{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestExponentialRegressionYCoordErr(t *testing.T) {
c := []stats.Coordinate{{1, -5}, {4, 25}, {6, 5}}
_, err := stats.ExponentialRegression(c)
if err != stats.YCoordErr {
t.Error(err)
}
}
func TestLogarithmicRegression(t *testing.T) {
data := []stats.Coordinate{
{1, 2.3},
{2, 3.3},
{3, 3.7},
{4, 4.3},
{5, 5.3},
}
r, _ := stats.LogarithmicRegression(data)
a := 2.1520822363811702
if !close(r[0].Y, a) {
t.Errorf("%v != %v", r[0].Y, a)
}
a = 3.3305559222492214
if !veryclose(r[1].Y, a) {
t.Errorf("%v != %v", r[1].Y, a)
}
a = 4.019918836568674
if !veryclose(r[2].Y, a) {
t.Errorf("%v != %v", r[2].Y, a)
}
a = 4.509029608117273
if !veryclose(r[3].Y, a) {
t.Errorf("%v != %v", r[3].Y, a)
}
a = 4.888413396683663
if !veryclose(r[4].Y, a) {
t.Errorf("%v != %v", r[4].Y, a)
}
_, err := stats.LogarithmicRegression([]stats.Coordinate{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
stats-0.7.1/round.go 0000664 0000000 0000000 00000001616 14427120666 0014347 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Round a float to a specific decimal place or precision
func Round(input float64, places int) (rounded float64, err error) {
// If the float is not a number
if math.IsNaN(input) {
return math.NaN(), NaNErr
}
// Find out the actual sign and correct the input for later
sign := 1.0
if input < 0 {
sign = -1
input *= -1
}
// Use the places arg to get the amount of precision wanted
precision := math.Pow(10, float64(places))
// Find the decimal place we are looking to round
digit := input * precision
// Get the actual decimal number as a fraction to be compared
_, decimal := math.Modf(digit)
// If the decimal is less than .5 we round down otherwise up
if decimal >= 0.5 {
rounded = math.Ceil(digit)
} else {
rounded = math.Floor(digit)
}
// Finally we do the math to actually create a rounded number
return rounded / precision * sign, nil
}
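// exampleRoundSketch is a hypothetical editorial helper, not part of the
// library API. Round applies half-up rounding to the magnitude and then
// restores the sign, so negative inputs round away from zero at the .5
// boundary.
func exampleRoundSketch() (float64, float64) {
	up, _ := Round(5.55, 1)      // 5.6
	down, _ := Round(-0.1111, 2) // -0.11
	return up, down
}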
stats-0.7.1/round_test.go 0000664 0000000 0000000 00000001460 14427120666 0015403 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"math"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleRound() {
rounded, _ := stats.Round(1.534424, 1)
fmt.Println(rounded)
// Output: 1.5
}
func TestRound(t *testing.T) {
for _, c := range []struct {
number float64
decimals int
result float64
}{
{0.1111, 1, 0.1},
{-0.1111, 2, -0.11},
{5.3253, 3, 5.325},
{5.3258, 3, 5.326},
{5.3253, 0, 5.0},
{5.55, 1, 5.6},
} {
m, err := stats.Round(c.number, c.decimals)
if err != nil {
t.Errorf("Returned an error")
}
if m != c.result {
t.Errorf("%.1f != %.1f", m, c.result)
}
}
_, err := stats.Round(math.NaN(), 2)
if err == nil {
t.Errorf("Round should error on NaN")
}
}
func BenchmarkRound(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Round(0.1111, 1)
}
}
stats-0.7.1/sample.go 0000664 0000000 0000000 00000002623 14427120666 0014500 0 ustar 00root root 0000000 0000000 package stats
import (
"math/rand"
"sort"
)
// Sample returns sample from input with replacement or without
func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {
if input.Len() == 0 {
return nil, EmptyInputErr
}
length := input.Len()
if replacement {
result := Float64Data{}
rand.Seed(unixnano())
// In every step, randomly pick an index and take that element
for i := 0; i < takenum; i++ {
idx := rand.Intn(length)
result = append(result, input[idx])
}
return result, nil
} else if !replacement && takenum <= length {
rand.Seed(unixnano())
// Get a permutation of the indices
perm := rand.Perm(length)
result := Float64Data{}
// Get the element of input at each permuted index
for _, idx := range perm[0:takenum] {
result = append(result, input[idx])
}
return result, nil
}
return nil, BoundsErr
}
// StableSample, like a stable sort, returns a sample from the input while keeping the order of the original data.
func StableSample(input Float64Data, takenum int) ([]float64, error) {
if input.Len() == 0 {
return nil, EmptyInputErr
}
length := input.Len()
if takenum <= length {
rand.Seed(unixnano())
perm := rand.Perm(length)
perm = perm[0:takenum]
// Sort perm before applying
sort.Ints(perm)
result := Float64Data{}
for _, idx := range perm {
result = append(result, input[idx])
}
return result, nil
}
return nil, BoundsErr
}
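// exampleSampleSketch is a hypothetical editorial helper, not part of the
// library API, contrasting the three modes: with replacement the result may
// repeat values and may be larger than the input, without replacement it is
// a shuffled subset, and StableSample draws a subset while preserving the
// original ordering.
func exampleSampleSketch() ([]float64, []float64, []float64) {
	data := Float64Data{0.1, 0.2, 0.3, 0.4, 0.5}
	withRepl, _ := Sample(data, 8, true)     // may contain the same value twice
	withoutRepl, _ := Sample(data, 3, false) // three distinct values, any order
	stable, _ := StableSample(data, 3)       // three distinct values, input order
	return withRepl, withoutRepl, stable
}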
stats-0.7.1/sample_test.go 0000664 0000000 0000000 00000003116 14427120666 0015535 0 ustar 00root root 0000000 0000000 package stats_test
import (
"testing"
"github.com/montanaflynn/stats"
)
func TestSample(t *testing.T) {
_, err := stats.Sample([]float64{}, 10, false)
if err == nil {
t.Errorf("should return an error")
}
_, err = stats.Sample([]float64{0.1, 0.2}, 10, false)
if err == nil {
t.Errorf("should return an error")
}
}
func TestSampleWithoutReplacement(t *testing.T) {
arr := []float64{0.1, 0.2, 0.3, 0.4, 0.5}
result, _ := stats.Sample(arr, 5, false)
checks := map[float64]bool{}
for _, res := range result {
_, ok := checks[res]
if ok {
t.Errorf("%v already seen", res)
}
checks[res] = true
}
}
func TestSampleWithReplacement(t *testing.T) {
arr := []float64{0.1, 0.2, 0.3, 0.4, 0.5}
numsamples := 100
result, _ := stats.Sample(arr, numsamples, true)
if len(result) != numsamples {
t.Errorf("%v != %v", len(result), numsamples)
}
}
func TestStableSample(t *testing.T) {
_, err := stats.StableSample(stats.Float64Data{}, 10)
if err != stats.EmptyInputErr {
t.Errorf("should return EmptyInputError when sampling an empty data")
}
_, err = stats.StableSample(stats.Float64Data{1.0, 2.0}, 10)
if err != stats.BoundsErr {
t.Errorf("should return BoundsErr when sampling size exceeds the maximum element size of data")
}
arr := []float64{1.0, 3.0, 2.0, -1.0, 5.0}
locations := map[float64]int{
1.0: 0,
3.0: 1,
2.0: 2,
-1.0: 3,
5.0: 4,
}
ret, _ := stats.StableSample(arr, 3)
if len(ret) != 3 {
t.Errorf("returned wrong sample size")
}
for i := 1; i < 3; i++ {
if locations[ret[i]] < locations[ret[i-1]] {
t.Errorf("doesn't keep order")
}
}
}
stats-0.7.1/sigmoid.go 0000664 0000000 0000000 00000000707 14427120666 0014653 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Sigmoid returns the input values in the range of -1 to 1
// along the sigmoid or s-shaped curve, commonly used in
// machine learning while training neural networks as an
// activation function.
func Sigmoid(input Float64Data) ([]float64, error) {
if input.Len() == 0 {
return Float64Data{}, EmptyInput
}
s := make([]float64, len(input))
for i, v := range input {
s[i] = 1 / (1 + math.Exp(-v))
}
return s, nil
}
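// exampleSigmoidSketch is a hypothetical editorial helper, not part of the
// library API. Each element is mapped through 1 / (1 + e^-x), so large
// negative inputs approach 0, zero maps to exactly 0.5, and large positive
// inputs approach 1.
func exampleSigmoidSketch() []float64 {
	s, _ := Sigmoid([]float64{-6, 0, 6})
	return s // roughly [0.0025, 0.5, 0.9975]
}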
stats-0.7.1/sigmoid_test.go 0000664 0000000 0000000 00000001440 14427120666 0015705 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleSigmoid() {
s, _ := stats.Sigmoid([]float64{3.0, 1.0, 2.1})
fmt.Println(s)
// Output: [0.9525741268224334 0.7310585786300049 0.8909031788043871]
}
func TestSigmoidEmptyInput(t *testing.T) {
_, err := stats.Sigmoid([]float64{})
if err != stats.EmptyInputErr {
t.Errorf("Should have returned empty input error")
}
}
func TestSigmoid(t *testing.T) {
sm, err := stats.Sigmoid([]float64{-0.54761371, 17.04850603, 4.86054302})
if err != nil {
t.Error(err)
}
a := 0.3664182235138545
if sm[0] != a {
t.Errorf("%v != %v", sm[0], a)
}
a = 0.9999999605608187
if sm[1] != a {
t.Errorf("%v != %v", sm[1], a)
}
a = 0.9923132671908277
if sm[2] != a {
t.Errorf("%v != %v", sm[2], a)
}
}
stats-0.7.1/softmax.go 0000664 0000000 0000000 00000001005 14427120666 0014671 0 ustar 00root root 0000000 0000000 package stats
import "math"
// SoftMax returns the input values in the range of 0 to 1
// with sum of all the probabilities being equal to one. It
// is commonly used in machine learning neural networks.
func SoftMax(input Float64Data) ([]float64, error) {
if input.Len() == 0 {
return Float64Data{}, EmptyInput
}
s := 0.0
c, _ := Max(input)
for _, e := range input {
s += math.Exp(e - c)
}
sm := make([]float64, len(input))
for i, v := range input {
sm[i] = math.Exp(v-c) / s
}
return sm, nil
}
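// exampleSoftMaxSketch is a hypothetical editorial helper, not part of the
// library API. Subtracting the maximum before exponentiating, as done above,
// leaves the result mathematically unchanged but prevents overflow for large
// inputs; the outputs are positive and sum to 1.
func exampleSoftMaxSketch() []float64 {
	sm, _ := SoftMax([]float64{3.0, 1.0, 0.2})
	return sm // roughly [0.836, 0.113, 0.051]
}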
stats-0.7.1/softmax_test.go 0000664 0000000 0000000 00000001417 14427120666 0015737 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleSoftMax() {
sm, _ := stats.SoftMax([]float64{3.0, 1.0, 0.2})
fmt.Println(sm)
// Output: [0.8360188027814407 0.11314284146556013 0.05083835575299916]
}
func TestSoftMaxEmptyInput(t *testing.T) {
_, err := stats.SoftMax([]float64{})
if err != stats.EmptyInputErr {
t.Errorf("Should have returned empty input error")
}
}
func TestSoftMax(t *testing.T) {
sm, err := stats.SoftMax([]float64{3.0, 1.0, 0.2})
if err != nil {
t.Error(err)
}
a := 0.8360188027814407
if sm[0] != a {
t.Errorf("%v != %v", sm[0], a)
}
a = 0.11314284146556013
if sm[1] != a {
t.Errorf("%v != %v", sm[1], a)
}
a = 0.05083835575299916
if sm[2] != a {
t.Errorf("%v != %v", sm[1], a)
}
}
stats-0.7.1/sum.go 0000664 0000000 0000000 00000000417 14427120666 0014022 0 ustar 00root root 0000000 0000000 package stats
import "math"
// Sum adds all the numbers of a slice together
func Sum(input Float64Data) (sum float64, err error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
// Add em up
for _, n := range input {
sum += n
}
return sum, nil
}
stats-0.7.1/sum_test.go 0000664 0000000 0000000 00000001717 14427120666 0015065 0 ustar 00root root 0000000 0000000 package stats_test
import (
"fmt"
"reflect"
"testing"
"github.com/montanaflynn/stats"
)
func ExampleSum() {
d := []float64{1.1, 2.2, 3.3}
a, _ := stats.Sum(d)
fmt.Println(a)
// Output: 6.6
}
func TestSum(t *testing.T) {
for _, c := range []struct {
in []float64
out float64
}{
{[]float64{1, 2, 3}, 6},
{[]float64{1.0, 1.1, 1.2, 2.2}, 5.5},
{[]float64{1, -1, 2, -3}, -1},
} {
got, err := stats.Sum(c.in)
if err != nil {
t.Errorf("Returned an error")
}
if !reflect.DeepEqual(c.out, got) {
t.Errorf("Sum(%.1f) => %.1f != %.1f", c.in, got, c.out)
}
}
_, err := stats.Sum([]float64{})
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func BenchmarkSumSmallFloatSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = stats.Sum(makeFloatSlice(5))
}
}
func BenchmarkSumLargeFloatSlice(b *testing.B) {
lf := makeFloatSlice(100000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = stats.Sum(lf)
}
}
stats-0.7.1/test_utils_test.go 0000664 0000000 0000000 00000001254 14427120666 0016454 0 ustar 00root root 0000000 0000000 package stats_test
// Approximate float comparisons
// Taken from the standard library's math/all_test.go
func tolerance(a, b, e float64) bool {
// Multiplying by e here can underflow denormal values to zero.
// Check a==b so that at least if a and b are small and identical
// we say they match.
if a == b {
return true
}
d := a - b
if d < 0 {
d = -d
}
// note: b is correct (expected) value, a is actual value.
// make error tolerance a fraction of b, not a.
if b != 0 {
e = e * b
if e < 0 {
e = -e
}
}
return d < e
}
func close(a, b float64) bool { return tolerance(a, b, 1e-14) }
func veryclose(a, b float64) bool { return tolerance(a, b, 4e-16) }
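// exampleToleranceSketch is a hypothetical editorial helper, not part of the
// test suite. The tolerance is relative to the expected value b: close
// accepts a relative error up to 1e-14 while veryclose accepts only 4e-16,
// so a difference of about 1e-15 on values near 1 passes close but not
// veryclose.
func exampleToleranceSketch() (bool, bool) {
	return close(1.0, 1.0+1e-15), veryclose(1.0, 1.0+1e-15)
}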
stats-0.7.1/util.go 0000664 0000000 0000000 00000001632 14427120666 0014173 0 ustar 00root root 0000000 0000000 package stats
import (
"sort"
"time"
)
// float64ToInt rounds a float64 to an int
func float64ToInt(input float64) (output int) {
r, _ := Round(input, 0)
return int(r)
}
// unixnano returns nanoseconds from UTC epoch
func unixnano() int64 {
return time.Now().UTC().UnixNano()
}
// copyslice copies a slice of float64s
func copyslice(input Float64Data) Float64Data {
s := make(Float64Data, input.Len())
copy(s, input)
return s
}
// sortedCopy returns a sorted copy of float64s
func sortedCopy(input Float64Data) (copy Float64Data) {
copy = copyslice(input)
sort.Float64s(copy)
return
}
// sortedCopyDif returns the input unchanged if it is
// already sorted, otherwise it returns a sorted copy.
// Only use this if the returned slice won't be manipulated!
func sortedCopyDif(input Float64Data) (copy Float64Data) {
if sort.Float64sAreSorted(input) {
return input
}
copy = copyslice(input)
sort.Float64s(copy)
return
}
stats-0.7.1/util_test.go 0000664 0000000 0000000 00000000456 14427120666 0015235 0 ustar 00root root 0000000 0000000 package stats
import (
"testing"
)
func TestFloat64ToInt(t *testing.T) {
m := float64ToInt(234.0234)
if m != 234 {
t.Errorf("%x != %x", m, 234)
}
m = float64ToInt(-234.0234)
if m != -234 {
t.Errorf("%x != %x", m, -234)
}
m = float64ToInt(1)
if m != 1 {
t.Errorf("%x != %x", m, 1)
}
}
stats-0.7.1/variance.go 0000664 0000000 0000000 00000004370 14427120666 0015010 0 ustar 00root root 0000000 0000000 package stats
import "math"
// _variance finds the variance for both population and sample data
func _variance(input Float64Data, sample int) (variance float64, err error) {
if input.Len() == 0 {
return math.NaN(), EmptyInputErr
}
// Sum the square of the mean subtracted from each number
m, _ := Mean(input)
for _, n := range input {
variance += (n - m) * (n - m)
}
// When getting the mean of the squared differences
// "sample" tells us whether the input is a sample or a
// population and whether to subtract one from the length
return variance / float64((input.Len() - (1 * sample))), nil
}
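// exampleVarianceSketch is a hypothetical editorial helper, not part of the
// library API. The sample flag chooses the divisor: 0 gives the population
// variance (divide by n) and 1 gives the sample variance (divide by n-1),
// so for {1, 2, 3} they come out to 2/3 and 1 respectively.
func exampleVarianceSketch() (float64, float64) {
	pop, _ := PopulationVariance([]float64{1, 2, 3}) // 0.666...
	smp, _ := SampleVariance([]float64{1, 2, 3})     // 1
	return pop, smp
}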
// Variance finds the amount of variation in the dataset
func Variance(input Float64Data) (sdev float64, err error) {
return PopulationVariance(input)
}
// PopulationVariance finds the amount of variance within a population
func PopulationVariance(input Float64Data) (pvar float64, err error) {
v, err := _variance(input, 0)
if err != nil {
return math.NaN(), err
}
return v, nil
}
// SampleVariance finds the amount of variance within a sample
func SampleVariance(input Float64Data) (svar float64, err error) {
v, err := _variance(input, 1)
if err != nil {
return math.NaN(), err
}
return v, nil
}
// Covariance is a measure of how much two sets of data change together
func Covariance(data1, data2 Float64Data) (float64, error) {
l1 := data1.Len()
l2 := data2.Len()
if l1 == 0 || l2 == 0 {
return math.NaN(), EmptyInputErr
}
if l1 != l2 {
return math.NaN(), SizeErr
}
m1, _ := Mean(data1)
m2, _ := Mean(data2)
// Calculate sum of squares
var ss float64
for i := 0; i < l1; i++ {
delta1 := (data1.Get(i) - m1)
delta2 := (data2.Get(i) - m2)
ss += (delta1*delta2 - ss) / float64(i+1)
}
return ss * float64(l1) / float64(l1-1), nil
}
// CovariancePopulation computes the covariance between two variables over the entire population.
func CovariancePopulation(data1, data2 Float64Data) (float64, error) {
l1 := data1.Len()
l2 := data2.Len()
if l1 == 0 || l2 == 0 {
return math.NaN(), EmptyInputErr
}
if l1 != l2 {
return math.NaN(), SizeErr
}
m1, _ := Mean(data1)
m2, _ := Mean(data2)
var s float64
for i := 0; i < l1; i++ {
delta1 := (data1.Get(i) - m1)
delta2 := (data2.Get(i) - m2)
s += delta1 * delta2
}
return s / float64(l1), nil
}
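// exampleCovarianceSketch is a hypothetical editorial helper, not part of the
// library API. Covariance keeps a running mean of the cross products and then
// rescales by n/(n-1) to get the sample covariance, while CovariancePopulation
// divides the plain sum of cross products by n.
func exampleCovarianceSketch() (float64, float64) {
	x := Float64Data{1, 2, 3, 4, 5}
	y := Float64Data{1, 2, 3, 5, 6}
	sample, _ := Covariance(x, y)               // about 3.25
	population, _ := CovariancePopulation(x, y) // 2.6
	return sample, population
}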
stats-0.7.1/variance_test.go 0000664 0000000 0000000 00000004277 14427120666 0016055 0 ustar 00root root 0000000 0000000 package stats_test
import (
"math"
"testing"
"github.com/montanaflynn/stats"
)
func TestVariance(t *testing.T) {
_, err := stats.Variance([]float64{1, 2, 3})
if err != nil {
t.Errorf("Returned an error")
}
}
func TestPopulationVariance(t *testing.T) {
e, err := stats.PopulationVariance([]float64{})
if !math.IsNaN(e) {
t.Errorf("%.1f != %.1f", e, math.NaN())
}
if err != stats.EmptyInputErr {
t.Errorf("%v != %v", err, stats.EmptyInputErr)
}
pv, _ := stats.PopulationVariance([]float64{1, 2, 3})
a, err := stats.Round(pv, 1)
if err != nil {
t.Errorf("Returned an error")
}
if a != 0.7 {
t.Errorf("%.1f != %.1f", a, 0.7)
}
}
func TestSampleVariance(t *testing.T) {
m, err := stats.SampleVariance([]float64{})
if !math.IsNaN(m) {
t.Errorf("%.1f != %.1f", m, math.NaN())
}
if err != stats.EmptyInputErr {
t.Errorf("%v != %v", err, stats.EmptyInputErr)
}
m, _ = stats.SampleVariance([]float64{1, 2, 3})
if m != 1.0 {
t.Errorf("%.1f != %.1f", m, 1.0)
}
}
func TestCovariance(t *testing.T) {
s1 := []float64{1, 2, 3, 4, 5}
s2 := []float64{10, -51.2, 8}
s3 := []float64{1, 2, 3, 5, 6}
s4 := []float64{}
_, err := stats.Covariance(s1, s2)
if err == nil {
t.Errorf("Mismatched slice lengths should have returned an error")
}
a, err := stats.Covariance(s1, s3)
if err != nil {
t.Errorf("Should not have returned an error")
}
if a != 3.2499999999999996 {
t.Errorf("Covariance %v != %v", a, 3.2499999999999996)
}
_, err = stats.Covariance(s1, s4)
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}
func TestCovariancePopulation(t *testing.T) {
s1 := []float64{1, 2, 3.5, 3.7, 8, 12}
s2 := []float64{10, -51.2, 8}
s3 := []float64{0.5, 1, 2.1, 3.4, 3.4, 4}
s4 := []float64{}
_, err := stats.CovariancePopulation(s1, s2)
if err == nil {
t.Errorf("Mismatched slice lengths should have returned an error")
}
a, err := stats.CovariancePopulation(s1, s3)
if err != nil {
t.Errorf("Should not have returned an error")
}
if a != 4.191666666666666 {
t.Errorf("CovariancePopulation %v != %v", a, 4.191666666666666)
}
_, err = stats.CovariancePopulation(s1, s4)
if err == nil {
t.Errorf("Empty slice should have returned an error")
}
}