pax_global_header00006660000000000000000000000064135563431670014527gustar00rootroot0000000000000052 comment=dfb0946921035f42d7ddd2baf68e6b02efd388da golang-github-oklog-ulid-2.0.2+ds/000077500000000000000000000000001355634316700167255ustar00rootroot00000000000000golang-github-oklog-ulid-2.0.2+ds/.gitignore000066400000000000000000000005111355634316700207120ustar00rootroot00000000000000#### joe made this: http://goel.io/joe #####=== Go ===##### # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof golang-github-oklog-ulid-2.0.2+ds/.travis.yml000066400000000000000000000007731355634316700210450ustar00rootroot00000000000000language: go sudo: false env: GO111MODULE=on go: - 1.10.x - 1.11.x - 1.12.x install: - go get -u -v golang.org/x/lint/golint - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls - go get -d -t -v ./... - go build -v ./... script: - go vet ./... - $HOME/gopath/bin/golint . - go test -v -race ./... - go test -v -covermode=count -coverprofile=cov.out - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true golang-github-oklog-ulid-2.0.2+ds/AUTHORS.md000066400000000000000000000000731355634316700203740ustar00rootroot00000000000000- Peter Bourgon (@peterbourgon) - Tomás Senart (@tsenart) golang-github-oklog-ulid-2.0.2+ds/CHANGELOG.md000066400000000000000000000012671355634316700205440ustar00rootroot00000000000000## 1.3.1 / 2018-10-02 * Use underlying entropy source for random increments in Monotonic (#32) ## 1.3.0 / 2018-09-29 * Monotonic entropy support (#31) ## 1.2.0 / 2018-09-09 * Add a function to convert Unix time in milliseconds back to time.Time (#30) ## 1.1.0 / 2018-08-15 * Ensure random part is always read from the entropy reader in full (#28) ## 1.0.0 / 2018-07-29 * Add ParseStrict and MustParseStrict functions (#26) * Enforce overflow checking when parsing (#20) ## 0.3.0 / 2017-01-03 * Implement ULID.Compare method ## 0.2.0 / 2016-12-13 * Remove year 2262 Timestamp bug. (#1) * Gracefully handle invalid encodings when parsing. ## 0.1.0 / 2016-12-06 * First ULID release golang-github-oklog-ulid-2.0.2+ds/CONTRIBUTING.md000066400000000000000000000014021355634316700211530ustar00rootroot00000000000000# Contributing We use GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) one or more of the maintainers (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. * If you plan to do something more involved, first propose your ideas in a Github issue. This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). golang-github-oklog-ulid-2.0.2+ds/Gopkg.lock000066400000000000000000000006611355634316700206510ustar00rootroot00000000000000# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
[[projects]] branch = "master" name = "github.com/pborman/getopt" packages = ["v2"] revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" [solve-meta] analyzer-name = "dep" analyzer-version = 1 inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" solver-name = "gps-cdcl" solver-version = 1 golang-github-oklog-ulid-2.0.2+ds/Gopkg.toml000066400000000000000000000011401355634316700206650ustar00rootroot00000000000000 # Gopkg.toml example # # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md # for detailed Gopkg.toml documentation. # # required = ["github.com/user/thing/cmd/thing"] # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] # # [[constraint]] # name = "github.com/user/project" # version = "1.0.0" # # [[constraint]] # name = "github.com/user/project2" # branch = "dev" # source = "github.com/myfork/project2" # # [[override]] # name = "github.com/x/y" # version = "2.4.0" [[constraint]] branch = "master" name = "github.com/pborman/getopt" golang-github-oklog-ulid-2.0.2+ds/LICENSE000066400000000000000000000261351355634316700177410ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-github-oklog-ulid-2.0.2+ds/README.md000066400000000000000000000167311355634316700202140ustar00rootroot00000000000000# Universally Unique Lexicographically Sortable Identifier ![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg) [![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid) [![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) [![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) [![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented. 
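For a quick end-to-end sketch of the API documented below (this example assumes `crypto/rand` as the entropy source; any `io.Reader` works, and `math/rand` or `ulid.Monotonic` are common alternatives):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	// Generate: current time in Unix milliseconds plus crypto-grade entropy.
	id, err := ulid.New(ulid.Timestamp(time.Now()), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // 26-character Crockford base32 string

	// Parse it back and recover the embedded timestamp.
	parsed, err := ulid.Parse(id.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ulid.Time(parsed.Time()).UTC())
}
```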
## Background

A GUID/UUID can be suboptimal for many use cases because:

- It isn't the most character-efficient way of encoding 128 bits
- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
- UUID v4 provides no other information than randomness, which can cause fragmentation in many data structures

A ULID however:

- Is compatible with UUID/GUIDs
- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
- Lexicographically sortable
- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
- Case insensitive
- No special characters (URL safe)
- Monotonic sort order (correctly detects and handles the same millisecond)

## Install

```shell
go get github.com/oklog/ulid
```

## Usage

An ULID is constructed with a `time.Time` and an `io.Reader` entropy source. This design allows for greater flexibility in choosing your trade-offs.

Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use. Instantiate one per long-lived goroutine, or use a `sync.Pool`, if you want to avoid the potential contention of a locked `rand.Source`, as has been frequently observed with the package-level functions.

```go
func ExampleULID() {
	t := time.Unix(1000000, 0)
	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
	fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
	// Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
}
```

## Commandline tool

This repo also provides a tool to generate and parse ULIDs at the command line.

Installation:

```shell
go get github.com/oklog/ulid/cmd/ulid
```

Usage:

```shell
Usage: ulid [-hlqz] [-f ] [parameters ...]
 -f, --format=  when parsing, show times in this format: default, rfc3339, unix, ms
 -h, --help     print this help text
 -l, --local    when parsing, show local time instead of UTC
 -q, --quick    when generating, use non-crypto-grade entropy
 -z, --zero     when generating, fix entropy to all-zeroes
```

Examples:

```shell
$ ulid
01D78XYFJ1PRM1WPBCBT3VHMNV
$ ulid -z
01D78XZ44G0000000000000000
$ ulid 01D78XZ44G0000000000000000
Sun Mar 31 03:51:23.536 UTC 2019
$ ulid --format=rfc3339 --local 01D78XZ44G0000000000000000
2019-03-30T20:51:23.536PDT
```

## Specification

Below is the current specification of ULID as implemented in this repository.

### Components

**Timestamp**
- 48 bits
- UNIX-time in milliseconds
- Won't run out of space until the year 10889 AD

**Entropy**
- 80 bits
- User-defined entropy source.
- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)

### Encoding

[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown. This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.

```
0123456789ABCDEFGHJKMNPQRSTVWXYZ
```

### Binary Layout and Byte Order

The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order).
``` 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 32_bit_uint_time_high | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 16_bit_uint_time_low | 16_bit_uint_random | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 32_bit_uint_random | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 32_bit_uint_random | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ``` ### String Representation ``` 01AN4Z07BY 79KA1307SR9X4MV3 |----------| |----------------| Timestamp Entropy 10 chars 16 chars 48bits 80bits base32 base32 ``` ## Test ```shell go test ./... ``` ## Benchmarks On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 ``` BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op ``` ## Prior Art - [alizain/ulid](https://github.com/alizain/ulid) - [RobThree/NUlid](https://github.com/RobThree/NUlid) - [imdario/go-ulid](https://github.com/imdario/go-ulid) golang-github-oklog-ulid-2.0.2+ds/cmd/000077500000000000000000000000001355634316700174705ustar00rootroot00000000000000golang-github-oklog-ulid-2.0.2+ds/cmd/ulid/000077500000000000000000000000001355634316700204255ustar00rootroot00000000000000golang-github-oklog-ulid-2.0.2+ds/cmd/ulid/main.go000066400000000000000000000045641355634316700217110ustar00rootroot00000000000000package main import ( cryptorand "crypto/rand" "fmt" mathrand "math/rand" "os" "strings" "time" "github.com/oklog/ulid" getopt "github.com/pborman/getopt/v2" ) const ( defaultms = "Mon Jan 02 15:04:05.999 MST 2006" rfc3339ms = "2006-01-02T15:04:05.999MST" ) func main() { // Completely obnoxious. 
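// The two getopt package-level settings below only tune how the usage text
// is rendered (the column where help strings start, and the overall display
// width); they do not change flag parsing itself.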
getopt.HelpColumn = 50 getopt.DisplayWidth = 140 fs := getopt.New() var ( format = fs.StringLong("format", 'f', "default", "when parsing, show times in this format: default, rfc3339, unix, ms", "") local = fs.BoolLong("local", 'l', "when parsing, show local time instead of UTC") quick = fs.BoolLong("quick", 'q', "when generating, use non-crypto-grade entropy") zero = fs.BoolLong("zero", 'z', "when generating, fix entropy to all-zeroes") help = fs.BoolLong("help", 'h', "print this help text") ) if err := fs.Getopt(os.Args, nil); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } if *help { fs.PrintUsage(os.Stderr) os.Exit(0) } var formatFunc func(time.Time) string switch strings.ToLower(*format) { case "default": formatFunc = func(t time.Time) string { return t.Format(defaultms) } case "rfc3339": formatFunc = func(t time.Time) string { return t.Format(rfc3339ms) } case "unix": formatFunc = func(t time.Time) string { return fmt.Sprint(t.Unix()) } case "ms": formatFunc = func(t time.Time) string { return fmt.Sprint(t.UnixNano() / 1e6) } default: fmt.Fprintf(os.Stderr, "invalid --format %s\n", *format) os.Exit(1) } switch args := fs.Args(); len(args) { case 0: generate(*quick, *zero) default: parse(args[0], *local, formatFunc) } } func generate(quick, zero bool) { entropy := cryptorand.Reader if quick { seed := time.Now().UnixNano() source := mathrand.NewSource(seed) entropy = mathrand.New(source) } if zero { entropy = zeroReader{} } id, err := ulid.New(ulid.Timestamp(time.Now()), entropy) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Fprintf(os.Stdout, "%s\n", id) } func parse(s string, local bool, f func(time.Time) string) { id, err := ulid.Parse(s) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } t := ulid.Time(id.Time()) if !local { t = t.UTC() } fmt.Fprintf(os.Stderr, "%s\n", f(t)) } type zeroReader struct{} func (zeroReader) Read(p []byte) (int, error) { for i := range p { p[i] = 0 } return len(p), nil } golang-github-oklog-ulid-2.0.2+ds/go.mod000066400000000000000000000001461355634316700200340ustar00rootroot00000000000000module github.com/oklog/ulid/v2 require github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 golang-github-oklog-ulid-2.0.2+ds/go.sum000066400000000000000000000003411355634316700200560ustar00rootroot00000000000000github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 h1:BHT1/DKsYDGkUgQ2jmMaozVcdk+sVfz0+1ZJq4zkWgw= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= golang-github-oklog-ulid-2.0.2+ds/ulid.go000066400000000000000000000467431355634316700202270ustar00rootroot00000000000000// Copyright 2016 The Oklog Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ulid import ( "bufio" "bytes" "database/sql/driver" "encoding/binary" "errors" "io" "math" "math/bits" "math/rand" "time" ) /* An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier The components are encoded as 16 octets. 
Each component is encoded with the MSB first (network byte order). 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 32_bit_uint_time_high | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 16_bit_uint_time_low | 16_bit_uint_random | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 32_bit_uint_random | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 32_bit_uint_random | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ type ULID [16]byte var ( // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong // data size. ErrDataSize = errors.New("ulid: bad data size when unmarshaling") // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with // invalid Base32 encodings. ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient // size. ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") // ErrBigTime is returned when constructing an ULID with a time that is larger // than MaxTime. ErrBigTime = errors.New("ulid: time too big") // ErrOverflow is returned when unmarshaling a ULID whose first character is // larger than 7, thereby exceeding the valid bit depth of 128. ErrOverflow = errors.New("ulid: overflow when unmarshaling") // ErrMonotonicOverflow is returned by a Monotonic entropy source when // incrementing the previous ULID's entropy bytes would result in overflow. ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") // ErrScanValue is returned when the value passed to scan cannot be unmarshaled // into the ULID. ErrScanValue = errors.New("ulid: source value must be a string or byte slice") ) // MonotonicReader is an interface that should yield monotonically increasing // entropy into the provided slice for all calls with the same ms parameter. If // a MonotonicReader is provided to the New constructor, its MonotonicRead // method will be used instead of Read. type MonotonicReader interface { io.Reader MonotonicRead(ms uint64, p []byte) error } // New returns an ULID with the given Unix milliseconds timestamp and an // optional entropy source. Use the Timestamp function to convert // a time.Time to Unix milliseconds. // // ErrBigTime is returned when passing a timestamp bigger than MaxTime. // Reading from the entropy source may also return an error. // // Safety for concurrent use is only dependent on the safety of the // entropy source. func New(ms uint64, entropy io.Reader) (id ULID, err error) { if err = id.SetTime(ms); err != nil { return id, err } switch e := entropy.(type) { case nil: return id, err case MonotonicReader: err = e.MonotonicRead(ms, id[6:]) default: _, err = io.ReadFull(e, id[6:]) } return id, err } // MustNew is a convenience function equivalent to New that panics on failure // instead of returning an error. func MustNew(ms uint64, entropy io.Reader) ULID { id, err := New(ms, entropy) if err != nil { panic(err) } return id } // Parse parses an encoded ULID, returning an error in case of failure. // // ErrDataSize is returned if the len(ulid) is different from an encoded // ULID's length. Invalid encodings produce undefined ULIDs. For a version that // returns an error instead, see ParseStrict. 
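//
// A short sketch of typical use (the sample string is the one used in this
// package's benchmarks):
//
//	id, err := Parse("0000XSNJG0MQJHBF4QX1EFD6Y3")
//	if err != nil {
//		// A wrong-length input would yield ErrDataSize here.
//	}
//	_ = id.Time() // Unix milliseconds encoded in the first 10 characters.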
func Parse(ulid string) (id ULID, err error) { return id, parse([]byte(ulid), false, &id) } // ParseStrict parses an encoded ULID, returning an error in case of failure. // // It is like Parse, but additionally validates that the parsed ULID consists // only of valid base32 characters. It is slightly slower than Parse. // // ErrDataSize is returned if the len(ulid) is different from an encoded // ULID's length. Invalid encodings return ErrInvalidCharacters. func ParseStrict(ulid string) (id ULID, err error) { return id, parse([]byte(ulid), true, &id) } func parse(v []byte, strict bool, id *ULID) error { // Check if a base32 encoded ULID is the right length. if len(v) != EncodedSize { return ErrDataSize } // Check if all the characters in a base32 encoded ULID are part of the // expected base32 character set. if strict && (dec[v[0]] == 0xFF || dec[v[1]] == 0xFF || dec[v[2]] == 0xFF || dec[v[3]] == 0xFF || dec[v[4]] == 0xFF || dec[v[5]] == 0xFF || dec[v[6]] == 0xFF || dec[v[7]] == 0xFF || dec[v[8]] == 0xFF || dec[v[9]] == 0xFF || dec[v[10]] == 0xFF || dec[v[11]] == 0xFF || dec[v[12]] == 0xFF || dec[v[13]] == 0xFF || dec[v[14]] == 0xFF || dec[v[15]] == 0xFF || dec[v[16]] == 0xFF || dec[v[17]] == 0xFF || dec[v[18]] == 0xFF || dec[v[19]] == 0xFF || dec[v[20]] == 0xFF || dec[v[21]] == 0xFF || dec[v[22]] == 0xFF || dec[v[23]] == 0xFF || dec[v[24]] == 0xFF || dec[v[25]] == 0xFF) { return ErrInvalidCharacters } // Check if the first character in a base32 encoded ULID will overflow. This // happens because the base32 representation encodes 130 bits, while the // ULID is only 128 bits. // // See https://github.com/oklog/ulid/issues/9 for details. if v[0] > '7' { return ErrOverflow } // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) // to decode a base32 ULID. // 6 bytes timestamp (48 bits) (*id)[0] = (dec[v[0]] << 5) | dec[v[1]] (*id)[1] = (dec[v[2]] << 3) | (dec[v[3]] >> 2) (*id)[2] = (dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4) (*id)[3] = (dec[v[5]] << 4) | (dec[v[6]] >> 1) (*id)[4] = (dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3) (*id)[5] = (dec[v[8]] << 5) | dec[v[9]] // 10 bytes of entropy (80 bits) (*id)[6] = (dec[v[10]] << 3) | (dec[v[11]] >> 2) (*id)[7] = (dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4) (*id)[8] = (dec[v[13]] << 4) | (dec[v[14]] >> 1) (*id)[9] = (dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3) (*id)[10] = (dec[v[16]] << 5) | dec[v[17]] (*id)[11] = (dec[v[18]] << 3) | dec[v[19]]>>2 (*id)[12] = (dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4) (*id)[13] = (dec[v[21]] << 4) | (dec[v[22]] >> 1) (*id)[14] = (dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3) (*id)[15] = (dec[v[24]] << 5) | dec[v[25]] return nil } // MustParse is a convenience function equivalent to Parse that panics on failure // instead of returning an error. func MustParse(ulid string) ULID { id, err := Parse(ulid) if err != nil { panic(err) } return id } // MustParseStrict is a convenience function equivalent to ParseStrict that // panics on failure instead of returning an error. func MustParseStrict(ulid string) ULID { id, err := ParseStrict(ulid) if err != nil { panic(err) } return id } // String returns a lexicographically sortable string encoded ULID // (26 characters, non-standard base 32) e.g. 
01AN4Z07BY79KA1307SR9X4MV3 // Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy func (id ULID) String() string { ulid := make([]byte, EncodedSize) _ = id.MarshalTextTo(ulid) return string(ulid) } // MarshalBinary implements the encoding.BinaryMarshaler interface by // returning the ULID as a byte slice. func (id ULID) MarshalBinary() ([]byte, error) { ulid := make([]byte, len(id)) return ulid, id.MarshalBinaryTo(ulid) } // MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. // ErrBufferSize is returned when the len(dst) != 16. func (id ULID) MarshalBinaryTo(dst []byte) error { if len(dst) != len(id) { return ErrBufferSize } copy(dst, id[:]) return nil } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by // copying the passed data and converting it to an ULID. ErrDataSize is // returned if the data length is different from ULID length. func (id *ULID) UnmarshalBinary(data []byte) error { if len(data) != len(*id) { return ErrDataSize } copy((*id)[:], data) return nil } // Encoding is the base 32 encoding alphabet used in ULID strings. const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" // MarshalText implements the encoding.TextMarshaler interface by // returning the string encoded ULID. func (id ULID) MarshalText() ([]byte, error) { ulid := make([]byte, EncodedSize) return ulid, id.MarshalTextTo(ulid) } // MarshalTextTo writes the ULID as a string to the given buffer. // ErrBufferSize is returned when the len(dst) != 26. func (id ULID) MarshalTextTo(dst []byte) error { // Optimized unrolled loop ahead. // From https://github.com/RobThree/NUlid if len(dst) != EncodedSize { return ErrBufferSize } // 10 byte timestamp dst[0] = Encoding[(id[0]&224)>>5] dst[1] = Encoding[id[0]&31] dst[2] = Encoding[(id[1]&248)>>3] dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] dst[4] = Encoding[(id[2]&62)>>1] dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] dst[7] = Encoding[(id[4]&124)>>2] dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] dst[9] = Encoding[id[5]&31] // 16 bytes of entropy dst[10] = Encoding[(id[6]&248)>>3] dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] dst[12] = Encoding[(id[7]&62)>>1] dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] dst[15] = Encoding[(id[9]&124)>>2] dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] dst[17] = Encoding[id[10]&31] dst[18] = Encoding[(id[11]&248)>>3] dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] dst[20] = Encoding[(id[12]&62)>>1] dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] dst[23] = Encoding[(id[14]&124)>>2] dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] dst[25] = Encoding[id[15]&31] return nil } // Byte to index table for O(1) lookups when unmarshaling. // We use 0xFF as sentinel value for invalid indexes. 
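// For example, dec['0'] == 0x00 and dec['Z'] == 0x1F, while the excluded
// letters 'I', 'L', 'O' and 'U' map to 0xFF. Lowercase 'a'..'z' map to the
// same values as 'A'..'Z', which is what makes parsing case insensitive.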
var dec = [...]byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, } // EncodedSize is the length of a text encoded ULID. const EncodedSize = 26 // UnmarshalText implements the encoding.TextUnmarshaler interface by // parsing the data as string encoded ULID. // // ErrDataSize is returned if the len(v) is different from an encoded // ULID's length. Invalid encodings produce undefined ULIDs. func (id *ULID) UnmarshalText(v []byte) error { return parse(v, false, id) } // Time returns the Unix time in milliseconds encoded in the ULID. // Use the top level Time function to convert the returned value to // a time.Time. func (id ULID) Time() uint64 { return uint64(id[5]) | uint64(id[4])<<8 | uint64(id[3])<<16 | uint64(id[2])<<24 | uint64(id[1])<<32 | uint64(id[0])<<40 } // maxTime is the maximum Unix time in milliseconds that can be // represented in an ULID. var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() // MaxTime returns the maximum Unix time in milliseconds that // can be encoded in an ULID. func MaxTime() uint64 { return maxTime } // Now is a convenience function that returns the current // UTC time in Unix milliseconds. Equivalent to: // Timestamp(time.Now().UTC()) func Now() uint64 { return Timestamp(time.Now().UTC()) } // Timestamp converts a time.Time to Unix milliseconds. // // Because of the way ULID stores time, times from the year // 10889 produces undefined results. func Timestamp(t time.Time) uint64 { return uint64(t.Unix())*1000 + uint64(t.Nanosecond()/int(time.Millisecond)) } // Time converts Unix milliseconds in the format // returned by the Timestamp function to a time.Time. func Time(ms uint64) time.Time { s := int64(ms / 1e3) ns := int64((ms % 1e3) * 1e6) return time.Unix(s, ns) } // SetTime sets the time component of the ULID to the given Unix time // in milliseconds. 
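//
// A minimal sketch of intended use:
//
//	var id ULID
//	if err := id.SetTime(Timestamp(time.Now())); err != nil {
//		// The only possible failure is ErrBigTime.
//	}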
func (id *ULID) SetTime(ms uint64) error { if ms > maxTime { return ErrBigTime } (*id)[0] = byte(ms >> 40) (*id)[1] = byte(ms >> 32) (*id)[2] = byte(ms >> 24) (*id)[3] = byte(ms >> 16) (*id)[4] = byte(ms >> 8) (*id)[5] = byte(ms) return nil } // Entropy returns the entropy from the ULID. func (id ULID) Entropy() []byte { e := make([]byte, 10) copy(e, id[6:]) return e } // SetEntropy sets the ULID entropy to the passed byte slice. // ErrDataSize is returned if len(e) != 10. func (id *ULID) SetEntropy(e []byte) error { if len(e) != 10 { return ErrDataSize } copy((*id)[6:], e) return nil } // Compare returns an integer comparing id and other lexicographically. // The result will be 0 if id==other, -1 if id < other, and +1 if id > other. func (id ULID) Compare(other ULID) int { return bytes.Compare(id[:], other[:]) } // Scan implements the sql.Scanner interface. It supports scanning // a string or byte slice. func (id *ULID) Scan(src interface{}) error { switch x := src.(type) { case nil: return nil case string: return id.UnmarshalText([]byte(x)) case []byte: return id.UnmarshalBinary(x) } return ErrScanValue } // Value implements the sql/driver.Valuer interface. This returns the value // represented as a byte slice. If instead a string is desirable, a wrapper // type can be created that calls String(). // // // stringValuer wraps a ULID as a string-based driver.Valuer. // type stringValuer ULID // // func (id stringValuer) Value() (driver.Value, error) { // return ULID(id).String(), nil // } // // // Example usage. // db.Exec("...", stringValuer(id)) func (id ULID) Value() (driver.Value, error) { return id.MarshalBinary() } // Monotonic returns an entropy source that is guaranteed to yield // strictly increasing entropy bytes for the same ULID timestamp. // On conflicts, the previous ULID entropy is incremented with a // random number between 1 and `inc` (inclusive). // // The provided entropy source must actually yield random bytes or else // monotonic reads are not guaranteed to terminate, since there isn't // enough randomness to compute an increment number. // // When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. // The lower the value of `inc`, the easier the next ULID within the // same millisecond is to guess. If your code depends on ULIDs having // secure entropy bytes, then don't go under this default unless you know // what you're doing. // // The returned type isn't safe for concurrent use. func Monotonic(entropy io.Reader, inc uint64) *MonotonicEntropy { m := MonotonicEntropy{ Reader: bufio.NewReader(entropy), inc: inc, } if m.inc == 0 { m.inc = math.MaxUint32 } if rng, ok := entropy.(*rand.Rand); ok { m.rng = rng } return &m } // MonotonicEntropy is an opaque type that provides monotonic entropy. type MonotonicEntropy struct { io.Reader ms uint64 inc uint64 entropy uint80 rand [8]byte rng *rand.Rand } // MonotonicRead implements the MonotonicReader interface. func (m *MonotonicEntropy) MonotonicRead(ms uint64, entropy []byte) (err error) { if !m.entropy.IsZero() && m.ms == ms { err = m.increment() m.entropy.AppendTo(entropy) } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { m.ms = ms m.entropy.SetBytes(entropy) } return err } // increment the previous entropy number with a random number // of up to m.inc (inclusive). 
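// If adding that random step would overflow the 80 bits of entropy,
// ErrMonotonicOverflow is returned (exercised by TestMonotonicOverflow).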
func (m *MonotonicEntropy) increment() error { if inc, err := m.random(); err != nil { return err } else if m.entropy.Add(inc) { return ErrMonotonicOverflow } return nil } // random returns a uniform random value in [1, m.inc), reading entropy // from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. // Adapted from: https://golang.org/pkg/crypto/rand/#Int func (m *MonotonicEntropy) random() (inc uint64, err error) { if m.inc <= 1 { return 1, nil } // Fast path for using a underlying rand.Rand directly. if m.rng != nil { // Range: [1, m.inc) return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil } // bitLen is the maximum bit length needed to encode a value < m.inc. bitLen := bits.Len64(m.inc) // byteLen is the maximum byte length needed to encode a value < m.inc. byteLen := uint(bitLen+7) / 8 // msbitLen is the number of bits in the most significant byte of m.inc-1. msbitLen := uint(bitLen % 8) if msbitLen == 0 { msbitLen = 8 } for inc == 0 || inc >= m.inc { if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { return 0, err } // Clear bits in the first byte to increase the probability // that the candidate is < m.inc. m.rand[0] &= uint8(int(1< t2 && s1 > s2 && ord == +1) || (t1 < t2 && s1 < s2 && ord == -1) } top := ulid.MustNew(ulid.MaxTime(), nil) for i := 0; i < 10; i++ { // test upper boundary state space next := ulid.MustNew(top.Time()-1, nil) if !prop(top, next) { t.Fatalf("bad lexicographical order: (%v, %q) > (%v, %q) == false", top.Time(), top, next.Time(), next, ) } top = next } if err := quick.Check(prop, &quick.Config{MaxCount: 1E6}); err != nil { t.Fatal(err) } } func TestCaseInsensitivity(t *testing.T) { t.Parallel() upper := func(id ulid.ULID) (out ulid.ULID) { return ulid.MustParse(strings.ToUpper(id.String())) } lower := func(id ulid.ULID) (out ulid.ULID) { return ulid.MustParse(strings.ToLower(id.String())) } err := quick.CheckEqual(upper, lower, nil) if err != nil { t.Fatal(err) } } func TestParseRobustness(t *testing.T) { t.Parallel() cases := [][]byte{ {0x1, 0xc0, 0x73, 0x62, 0x4a, 0xaf, 0x39, 0x78, 0x51, 0x4e, 0xf8, 0x44, 0x3b, 0xb2, 0xa8, 0x59, 0xc7, 0x5f, 0xc3, 0xcc, 0x6a, 0xf2, 0x6d, 0x5a, 0xaa, 0x20}, } for _, tc := range cases { if _, err := ulid.Parse(string(tc)); err != nil { t.Error(err) } } prop := func(s [26]byte) (ok bool) { defer func() { if err := recover(); err != nil { t.Error(err) ok = false } }() // quick.Check doesn't constrain input, // so we need to do so artificially. 
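// The first character of a base32 ULID encodes the top bits of the 128-bit
// value, so anything above '7' would make Parse fail with ErrOverflow (see
// TestOverflowHandling); fold such inputs back into the valid range.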
if s[0] > '7' { s[0] %= '7' } var err error if _, err = ulid.Parse(string(s[:])); err != nil { t.Error(err) } return err == nil } err := quick.Check(prop, &quick.Config{MaxCount: 1E4}) if err != nil { t.Fatal(err) } } func TestNow(t *testing.T) { t.Parallel() before := ulid.Now() after := ulid.Timestamp(time.Now().UTC().Add(time.Millisecond)) if before >= after { t.Fatalf("clock went mad: before %v, after %v", before, after) } } func TestTimestamp(t *testing.T) { t.Parallel() tm := time.Unix(1, 1000) // will be truncated if got, want := ulid.Timestamp(tm), uint64(1000); got != want { t.Errorf("for %v, got %v, want %v", tm, got, want) } mt := ulid.MaxTime() dt := time.Unix(int64(mt/1000), int64((mt%1000)*1000000)).Truncate(time.Millisecond) ts := ulid.Timestamp(dt) if got, want := ts, mt; got != want { t.Errorf("got timestamp %d, want %d", got, want) } } func TestTime(t *testing.T) { t.Parallel() original := time.Now() diff := original.Sub(ulid.Time(ulid.Timestamp(original))) if diff >= time.Millisecond { t.Errorf("difference between original and recovered time (%d) greater"+ "than a millisecond", diff) } } func TestTimestampRoundTrips(t *testing.T) { t.Parallel() prop := func(ts uint64) bool { return ts == ulid.Timestamp(ulid.Time(ts)) } err := quick.Check(prop, &quick.Config{MaxCount: 1E5}) if err != nil { t.Fatal(err) } } func TestULIDTime(t *testing.T) { t.Parallel() maxTime := ulid.MaxTime() var id ulid.ULID if got, want := id.SetTime(maxTime+1), ulid.ErrBigTime; got != want { t.Errorf("got err %v, want %v", got, want) } rng := rand.New(rand.NewSource(time.Now().UnixNano())) for i := 0; i < 1e6; i++ { ms := uint64(rng.Int63n(int64(maxTime))) var id ulid.ULID if err := id.SetTime(ms); err != nil { t.Fatal(err) } if got, want := id.Time(), ms; got != want { t.Fatalf("\nfor %v:\ngot %v\nwant %v", id, got, want) } } } func TestEntropy(t *testing.T) { t.Parallel() var id ulid.ULID if got, want := id.SetEntropy([]byte{}), ulid.ErrDataSize; got != want { t.Errorf("got err %v, want %v", got, want) } prop := func(e [10]byte) bool { var id ulid.ULID if err := id.SetEntropy(e[:]); err != nil { t.Fatalf("got err %v", err) } got, want := id.Entropy(), e[:] eq := bytes.Equal(got, want) if !eq { t.Errorf("\n(!= %v\n %v)", got, want) } return eq } if err := quick.Check(prop, nil); err != nil { t.Fatal(err) } } func TestEntropyRead(t *testing.T) { t.Parallel() prop := func(e [10]byte) bool { flakyReader := iotest.HalfReader(bytes.NewReader(e[:])) id, err := ulid.New(ulid.Now(), flakyReader) if err != nil { t.Fatalf("got err %v", err) } got, want := id.Entropy(), e[:] eq := bytes.Equal(got, want) if !eq { t.Errorf("\n(!= %v\n %v)", got, want) } return eq } if err := quick.Check(prop, &quick.Config{MaxCount: 1E4}); err != nil { t.Fatal(err) } } func TestCompare(t *testing.T) { t.Parallel() a := func(a, b ulid.ULID) int { return strings.Compare(a.String(), b.String()) } b := func(a, b ulid.ULID) int { return a.Compare(b) } err := quick.CheckEqual(a, b, &quick.Config{MaxCount: 1E5}) if err != nil { t.Error(err) } } func TestOverflowHandling(t *testing.T) { t.Parallel() for s, want := range map[string]error{ "00000000000000000000000000": nil, "70000000000000000000000000": nil, "7ZZZZZZZZZZZZZZZZZZZZZZZZZ": nil, "80000000000000000000000000": ulid.ErrOverflow, "80000000000000000000000001": ulid.ErrOverflow, "ZZZZZZZZZZZZZZZZZZZZZZZZZZ": ulid.ErrOverflow, } { if _, have := ulid.Parse(s); want != have { t.Errorf("%s: want error %v, have %v", s, want, have) } } } func TestScan(t *testing.T) { id := 
ulid.MustNew(123, crand.Reader) for _, tc := range []struct { name string in interface{} out ulid.ULID err error }{ {"string", id.String(), id, nil}, {"bytes", id[:], id, nil}, {"nil", nil, ulid.ULID{}, nil}, {"other", 44, ulid.ULID{}, ulid.ErrScanValue}, } { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() var out ulid.ULID err := out.Scan(tc.in) if got, want := out, tc.out; got.Compare(want) != 0 { t.Errorf("got ULID %s, want %s", got, want) } if got, want := fmt.Sprint(err), fmt.Sprint(tc.err); got != want { t.Errorf("got err %q, want %q", got, want) } }) } } func TestMonotonic(t *testing.T) { now := ulid.Now() for _, e := range []struct { name string mk func() io.Reader }{ {"cryptorand", func() io.Reader { return crand.Reader }}, {"mathrand", func() io.Reader { return rand.New(rand.NewSource(int64(now))) }}, } { for _, inc := range []uint64{ 0, 1, 2, math.MaxUint8 + 1, math.MaxUint16 + 1, math.MaxUint32 + 1, } { inc := inc entropy := ulid.Monotonic(e.mk(), uint64(inc)) t.Run(fmt.Sprintf("entropy=%s/inc=%d", e.name, inc), func(t *testing.T) { t.Parallel() var prev ulid.ULID for i := 0; i < 10000; i++ { next, err := ulid.New(123, entropy) if err != nil { t.Fatal(err) } if prev.Compare(next) >= 0 { t.Fatalf("prev: %v %v > next: %v %v", prev.Time(), prev.Entropy(), next.Time(), next.Entropy()) } prev = next } }) } } } func TestMonotonicOverflow(t *testing.T) { t.Parallel() entropy := ulid.Monotonic( io.MultiReader( bytes.NewReader(bytes.Repeat([]byte{0xFF}, 10)), // Entropy for first ULID crand.Reader, // Following random entropy ), 0, ) prev, err := ulid.New(0, entropy) if err != nil { t.Fatal(err) } next, err := ulid.New(prev.Time(), entropy) if have, want := err, ulid.ErrMonotonicOverflow; have != want { t.Errorf("have ulid: %v %v err: %v, want err: %v", next.Time(), next.Entropy(), have, want) } } func TestMonotonicSafe(t *testing.T) { t.Parallel() var ( src = rand.NewSource(time.Now().UnixNano()) entropy = rand.New(src) monotonic = ulid.Monotonic(entropy, 0) safe = &safeMonotonicReader{MonotonicReader: monotonic} t0 = ulid.Timestamp(time.Now()) ) errs := make(chan error, 100) for i := 0; i < cap(errs); i++ { go func() { u0 := ulid.MustNew(t0, safe) u1 := ulid.MustNew(t0, safe) for j := 0; j < 1024; j++ { u0, u1 = u1, ulid.MustNew(t0, safe) if u0.String() >= u1.String() { errs <- fmt.Errorf( "%s (%d %x) >= %s (%d %x)", u0.String(), u0.Time(), u0.Entropy(), u1.String(), u1.Time(), u1.Entropy(), ) return } } errs <- nil }() } for i := 0; i < cap(errs); i++ { if err := <-errs; err != nil { t.Fatal(err) } } } type safeMonotonicReader struct { mtx sync.Mutex ulid.MonotonicReader } func (r *safeMonotonicReader) MonotonicRead(ms uint64, p []byte) (err error) { r.mtx.Lock() err = r.MonotonicReader.MonotonicRead(ms, p) r.mtx.Unlock() return err } func BenchmarkNew(b *testing.B) { benchmarkMakeULID(b, func(timestamp uint64, entropy io.Reader) { _, _ = ulid.New(timestamp, entropy) }) } func BenchmarkMustNew(b *testing.B) { benchmarkMakeULID(b, func(timestamp uint64, entropy io.Reader) { _ = ulid.MustNew(timestamp, entropy) }) } func benchmarkMakeULID(b *testing.B, f func(uint64, io.Reader)) { b.ReportAllocs() b.SetBytes(int64(len(ulid.ULID{}))) rng := rand.New(rand.NewSource(time.Now().UnixNano())) for _, tc := range []struct { name string timestamps []uint64 entropy io.Reader }{ {"WithCrypoEntropy", []uint64{123}, crand.Reader}, {"WithEntropy", []uint64{123}, rng}, {"WithMonotonicEntropy_SameTimestamp_Inc0", []uint64{123}, ulid.Monotonic(rng, 0)}, 
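// The *_DifferentTimestamp_* cases alternate between two timestamps, so the
// monotonic source re-reads fresh entropy on every other call instead of
// just incrementing the previous value.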
{"WithMonotonicEntropy_DifferentTimestamp_Inc0", []uint64{122, 123}, ulid.Monotonic(rng, 0)}, {"WithMonotonicEntropy_SameTimestamp_Inc1", []uint64{123}, ulid.Monotonic(rng, 1)}, {"WithMonotonicEntropy_DifferentTimestamp_Inc1", []uint64{122, 123}, ulid.Monotonic(rng, 1)}, {"WithCryptoMonotonicEntropy_SameTimestamp_Inc1", []uint64{123}, ulid.Monotonic(crand.Reader, 1)}, {"WithCryptoMonotonicEntropy_DifferentTimestamp_Inc1", []uint64{122, 123}, ulid.Monotonic(crand.Reader, 1)}, {"WithoutEntropy", []uint64{123}, nil}, } { tc := tc b.Run(tc.name, func(b *testing.B) { b.StopTimer() b.ResetTimer() b.StartTimer() for i := 0; i < b.N; i++ { f(tc.timestamps[i%len(tc.timestamps)], tc.entropy) } }) } } func BenchmarkParse(b *testing.B) { const s = "0000XSNJG0MQJHBF4QX1EFD6Y3" b.SetBytes(int64(len(s))) for i := 0; i < b.N; i++ { _, _ = ulid.Parse(s) } } func BenchmarkParseStrict(b *testing.B) { const s = "0000XSNJG0MQJHBF4QX1EFD6Y3" b.SetBytes(int64(len(s))) for i := 0; i < b.N; i++ { _, _ = ulid.ParseStrict(s) } } func BenchmarkMustParse(b *testing.B) { const s = "0000XSNJG0MQJHBF4QX1EFD6Y3" b.SetBytes(int64(len(s))) for i := 0; i < b.N; i++ { _ = ulid.MustParse(s) } } func BenchmarkString(b *testing.B) { entropy := rand.New(rand.NewSource(time.Now().UnixNano())) id := ulid.MustNew(123456, entropy) b.SetBytes(int64(len(id))) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.String() } } func BenchmarkMarshal(b *testing.B) { entropy := rand.New(rand.NewSource(time.Now().UnixNano())) buf := make([]byte, ulid.EncodedSize) id := ulid.MustNew(123456, entropy) b.Run("Text", func(b *testing.B) { b.SetBytes(int64(len(id))) b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = id.MarshalText() } }) b.Run("TextTo", func(b *testing.B) { b.SetBytes(int64(len(id))) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.MarshalTextTo(buf) } }) b.Run("Binary", func(b *testing.B) { b.SetBytes(int64(len(id))) b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = id.MarshalBinary() } }) b.Run("BinaryTo", func(b *testing.B) { b.SetBytes(int64(len(id))) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.MarshalBinaryTo(buf) } }) } func BenchmarkUnmarshal(b *testing.B) { var id ulid.ULID s := "0000XSNJG0MQJHBF4QX1EFD6Y3" txt := []byte(s) bin, _ := ulid.MustParse(s).MarshalBinary() b.Run("Text", func(b *testing.B) { b.SetBytes(int64(len(txt))) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.UnmarshalText(txt) } }) b.Run("Binary", func(b *testing.B) { b.SetBytes(int64(len(bin))) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.UnmarshalBinary(bin) } }) } func BenchmarkNow(b *testing.B) { b.SetBytes(8) b.ResetTimer() for i := 0; i < b.N; i++ { _ = ulid.Now() } } func BenchmarkTimestamp(b *testing.B) { now := time.Now() b.SetBytes(8) b.ResetTimer() for i := 0; i < b.N; i++ { _ = ulid.Timestamp(now) } } func BenchmarkTime(b *testing.B) { id := ulid.MustNew(123456789, nil) b.SetBytes(8) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.Time() } } func BenchmarkSetTime(b *testing.B) { var id ulid.ULID b.SetBytes(8) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.SetTime(123456789) } } func BenchmarkEntropy(b *testing.B) { id := ulid.MustNew(0, strings.NewReader("ABCDEFGHIJKLMNOP")) b.SetBytes(10) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.Entropy() } } func BenchmarkSetEntropy(b *testing.B) { var id ulid.ULID e := []byte("ABCDEFGHIJKLMNOP") b.SetBytes(10) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.SetEntropy(e) } } func BenchmarkCompare(b *testing.B) { id, other := ulid.MustNew(12345, nil), ulid.MustNew(54321, nil) 
b.SetBytes(int64(len(id) * 2)) b.ResetTimer() for i := 0; i < b.N; i++ { _ = id.Compare(other) } }
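
// ExampleMonotonic is an illustrative sketch of monotonic generation within
// one millisecond: a single Monotonic source keeps successive ULIDs strictly
// increasing. It has no Output comment, so `go test` compiles but does not
// run it.
func ExampleMonotonic() {
	t0 := ulid.Timestamp(time.Unix(1000000, 0))
	entropy := ulid.Monotonic(rand.New(rand.NewSource(12345)), 0)

	first := ulid.MustNew(t0, entropy)
	second := ulid.MustNew(t0, entropy) // same millisecond: entropy increments

	fmt.Println(first.Compare(second) < 0)
}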