pax_global_header00006660000000000000000000000064127477234140014525gustar00rootroot0000000000000052 comment=b6121c6199b7beecde08e6853f256a6475fe8031 mgo-r2016.08.01/000077500000000000000000000000001274772341400130705ustar00rootroot00000000000000mgo-r2016.08.01/.travis.yml000066400000000000000000000020041274772341400151750ustar00rootroot00000000000000language: go go_import_path: gopkg.in/mgo.v2 addons: apt: packages: env: global: - BUCKET=https://niemeyer.s3.amazonaws.com matrix: - GO=1.4.1 MONGODB=x86_64-2.2.7 - GO=1.4.1 MONGODB=x86_64-2.4.14 - GO=1.4.1 MONGODB=x86_64-2.6.11 - GO=1.4.1 MONGODB=x86_64-3.0.9 - GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal - GO=1.5.3 MONGODB=x86_64-3.0.9 - GO=1.6 MONGODB=x86_64-3.0.9 install: - eval "$(gimme $GO)" - wget $BUCKET/mongodb-linux-$MONGODB.tgz - tar xzvf mongodb-linux-$MONGODB.tgz - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH - wget $BUCKET/daemontools.tar.gz - tar xzvf daemontools.tar.gz - export PATH=$PWD/daemontools:$PATH - go get gopkg.in/check.v1 - go get gopkg.in/yaml.v2 - go get gopkg.in/tomb.v2 before_script: - export NOIPV6=1 - make startdb script: - (cd bson && go test -check.v) - go test -check.v -fast - (cd txn && go test -check.v) # vim:sw=4:ts=4:et mgo-r2016.08.01/LICENSE000066400000000000000000000025241274772341400141000ustar00rootroot00000000000000mgo - MongoDB driver for Go Copyright (c) 2010-2013 - Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. mgo-r2016.08.01/Makefile000066400000000000000000000001031274772341400145220ustar00rootroot00000000000000startdb: @harness/setup.sh start stopdb: @harness/setup.sh stop mgo-r2016.08.01/README.md000066400000000000000000000002101274772341400143400ustar00rootroot00000000000000The MongoDB driver for Go ------------------------- Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details. mgo-r2016.08.01/auth.go000066400000000000000000000302151274772341400143610ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "crypto/md5" "crypto/sha1" "encoding/hex" "errors" "fmt" "sync" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/internal/scram" ) type authCmd struct { Authenticate int Nonce string User string Key string } type startSaslCmd struct { StartSASL int `bson:"startSasl"` } type authResult struct { ErrMsg string Ok bool } type getNonceCmd struct { GetNonce int } type getNonceResult struct { Nonce string Err string "$err" Code int } type logoutCmd struct { Logout int } type saslCmd struct { Start int `bson:"saslStart,omitempty"` Continue int `bson:"saslContinue,omitempty"` ConversationId int `bson:"conversationId,omitempty"` Mechanism string `bson:"mechanism,omitempty"` Payload []byte } type saslResult struct { Ok bool `bson:"ok"` NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) 
Done bool ConversationId int `bson:"conversationId"` Payload []byte ErrMsg string } type saslStepper interface { Step(serverData []byte) (clientData []byte, done bool, err error) Close() } func (socket *mongoSocket) getNonce() (nonce string, err error) { socket.Lock() for socket.cachedNonce == "" && socket.dead == nil { debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) socket.gotNonce.Wait() } if socket.cachedNonce == "mongos" { socket.Unlock() return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") } debugf("Socket %p to %s: got nonce", socket, socket.addr) nonce, err = socket.cachedNonce, socket.dead socket.cachedNonce = "" socket.Unlock() if err != nil { nonce = "" } return } func (socket *mongoSocket) resetNonce() { debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) op := &queryOp{} op.query = &getNonceCmd{GetNonce: 1} op.collection = "admin.$cmd" op.limit = -1 op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { if err != nil { socket.kill(errors.New("getNonce: "+err.Error()), true) return } result := &getNonceResult{} err = bson.Unmarshal(docData, &result) if err != nil { socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) return } debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) if result.Code == 13390 { // mongos doesn't yet support auth (see http://j.mp/mongos-auth) result.Nonce = "mongos" } else if result.Nonce == "" { var msg string if result.Err != "" { msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) } else { msg = "Got an empty nonce" } socket.kill(errors.New(msg), true) return } socket.Lock() if socket.cachedNonce != "" { socket.Unlock() panic("resetNonce: nonce already cached") } socket.cachedNonce = result.Nonce socket.gotNonce.Signal() socket.Unlock() } err := socket.Query(op) if err != nil { socket.kill(errors.New("resetNonce: "+err.Error()), true) } } func (socket *mongoSocket) 
Login(cred Credential) error { socket.Lock() if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { cred.Mechanism = "SCRAM-SHA-1" } for _, sockCred := range socket.creds { if sockCred == cred { debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) socket.Unlock() return nil } } if socket.dropLogout(cred) { debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) socket.creds = append(socket.creds, cred) socket.Unlock() return nil } socket.Unlock() debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) var err error switch cred.Mechanism { case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. err = socket.loginClassic(cred) case "PLAIN": err = socket.loginPlain(cred) case "MONGODB-X509": err = socket.loginX509(cred) default: // Try SASL for everything else, if it is available. err = socket.loginSASL(cred) } if err != nil { debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) } else { debugf("Socket %p to %s: login successful", socket, socket.addr) } return err } func (socket *mongoSocket) loginClassic(cred Credential) error { // Note that this only works properly because this function is // synchronous, which means the nonce won't get reset while we're // using it and any other login requests will block waiting for a // new nonce provided in the defer call below. 
nonce, err := socket.getNonce() if err != nil { return err } defer socket.resetNonce() psum := md5.New() psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) ksum := md5.New() ksum.Write([]byte(nonce + cred.Username)) ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) key := hex.EncodeToString(ksum.Sum(nil)) cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } type authX509Cmd struct { Authenticate int User string Mechanism string } func (socket *mongoSocket) loginX509(cred Credential) error { cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } func (socket *mongoSocket) loginPlain(cred Credential) error { cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } func (socket *mongoSocket) loginSASL(cred Credential) error { var sasl saslStepper var err error if cred.Mechanism == "SCRAM-SHA-1" { // SCRAM is handled without external libraries. 
sasl = saslNewScram(cred) } else if len(cred.ServiceHost) > 0 { sasl, err = saslNew(cred, cred.ServiceHost) } else { sasl, err = saslNew(cred, socket.Server().Addr) } if err != nil { return err } defer sasl.Close() // The goal of this logic is to carry a locked socket until the // local SASL step confirms the auth is valid; the socket needs to be // locked so that concurrent action doesn't leave the socket in an // auth state that doesn't reflect the operations that took place. // As a simple case, imagine inverting login=>logout to logout=>login. // // The logic below works because the lock func isn't called concurrently. locked := false lock := func(b bool) { if locked != b { locked = b if b { socket.Lock() } else { socket.Unlock() } } } lock(true) defer lock(false) start := 1 cmd := saslCmd{} res := saslResult{} for { payload, done, err := sasl.Step(res.Payload) if err != nil { return err } if done && res.Done { socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) break } lock(false) cmd = saslCmd{ Start: start, Continue: 1 - start, ConversationId: res.ConversationId, Mechanism: cred.Mechanism, Payload: payload, } start = 0 err = socket.loginRun(cred.Source, &cmd, &res, func() error { // See the comment on lock for why this is necessary. 
lock(true) if !res.Ok || res.NotOk { return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) } return nil }) if err != nil { return err } if done && res.Done { socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) break } } return nil } func saslNewScram(cred Credential) *saslScram { credsum := md5.New() credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) return &saslScram{cred: cred, client: client} } type saslScram struct { cred Credential client *scram.Client } func (s *saslScram) Close() {} func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { more := s.client.Step(serverData) return s.client.Out(), !more, s.client.Err() } func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { var mutex sync.Mutex var replyErr error mutex.Lock() op := queryOp{} op.query = query op.collection = db + ".$cmd" op.limit = -1 op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { defer mutex.Unlock() if err != nil { replyErr = err return } err = bson.Unmarshal(docData, result) if err != nil { replyErr = err } else { // Must handle this within the read loop for the socket, so // that concurrent login requests are properly ordered. replyErr = f() } } err := socket.Query(&op) if err != nil { return err } mutex.Lock() // Wait. return replyErr } func (socket *mongoSocket) Logout(db string) { socket.Lock() cred, found := socket.dropAuth(db) if found { debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) socket.logout = append(socket.logout, cred) } socket.Unlock() } func (socket *mongoSocket) LogoutAll() { socket.Lock() if l := len(socket.creds); l > 0 { debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) socket.logout = append(socket.logout, socket.creds...) 
socket.creds = socket.creds[0:0] } socket.Unlock() } func (socket *mongoSocket) flushLogout() (ops []interface{}) { socket.Lock() if l := len(socket.logout); l > 0 { debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) for i := 0; i != l; i++ { op := queryOp{} op.query = &logoutCmd{1} op.collection = socket.logout[i].Source + ".$cmd" op.limit = -1 ops = append(ops, &op) } socket.logout = socket.logout[0:0] } socket.Unlock() return } func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { for i, sockCred := range socket.creds { if sockCred.Source == db { copy(socket.creds[i:], socket.creds[i+1:]) socket.creds = socket.creds[:len(socket.creds)-1] return sockCred, true } } return cred, false } func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { for i, sockCred := range socket.logout { if sockCred == cred { copy(socket.logout[i:], socket.logout[i+1:]) socket.logout = socket.logout[:len(socket.logout)-1] return true } } return false } mgo-r2016.08.01/auth_test.go000066400000000000000000000761051274772341400154300ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "crypto/tls" "flag" "fmt" "io/ioutil" "net" "net/url" "os" "runtime" "sync" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" ) func (s *S) TestAuthLoginDatabase(c *C) { // Test both with a normal database and with an authenticated shard. for _, addr := range []string{"localhost:40002", "localhost:40203"} { session, err := mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") admindb := session.DB("admin") err = admindb.Login("root", "wrong") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } } func (s *S) TestAuthLoginSession(c *C) { // Test both with a normal database and with an authenticated shard. 
for _, addr := range []string{"localhost:40002", "localhost:40203"} { session, err := mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") cred := mgo.Credential{ Username: "root", Password: "wrong", } err = session.Login(&cred) c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") cred.Password = "rapadura" err = session.Login(&cred) c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } } func (s *S) TestAuthLoginLogout(c *C) { // Test both with a normal database and with an authenticated shard. for _, addr := range []string{"localhost:40002", "localhost:40203"} { session, err := mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) admindb.Logout() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") // Must have dropped auth from the session too. session = session.Copy() defer session.Close() coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } } func (s *S) TestAuthLoginLogoutAll(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session.LogoutAll() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") // Must have dropped auth from the session too. 
session = session.Copy() defer session.Close() coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } func (s *S) TestAuthUpsertUserErrors(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.UpsertUser(&mgo.User{}) c.Assert(err, ErrorMatches, "user has no Username") err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"}) c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set") err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}}) c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in the admin or \\$external databases") } func (s *S) TestAuthUpsertUser(c *C) { if !s.versionAtLeast(2, 4) { c.Skip("UpsertUser only works on 2.4+") } session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") ruser := &mgo.User{ Username: "myruser", Password: "mypass", Roles: []mgo.Role{mgo.RoleRead}, } rwuser := &mgo.User{ Username: "myrwuser", Password: "mypass", Roles: []mgo.Role{mgo.RoleReadWrite}, } err = mydb.UpsertUser(ruser) c.Assert(err, IsNil) err = mydb.UpsertUser(rwuser) c.Assert(err, IsNil) err = mydb.Login("myruser", "mypass") c.Assert(err, IsNil) admindb.Logout() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") err = mydb.Login("myrwuser", "mypass") c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) myotherdb := session.DB("myotherdb") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) // Test UserSource. 
rwuserother := &mgo.User{ Username: "myrwuser", UserSource: "mydb", Roles: []mgo.Role{mgo.RoleRead}, } err = myotherdb.UpsertUser(rwuserother) if s.versionAtLeast(2, 6) { c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`) return } c.Assert(err, IsNil) admindb.Logout() // Test indirection via UserSource: we can't write to it, because // the roles for myrwuser are different there. othercoll := myotherdb.C("myothercoll") err = othercoll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // Reading works, though. err = othercoll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) // Can't login directly into the database using UserSource, though. err = myotherdb.Login("myrwuser", "mypass") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") } func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) { if !s.versionAtLeast(2, 4) { c.Skip("UpsertUser only works on 2.4+") } session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) ruser := &mgo.User{ Username: "myruser", Password: "mypass", OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}}, } err = admindb.UpsertUser(ruser) c.Assert(err, IsNil) defer admindb.RemoveUser("myruser") admindb.Logout() err = admindb.Login("myruser", "mypass") coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") err = coll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthUpsertUserUpdates(c *C) { if !s.versionAtLeast(2, 4) { c.Skip("UpsertUser only works on 2.4+") } session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") // Insert a user that can read. 
user := &mgo.User{ Username: "myruser", Password: "mypass", Roles: []mgo.Role{mgo.RoleRead}, } err = mydb.UpsertUser(user) c.Assert(err, IsNil) // Now update the user password. user = &mgo.User{ Username: "myruser", Password: "mynewpass", } err = mydb.UpsertUser(user) c.Assert(err, IsNil) // Login with the new user. usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb") c.Assert(err, IsNil) defer usession.Close() // Can read, but not write. err = usession.DB("mydb").C("mycoll").Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // Update the user role. user = &mgo.User{ Username: "myruser", Roles: []mgo.Role{mgo.RoleReadWrite}, } err = mydb.UpsertUser(user) c.Assert(err, IsNil) // Dial again to ensure the password hasn't changed. usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb") c.Assert(err, IsNil) defer usession.Close() // Now it can write. 
err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthAddUser(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myruser", "mypass", true) c.Assert(err, IsNil) err = mydb.AddUser("mywuser", "mypass", false) c.Assert(err, IsNil) err = mydb.Login("myruser", "mypass") c.Assert(err, IsNil) admindb.Logout() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") err = mydb.Login("mywuser", "mypass") c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthAddUserReplaces(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myuser", "myoldpass", false) c.Assert(err, IsNil) err = mydb.AddUser("myuser", "mynewpass", true) c.Assert(err, IsNil) admindb.Logout() err = mydb.Login("myuser", "myoldpass") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") err = mydb.Login("myuser", "mynewpass") c.Assert(err, IsNil) // ReadOnly flag was changed too. 
err = mydb.C("mycoll").Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") } func (s *S) TestAuthRemoveUser(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myuser", "mypass", true) c.Assert(err, IsNil) err = mydb.RemoveUser("myuser") c.Assert(err, IsNil) err = mydb.RemoveUser("myuser") c.Assert(err, Equals, mgo.ErrNotFound) err = mydb.Login("myuser", "mypass") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") } func (s *S) TestAuthLoginTwiceDoesNothing(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) oldStats := mgo.GetStats() err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) } func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) oldStats := mgo.GetStats() admindb.Logout() err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) } func (s *S) TestAuthLoginSwitchUser(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) err = admindb.Login("reader", "rapadura") c.Assert(err, IsNil) // Can't write. err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // But can read. 
result := struct{ N int }{} err = coll.Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } func (s *S) TestAuthLoginChangePassword(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myuser", "myoldpass", false) c.Assert(err, IsNil) err = mydb.Login("myuser", "myoldpass") c.Assert(err, IsNil) err = mydb.AddUser("myuser", "mynewpass", true) c.Assert(err, IsNil) err = mydb.Login("myuser", "mynewpass") c.Assert(err, IsNil) admindb.Logout() // The second login must be in effect, which means read-only. err = mydb.C("mycoll").Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") } func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session.Refresh() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session = session.Copy() defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingWithSessionClone(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session = session.Clone() defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingWithNewSession(c *C) { session, err := 
mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session = session.New() defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } func (s *S) TestAuthLoginCachingAcrossPool(c *C) { // Logins are cached even when the conenction goes back // into the pool. session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) // Add another user to test the logout case at the same time. mydb := session.DB("mydb") err = mydb.AddUser("myuser", "mypass", false) c.Assert(err, IsNil) err = mydb.Login("myuser", "mypass") c.Assert(err, IsNil) // Logout root explicitly, to test both cases. admindb.Logout() // Give socket back to pool. session.Refresh() // Brand new session, should use socket from the pool. other := session.New() defer other.Close() oldStats := mgo.GetStats() err = other.DB("admin").Login("root", "rapadura") c.Assert(err, IsNil) err = other.DB("mydb").Login("myuser", "mypass") c.Assert(err, IsNil) // Both logins were cached, so no ops. newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) // And they actually worked. err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) other.DB("admin").Logout() err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) { // Now verify that logouts are properly flushed if they // are not revalidated after leaving the pool. session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) // Add another user to test the logout case at the same time. 
mydb := session.DB("mydb") err = mydb.AddUser("myuser", "mypass", true) c.Assert(err, IsNil) err = mydb.Login("myuser", "mypass") c.Assert(err, IsNil) // Just some data to query later. err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) // Give socket back to pool. session.Refresh() // Brand new session, should use socket from the pool. other := session.New() defer other.Close() oldStats := mgo.GetStats() err = other.DB("mydb").Login("myuser", "mypass") c.Assert(err, IsNil) // Login was cached, so no ops. newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) // Can't write, since root has been implicitly logged out // when the collection went into the pool, and not revalidated. err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // But can read due to the revalidated myuser login. result := struct{ N int }{} err = other.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } func (s *S) TestAuthEventual(c *C) { // Eventual sessions don't keep sockets around, so they are // an interesting test case. 
session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) var wg sync.WaitGroup wg.Add(20) for i := 0; i != 10; i++ { go func() { defer wg.Done() var result struct{ N int } err := session.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) }() } for i := 0; i != 10; i++ { go func() { defer wg.Done() err := session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) }() } wg.Wait() } func (s *S) TestAuthURL(c *C) { session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") c.Assert(err, IsNil) defer session.Close() err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthURLWrongCredentials(c *C) { session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/") if session != nil { session.Close() } c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") c.Assert(session, IsNil) } func (s *S) TestAuthURLWithNewSession(c *C) { // When authentication is in the URL, the new session will // actually carry it on as well, even if logged out explicitly. session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") c.Assert(err, IsNil) defer session.Close() session.DB("admin").Logout() // Do it twice to ensure it passes the needed data on. session = session.New() defer session.Close() session = session.New() defer session.Close() err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthURLWithDatabase(c *C) { session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002") c.Assert(err, IsNil) defer session.Close() mydb := session.DB("mydb") err = mydb.AddUser("myruser", "mypass", true) c.Assert(err, IsNil) // Test once with database, and once with source. 
for i := 0; i < 2; i++ { var url string if i == 0 { url = "mongodb://myruser:mypass@localhost:40002/mydb" } else { url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb" } usession, err := mgo.Dial(url) c.Assert(err, IsNil) defer usession.Close() ucoll := usession.DB("mydb").C("mycoll") err = ucoll.FindId(0).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = ucoll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") } } func (s *S) TestDefaultDatabase(c *C) { tests := []struct{ url, db string }{ {"mongodb://root:rapadura@localhost:40002", "test"}, {"mongodb://root:rapadura@localhost:40002/admin", "admin"}, {"mongodb://localhost:40001", "test"}, {"mongodb://localhost:40001/", "test"}, {"mongodb://localhost:40001/mydb", "mydb"}, } for _, test := range tests { session, err := mgo.Dial(test.url) c.Assert(err, IsNil) defer session.Close() c.Logf("test: %#v", test) c.Assert(session.DB("").Name, Equals, test.db) scopy := session.Copy() c.Check(scopy.DB("").Name, Equals, test.db) scopy.Close() } } func (s *S) TestAuthDirect(c *C) { // Direct connections must work to the master and slaves. for _, port := range []string{"40031", "40032", "40033"} { url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port) session, err := mgo.Dial(url) c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) var result struct{} err = session.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, Equals, mgo.ErrNotFound) } } func (s *S) TestAuthDirectWithLogin(c *C) { // Direct connections must work to the master and slaves. 
for _, port := range []string{"40031", "40032", "40033"} { url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port) session, err := mgo.Dial(url) c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) session.SetSyncTimeout(3 * time.Second) err = session.DB("admin").Login("root", "rapadura") c.Assert(err, IsNil) var result struct{} err = session.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, Equals, mgo.ErrNotFound) } } func (s *S) TestAuthScramSha1Cred(c *C) { if !s.versionAtLeast(2, 7, 7) { c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") } cred := &mgo.Credential{ Username: "root", Password: "rapadura", Mechanism: "SCRAM-SHA-1", Source: "admin", } host := "localhost:40002" c.Logf("Connecting to %s...", host) session, err := mgo.Dial(host) c.Assert(err, IsNil) defer session.Close() mycoll := session.DB("admin").C("mycoll") c.Logf("Connected! Testing the need for authentication...") err = mycoll.Find(nil).One(nil) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") c.Logf("Connected! Testing the need for authentication...") err = mycoll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthScramSha1URL(c *C) { if !s.versionAtLeast(2, 7, 7) { c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") } host := "localhost:40002" c.Logf("Connecting to %s...", host) session, err := mgo.Dial(fmt.Sprintf("root:rapadura@%s?authMechanism=SCRAM-SHA-1", host)) c.Assert(err, IsNil) defer session.Close() mycoll := session.DB("admin").C("mycoll") c.Logf("Connected! 
Testing the need for authentication...") err = mycoll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthX509Cred(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() binfo, err := session.BuildInfo() c.Assert(err, IsNil) if binfo.OpenSSLVersion == "" { c.Skip("server does not support SSL") } clientCertPEM, err := ioutil.ReadFile("harness/certs/client.pem") c.Assert(err, IsNil) clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) c.Assert(err, IsNil) tlsConfig := &tls.Config{ // Isolating tests to client certs, don't care about server validation. InsecureSkipVerify: true, Certificates: []tls.Certificate{clientCert}, } var host = "localhost:40003" c.Logf("Connecting to %s...", host) session, err = mgo.DialWithInfo(&mgo.DialInfo{ Addrs: []string{host}, DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { return tls.Dial("tcp", addr.String(), tlsConfig) }, }) c.Assert(err, IsNil) defer session.Close() err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"}) c.Assert(err, IsNil) // This needs to be kept in sync with client.pem x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" externalDB := session.DB("$external") var x509User mgo.User = mgo.User{ Username: x509Subject, OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}, } err = externalDB.UpsertUser(&x509User) c.Assert(err, IsNil) session.LogoutAll() c.Logf("Connected! 
Ensuring authentication is required...") names, err := session.DatabaseNames() c.Assert(err, ErrorMatches, "not authorized .*") cred := &mgo.Credential{ Username: x509Subject, Mechanism: "MONGODB-X509", Source: "$external", } c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") names, err = session.DatabaseNames() c.Assert(err, IsNil) c.Assert(len(names) > 0, Equals, true) } var ( plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") plainUser = "einstein" plainPass = "password" ) func (s *S) TestAuthPlainCred(c *C) { if *plainFlag == "" { c.Skip("no -plain") } cred := &mgo.Credential{ Username: plainUser, Password: plainPass, Source: "$external", Mechanism: "PLAIN", } c.Logf("Connecting to %s...", *plainFlag) session, err := mgo.Dial(*plainFlag) c.Assert(err, IsNil) defer session.Close() records := session.DB("records").C("records") c.Logf("Connected! Testing the need for authentication...") err = records.Find(nil).One(nil) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") c.Logf("Connected! Testing the need for authentication...") err = records.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthPlainURL(c *C) { if *plainFlag == "" { c.Skip("no -plain") } c.Logf("Connecting to %s...", *plainFlag) session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag)) c.Assert(err, IsNil) defer session.Close() c.Logf("Connected! 
Testing the need for authentication...") err = session.DB("records").C("records").Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } var ( kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)") kerberosHost = "ldaptest.10gen.cc" kerberosUser = "drivers@LDAPTEST.10GEN.CC" winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" ) // Kerberos has its own suite because it talks to a remote server // that is prepared to authenticate against a kerberos deployment. type KerberosSuite struct{} var _ = Suite(&KerberosSuite{}) func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) { mgo.SetDebug(true) mgo.SetStats(true) } func (kerberosSuite *KerberosSuite) TearDownSuite(c *C) { mgo.SetDebug(false) mgo.SetStats(false) } func (kerberosSuite *KerberosSuite) SetUpTest(c *C) { mgo.SetLogger((*cLogger)(c)) mgo.ResetStats() } func (kerberosSuite *KerberosSuite) TearDownTest(c *C) { mgo.SetLogger(nil) } func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", } windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, IsNil) defer session.Close() c.Logf("Connected! 
Testing the need for authentication...") n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, ErrorMatches, ".*authorized.*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") n, err = session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } c.Logf("Connecting to %s...", kerberosHost) connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI" if runtime.GOOS == "windows" { connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI" } session, err := mgo.Dial(connectUri) c.Assert(err, IsNil) defer session.Close() n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } wrongServiceName := "wrong" rightServiceName := "mongodb" cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", Service: wrongServiceName, } windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, IsNil) defer session.Close() c.Logf("Authenticating with incorrect service name...") err = session.Login(cred) c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") cred.Service = rightServiceName c.Logf("Authenticating with correct service name...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceHost(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } wrongServiceHost := "eggs.bacon.tk" rightServiceHost := kerberosHost 
cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", ServiceHost: wrongServiceHost, } windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, IsNil) defer session.Close() c.Logf("Authenticating with incorrect service host...") err = session.Login(cred) c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") cred.ServiceHost = rightServiceHost c.Logf("Authenticating with correct service host...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } // No kinit on SSPI-style Kerberos, so we need to provide a password. In order // to avoid inlining password, require it to be set as an environment variable, // for instance: `SET MGO_KERBEROS_PASSWORD=this_isnt_the_password` func getWindowsKerberosPassword() string { pw := os.Getenv(winKerberosPasswordEnv) if pw == "" { panic(fmt.Sprintf("Need to set %v environment variable to run Kerberos tests on Windows", winKerberosPasswordEnv)) } return pw } func windowsAppendPasswordToCredential(cred *mgo.Credential) { if runtime.GOOS == "windows" { cred.Password = getWindowsKerberosPassword() } } mgo-r2016.08.01/bson/000077500000000000000000000000001274772341400140315ustar00rootroot00000000000000mgo-r2016.08.01/bson/LICENSE000066400000000000000000000025141274772341400150400ustar00rootroot00000000000000BSON library for Go Copyright (c) 2010-2012 - Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. mgo-r2016.08.01/bson/bson.go000066400000000000000000000551571274772341400153360ustar00rootroot00000000000000// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Package bson is an implementation of the BSON specification for Go: // // http://bsonspec.org // // It was created as part of the mgo MongoDB driver for Go, but is standalone // and may be used on its own without the driver. package bson import ( "bytes" "crypto/md5" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "errors" "fmt" "io" "os" "reflect" "runtime" "strings" "sync" "sync/atomic" "time" ) // -------------------------------------------------------------------------- // The public API. // A value implementing the bson.Getter interface will have its GetBSON // method called when the given value has to be marshalled, and the result // of this method will be marshaled in place of the actual object. // // If GetBSON returns return a non-nil error, the marshalling procedure // will stop and error out with the provided value. type Getter interface { GetBSON() (interface{}, error) } // A value implementing the bson.Setter interface will receive the BSON // value via the SetBSON method during unmarshaling, and the object // itself will not be changed as usual. // // If setting the value works, the method should return nil or alternatively // bson.SetZero to set the respective field to its zero value (nil for // pointer types). If SetBSON returns a value of type bson.TypeError, the // BSON value will be omitted from a map or slice being decoded and the // unmarshalling will continue. 
If it returns any other non-nil error, the // unmarshalling procedure will stop and error out with the provided value. // // This interface is generally useful in pointer receivers, since the method // will want to change the receiver. A type field that implements the Setter // interface doesn't have to be a pointer, though. // // Unlike the usual behavior, unmarshalling onto a value that implements a // Setter interface will NOT reset the value to its zero state. This allows // the value to decide by itself how to be unmarshalled. // // For example: // // type MyString string // // func (s *MyString) SetBSON(raw bson.Raw) error { // return raw.Unmarshal(s) // } // type Setter interface { SetBSON(raw Raw) error } // SetZero may be returned from a SetBSON method to have the value set to // its respective zero value. When used in pointer values, this will set the // field to nil rather than to the pre-allocated value. var SetZero = errors.New("set to zero") // M is a convenient alias for a map[string]interface{} map, useful for // dealing with BSON in a native way. For instance: // // bson.M{"a": 1, "b": true} // // There's no special handling for this type in addition to what's done anyway // for an equivalent map type. Elements in the map will be dumped in an // undefined ordered. See also the bson.D type for an ordered alternative. type M map[string]interface{} // D represents a BSON document containing ordered elements. For example: // // bson.D{{"a", 1}, {"b", true}} // // In some situations, such as when creating indexes for MongoDB, the order in // which the elements are defined is important. If the order is not important, // using a map is generally more comfortable. See bson.M and bson.RawD. type D []DocElem // DocElem is an element of the bson.D document representation. type DocElem struct { Name string Value interface{} } // Map returns a map out of the ordered element name/value pairs in d. 
func (d D) Map() (m M) { m = make(M, len(d)) for _, item := range d { m[item.Name] = item.Value } return m } // The Raw type represents raw unprocessed BSON documents and elements. // Kind is the kind of element as defined per the BSON specification, and // Data is the raw unprocessed data for the respective element. // Using this type it is possible to unmarshal or marshal values partially. // // Relevant documentation: // // http://bsonspec.org/#/specification // type Raw struct { Kind byte Data []byte } // RawD represents a BSON document containing raw unprocessed elements. // This low-level representation may be useful when lazily processing // documents of uncertain content, or when manipulating the raw content // documents in general. type RawD []RawDocElem // See the RawD type. type RawDocElem struct { Name string Value Raw } // ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes // long. MongoDB objects by default have such a property set in their "_id" // property. // // http://www.mongodb.org/display/DOCS/Object+IDs type ObjectId string // ObjectIdHex returns an ObjectId from the provided hex representation. // Calling this function with an invalid hex representation will // cause a runtime panic. See the IsObjectIdHex function. func ObjectIdHex(s string) ObjectId { d, err := hex.DecodeString(s) if err != nil || len(d) != 12 { panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s)) } return ObjectId(d) } // IsObjectIdHex returns whether s is a valid hex representation of // an ObjectId. See the ObjectIdHex function. func IsObjectIdHex(s string) bool { if len(s) != 24 { return false } _, err := hex.DecodeString(s) return err == nil } // objectIdCounter is atomically incremented when generating a new ObjectId // using NewObjectId() function. It's used as a counter part of an id. var objectIdCounter uint32 = readRandomUint32() // readRandomUint32 returns a random objectIdCounter. 
func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { panic(fmt.Errorf("cannot read random object id: %v", err)) } return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) } // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() var processId = os.Getpid() // readMachineId generates and returns a machine id. // If this function fails to get the hostname it will cause a runtime error. func readMachineId() []byte { var sum [3]byte id := sum[:] hostname, err1 := os.Hostname() if err1 != nil { _, err2 := io.ReadFull(rand.Reader, id) if err2 != nil { panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2)) } return id } hw := md5.New() hw.Write([]byte(hostname)) copy(id, hw.Sum(nil)) return id } // NewObjectId returns a new unique ObjectId. func NewObjectId() ObjectId { var b [12]byte // Timestamp, 4 bytes, big endian binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix())) // Machine, first 3 bytes of md5(hostname) b[4] = machineId[0] b[5] = machineId[1] b[6] = machineId[2] // Pid, 2 bytes, specs don't specify endianness, but we use big endian. b[7] = byte(processId >> 8) b[8] = byte(processId) // Increment, 3 bytes, big endian i := atomic.AddUint32(&objectIdCounter, 1) b[9] = byte(i >> 16) b[10] = byte(i >> 8) b[11] = byte(i) return ObjectId(b[:]) } // NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled // with the provided number of seconds from epoch UTC, and all other parts // filled with zeroes. It's not safe to insert a document with an id generated // by this method, it is useful only for queries to find documents with ids // generated before or after the specified timestamp. 
func NewObjectIdWithTime(t time.Time) ObjectId { var b [12]byte binary.BigEndian.PutUint32(b[:4], uint32(t.Unix())) return ObjectId(string(b[:])) } // String returns a hex string representation of the id. // Example: ObjectIdHex("4d88e15b60f486e428412dc9"). func (id ObjectId) String() string { return fmt.Sprintf(`ObjectIdHex("%x")`, string(id)) } // Hex returns a hex representation of the ObjectId. func (id ObjectId) Hex() string { return hex.EncodeToString([]byte(id)) } // MarshalJSON turns a bson.ObjectId into a json.Marshaller. func (id ObjectId) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%x"`, string(id))), nil } var nullBytes = []byte("null") // UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. func (id *ObjectId) UnmarshalJSON(data []byte) error { if len(data) > 0 && (data[0] == '{' || data[0] == 'O') { var v struct { Id json.RawMessage `json:"$oid"` Func struct { Id json.RawMessage } `json:"$oidFunc"` } err := jdec(data, &v) if err == nil { if len(v.Id) > 0 { data = []byte(v.Id) } else { data = []byte(v.Func.Id) } } } if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { *id = "" return nil } if len(data) != 26 || data[0] != '"' || data[25] != '"' { return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data))) } var buf [12]byte _, err := hex.Decode(buf[:], data[1:25]) if err != nil { return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err)) } *id = ObjectId(string(buf[:])) return nil } // MarshalText turns bson.ObjectId into an encoding.TextMarshaler. func (id ObjectId) MarshalText() ([]byte, error) { return []byte(fmt.Sprintf("%x", string(id))), nil } // UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler. 
func (id *ObjectId) UnmarshalText(data []byte) error { if len(data) == 1 && data[0] == ' ' || len(data) == 0 { *id = "" return nil } if len(data) != 24 { return fmt.Errorf("invalid ObjectId: %s", data) } var buf [12]byte _, err := hex.Decode(buf[:], data[:]) if err != nil { return fmt.Errorf("invalid ObjectId: %s (%s)", data, err) } *id = ObjectId(string(buf[:])) return nil } // Valid returns true if id is valid. A valid id must contain exactly 12 bytes. func (id ObjectId) Valid() bool { return len(id) == 12 } // byteSlice returns byte slice of id from start to end. // Calling this function with an invalid id will cause a runtime panic. func (id ObjectId) byteSlice(start, end int) []byte { if len(id) != 12 { panic(fmt.Sprintf("invalid ObjectId: %q", string(id))) } return []byte(string(id)[start:end]) } // Time returns the timestamp part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Time() time.Time { // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4))) return time.Unix(secs, 0) } // Machine returns the 3-byte machine id part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Machine() []byte { return id.byteSlice(4, 7) } // Pid returns the process id part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Pid() uint16 { return binary.BigEndian.Uint16(id.byteSlice(7, 9)) } // Counter returns the incrementing value part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Counter() int32 { b := id.byteSlice(9, 12) // Counter is stored as big-endian 3-byte value return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) } // The Symbol type is similar to a string and is used in languages with a // distinct symbol type. type Symbol string // Now returns the current time with millisecond precision. 
MongoDB stores // timestamps with the same precision, so a Time returned from this method // will not change after a roundtrip to the database. That's the only reason // why this function exists. Using the time.Now function also works fine // otherwise. func Now() time.Time { return time.Unix(0, time.Now().UnixNano()/1e6*1e6) } // MongoTimestamp is a special internal type used by MongoDB that for some // strange reason has its own datatype defined in BSON. type MongoTimestamp int64 type orderKey int64 // MaxKey is a special value that compares higher than all other possible BSON // values in a MongoDB database. var MaxKey = orderKey(1<<63 - 1) // MinKey is a special value that compares lower than all other possible BSON // values in a MongoDB database. var MinKey = orderKey(-1 << 63) type undefined struct{} // Undefined represents the undefined BSON value. var Undefined undefined // Binary is a representation for non-standard binary values. Any kind should // work, but the following are known as of this writing: // // 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}. // 0x01 - Function (!?) // 0x02 - Obsolete generic. // 0x03 - UUID // 0x05 - MD5 // 0x80 - User defined. // type Binary struct { Kind byte Data []byte } // RegEx represents a regular expression. The Options field may contain // individual characters defining the way in which the pattern should be // applied, and must be sorted. Valid options as of this writing are 'i' for // case insensitive matching, 'm' for multi-line matching, 'x' for verbose // mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all // mode (a '.' matches everything), and 'u' to make \w, \W, and similar match // unicode. The value of the Options parameter is not verified before being // marshaled into the BSON format. type RegEx struct { Pattern string Options string } // JavaScript is a type that holds JavaScript code. 
If Scope is non-nil, it // will be marshaled as a mapping from identifiers to values that may be // used when evaluating the provided Code. type JavaScript struct { Code string Scope interface{} } // DBPointer refers to a document id in a namespace. // // This type is deprecated in the BSON specification and should not be used // except for backwards compatibility with ancient applications. type DBPointer struct { Namespace string Id ObjectId } const initialBufferSize = 64 func handleErr(err *error) { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } else if _, ok := r.(externalPanic); ok { panic(r) } else if s, ok := r.(string); ok { *err = errors.New(s) } else if e, ok := r.(error); ok { *err = e } else { panic(r) } } } // Marshal serializes the in value, which may be a map or a struct value. // In the case of struct values, only exported fields will be serialized, // and the order of serialized fields will match that of the struct itself. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for // the field. The tag formats accepted are: // // "[][,[,]]" // // `(...) bson:"[][,[,]]" (...)` // // The following flags are currently supported: // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. // // minsize Marshal an int64 value as an int32, if that's feasible // while preserving the numeric value. // // inline Inline the field, which must be a struct or a map, // causing all of its fields or keys to be processed as if // they were part of the outer struct. For maps, keys must // not conflict with the bson keys of other struct fields. 
// // Some examples: // // type T struct { // A bool // B int "myb" // C string "myc,omitempty" // D string `bson:",omitempty" json:"jsonkey"` // E int64 ",minsize" // F int64 "myf,omitempty,minsize" // } // func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := &encoder{make([]byte, 0, initialBufferSize)} e.addDoc(reflect.ValueOf(in)) return e.out, nil } // Unmarshal deserializes data from in into the out value. The out value // must be a map, a pointer to a struct, or a pointer to a bson.D value. // In the case of struct values, only exported fields will be deserialized. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for // the field. The tag formats accepted are: // // "[][,[,]]" // // `(...) bson:"[][,[,]]" (...)` // // The following flags are currently supported during unmarshal (see the // Marshal method for other flags): // // inline Inline the field, which must be a struct or a map. // Inlined structs are handled as if its fields were part // of the outer struct. An inlined map causes keys that do // not match any other struct field to be inserted in the // map rather than being discarded as usual. // // The target field or element types of out may not necessarily match // the BSON values of the provided data. The following conversions are // made automatically: // // - Numeric types are converted if at least the integer part of the // value would be preserved correctly // - Bools are converted to numeric types as 1 or 0 // - Numeric types are converted to bools as true if not 0 or false otherwise // - Binary and string BSON data is converted to a string, array or byte slice // // If the value would not fit the type and cannot be converted, it's // silently skipped. // // Pointer values are initialized when necessary. 
func Unmarshal(in []byte, out interface{}) (err error) { if raw, ok := out.(*Raw); ok { raw.Kind = 3 raw.Data = in return nil } defer handleErr(&err) v := reflect.ValueOf(out) switch v.Kind() { case reflect.Ptr: fallthrough case reflect.Map: d := newDecoder(in) d.readDocTo(v) case reflect.Struct: return errors.New("Unmarshal can't deal with struct values. Use a pointer.") default: return errors.New("Unmarshal needs a map or a pointer to a struct.") } return nil } // Unmarshal deserializes raw into the out value. If the out value type // is not compatible with raw, a *bson.TypeError is returned. // // See the Unmarshal function documentation for more details on the // unmarshalling process. func (raw Raw) Unmarshal(out interface{}) (err error) { defer handleErr(&err) v := reflect.ValueOf(out) switch v.Kind() { case reflect.Ptr: v = v.Elem() fallthrough case reflect.Map: d := newDecoder(raw.Data) good := d.readElemTo(v, raw.Kind) if !good { return &TypeError{v.Type(), raw.Kind} } case reflect.Struct: return errors.New("Raw Unmarshal can't deal with struct values. 
Use a pointer.") default: return errors.New("Raw Unmarshal needs a map or a valid pointer.") } return nil } type TypeError struct { Type reflect.Type Kind byte } func (e *TypeError) Error() string { return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String()) } // -------------------------------------------------------------------------- // Maintain a mapping of keys to structure field indexes type structInfo struct { FieldsMap map[string]fieldInfo FieldsList []fieldInfo InlineMap int Zero reflect.Value } type fieldInfo struct { Key string Num int OmitEmpty bool MinSize bool Inline []int } var structMap = make(map[reflect.Type]*structInfo) var structMapMutex sync.RWMutex type externalPanic string func (e externalPanic) String() string { return string(e) } func getStructInfo(st reflect.Type) (*structInfo, error) { structMapMutex.RLock() sinfo, found := structMap[st] structMapMutex.RUnlock() if found { return sinfo, nil } n := st.NumField() fieldsMap := make(map[string]fieldInfo) fieldsList := make([]fieldInfo, 0, n) inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) if field.PkgPath != "" && !field.Anonymous { continue // Private field } info := fieldInfo{Num: i} tag := field.Tag.Get("bson") if tag == "" && strings.Index(string(field.Tag), ":") < 0 { tag = string(field.Tag) } if tag == "-" { continue } inline := false fields := strings.Split(tag, ",") if len(fields) > 1 { for _, flag := range fields[1:] { switch flag { case "omitempty": info.OmitEmpty = true case "minsize": info.MinSize = true case "inline": inline = true default: msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) panic(externalPanic(msg)) } } tag = fields[0] } if inline { switch field.Type.Kind() { case reflect.Map: if inlineMap >= 0 { return nil, errors.New("Multiple ,inline maps in struct " + st.String()) } if field.Type.Key() != reflect.TypeOf("") { return nil, errors.New("Option ,inline needs a map with string keys in 
struct " + st.String()) } inlineMap = info.Num case reflect.Struct: sinfo, err := getStructInfo(field.Type) if err != nil { return nil, err } for _, finfo := range sinfo.FieldsList { if _, found := fieldsMap[finfo.Key]; found { msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() return nil, errors.New(msg) } if finfo.Inline == nil { finfo.Inline = []int{i, finfo.Num} } else { finfo.Inline = append([]int{i}, finfo.Inline...) } fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } default: panic("Option ,inline needs a struct value or map field") } continue } if tag != "" { info.Key = tag } else { info.Key = strings.ToLower(field.Name) } if _, found = fieldsMap[info.Key]; found { msg := "Duplicated key '" + info.Key + "' in struct " + st.String() return nil, errors.New(msg) } fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } sinfo = &structInfo{ fieldsMap, fieldsList, inlineMap, reflect.New(st).Elem(), } structMapMutex.Lock() structMap[st] = sinfo structMapMutex.Unlock() return sinfo, nil } mgo-r2016.08.01/bson/bson_test.go000066400000000000000000001504261274772341400163700ustar00rootroot00000000000000// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // gobson - BSON library for Go. package bson_test import ( "encoding/binary" "encoding/hex" "encoding/json" "encoding/xml" "errors" "net/url" "reflect" "strings" "testing" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" "gopkg.in/yaml.v2" ) func TestAll(t *testing.T) { TestingT(t) } type S struct{} var _ = Suite(&S{}) // Wrap up the document elements contained in data, prepending the int32 // length of the data, and appending the '\x00' value closing the document. 
func wrapInDoc(data string) string { result := make([]byte, len(data)+5) binary.LittleEndian.PutUint32(result, uint32(len(result))) copy(result[4:], []byte(data)) return string(result) } func makeZeroDoc(value interface{}) (zero interface{}) { v := reflect.ValueOf(value) t := v.Type() switch t.Kind() { case reflect.Map: mv := reflect.MakeMap(t) zero = mv.Interface() case reflect.Ptr: pv := reflect.New(v.Type().Elem()) zero = pv.Interface() case reflect.Slice, reflect.Int, reflect.Int64, reflect.Struct: zero = reflect.New(t).Interface() default: panic("unsupported doc type: " + t.Name()) } return zero } func testUnmarshal(c *C, data string, obj interface{}) { zero := makeZeroDoc(obj) err := bson.Unmarshal([]byte(data), zero) c.Assert(err, IsNil) c.Assert(zero, DeepEquals, obj) } type testItemType struct { obj interface{} data string } // -------------------------------------------------------------------------- // Samples from bsonspec.org: var sampleItems = []testItemType{ {bson.M{"hello": "world"}, "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}}, "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, } func (s *S) TestMarshalSampleItems(c *C) { for i, item := range sampleItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) } } func (s *S) TestUnmarshalSampleItems(c *C) { for i, item := range sampleItems { value := bson.M{} err := bson.Unmarshal([]byte(item.data), value) c.Assert(err, IsNil) c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i)) } } // -------------------------------------------------------------------------- // Every type, ordered by the type flag. These are not wrapped with the // length and last \x00 from the document. wrapInDoc() computes them. 
// Note that all of them should be supported as two-way conversions. var allItems = []testItemType{ {bson.M{}, ""}, {bson.M{"_": float64(5.05)}, "\x01_\x00333333\x14@"}, {bson.M{"_": "yo"}, "\x02_\x00\x03\x00\x00\x00yo\x00"}, {bson.M{"_": bson.M{"a": true}}, "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"}, {bson.M{"_": []interface{}{true, false}}, "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, {bson.M{"_": []byte("yo")}, "\x05_\x00\x02\x00\x00\x00\x00yo"}, {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, "\x05_\x00\x04\x00\x00\x00\x80udef"}, {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. "\x06_\x00"}, {bson.M{"_": bson.ObjectId("0123456789ab")}, "\x07_\x000123456789ab"}, {bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}}, "\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"}, {bson.M{"_": false}, "\x08_\x00\x00"}, {bson.M{"_": true}, "\x08_\x00\x01"}, {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. "\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": nil}, "\x0A_\x00"}, {bson.M{"_": bson.RegEx{"ab", "cd"}}, "\x0B_\x00ab\x00cd\x00"}, {bson.M{"_": bson.JavaScript{"code", nil}}, "\x0D_\x00\x05\x00\x00\x00code\x00"}, {bson.M{"_": bson.Symbol("sym")}, "\x0E_\x00\x04\x00\x00\x00sym\x00"}, {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + "\x07\x00\x00\x00\x0A\x00\x00"}, {bson.M{"_": 258}, "\x10_\x00\x02\x01\x00\x00"}, {bson.M{"_": bson.MongoTimestamp(258)}, "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": int64(258)}, "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": int64(258 << 32)}, "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, {bson.M{"_": bson.MaxKey}, "\x7F_\x00"}, {bson.M{"_": bson.MinKey}, "\xFF_\x00"}, } func (s *S) TestMarshalAllItems(c *C) { for i, item := range allItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), 
Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalAllItems(c *C) { for i, item := range allItems { value := bson.M{} err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value) c.Assert(err, IsNil) c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalRawAllItems(c *C) { for i, item := range allItems { if len(item.data) == 0 { continue } value := item.obj.(bson.M)["_"] if value == nil { continue } pv := reflect.New(reflect.ValueOf(value).Type()) raw := bson.Raw{item.data[0], []byte(item.data[3:])} c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) err := raw.Unmarshal(pv.Interface()) c.Assert(err, IsNil) c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalRawIncompatible(c *C) { raw := bson.Raw{0x08, []byte{0x01}} // true err := raw.Unmarshal(&struct{}{}) c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}") } func (s *S) TestUnmarshalZeroesStruct(c *C) { data, err := bson.Marshal(bson.M{"b": 2}) c.Assert(err, IsNil) type T struct{ A, B int } v := T{A: 1} err = bson.Unmarshal(data, &v) c.Assert(err, IsNil) c.Assert(v.A, Equals, 0) c.Assert(v.B, Equals, 2) } func (s *S) TestUnmarshalZeroesMap(c *C) { data, err := bson.Marshal(bson.M{"b": 2}) c.Assert(err, IsNil) m := bson.M{"a": 1} err = bson.Unmarshal(data, &m) c.Assert(err, IsNil) c.Assert(m, DeepEquals, bson.M{"b": 2}) } func (s *S) TestUnmarshalNonNilInterface(c *C) { data, err := bson.Marshal(bson.M{"b": 2}) c.Assert(err, IsNil) m := bson.M{"a": 1} var i interface{} i = m err = bson.Unmarshal(data, &i) c.Assert(err, IsNil) c.Assert(i, DeepEquals, bson.M{"b": 2}) c.Assert(m, DeepEquals, bson.M{"a": 1}) } // -------------------------------------------------------------------------- // Some one way marshaling operations which would unmarshal differently. 
var oneWayMarshalItems = []testItemType{ // These are being passed as pointers, and will unmarshal as values. {bson.M{"": &bson.Binary{0x02, []byte("old")}}, "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, "\x05\x00\x04\x00\x00\x00\x80udef"}, {bson.M{"": &bson.RegEx{"ab", "cd"}}, "\x0B\x00ab\x00cd\x00"}, {bson.M{"": &bson.JavaScript{"code", nil}}, "\x0D\x00\x05\x00\x00\x00code\x00"}, {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + "\x07\x00\x00\x00\x0A\x00\x00"}, // There's no float32 type in BSON. Will encode as a float64. {bson.M{"": float32(5.05)}, "\x01\x00\x00\x00\x00@33\x14@"}, // The array will be unmarshaled as a slice instead. {bson.M{"": [2]bool{true, false}}, "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, // The typed slice will be unmarshaled as []interface{}. {bson.M{"": []bool{true, false}}, "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, // Will unmarshal as a []byte. {bson.M{"": bson.Binary{0x00, []byte("yo")}}, "\x05\x00\x02\x00\x00\x00\x00yo"}, {bson.M{"": bson.Binary{0x02, []byte("old")}}, "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, // No way to preserve the type information here. We might encode as a zero // value, but this would mean that pointer values in structs wouldn't be // able to correctly distinguish between unset and set to the zero value. {bson.M{"": (*byte)(nil)}, "\x0A\x00"}, // No int types smaller than int32 in BSON. Could encode this as a char, // but it would still be ambiguous, take more, and be awkward in Go when // loaded without typing information. {bson.M{"": byte(8)}, "\x10\x00\x08\x00\x00\x00"}, // There are no unsigned types in BSON. Will unmarshal as int32 or int64. 
{bson.M{"": uint32(258)}, "\x10\x00\x02\x01\x00\x00"}, {bson.M{"": uint64(258)}, "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"": uint64(258 << 32)}, "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, // This will unmarshal as int. {bson.M{"": int32(258)}, "\x10\x00\x02\x01\x00\x00"}, // That's a special case. The unsigned value is too large for an int32, // so an int64 is used instead. {bson.M{"": uint32(1<<32 - 1)}, "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, {bson.M{"": uint(1<<32 - 1)}, "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, } func (s *S) TestOneWayMarshalItems(c *C) { for i, item := range oneWayMarshalItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d", i)) } } // -------------------------------------------------------------------------- // Two-way tests for user-defined structures using the samples // from bsonspec.org. type specSample1 struct { Hello string } type specSample2 struct { BSON []interface{} "BSON" } var structSampleItems = []testItemType{ {&specSample1{"world"}, "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, {&specSample2{[]interface{}{"awesome", float64(5.05), 1986}}, "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, } func (s *S) TestMarshalStructSampleItems(c *C) { for i, item := range structSampleItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) } } func (s *S) TestUnmarshalStructSampleItems(c *C) { for _, item := range structSampleItems { testUnmarshal(c, item.data, item.obj) } } func (s *S) Test64bitInt(c *C) { var i int64 = (1 << 31) if int(i) > 0 { data, err := bson.Marshal(bson.M{"i": int(i)}) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00")) var result struct{ I int } err = 
bson.Unmarshal(data, &result) c.Assert(err, IsNil) c.Assert(int64(result.I), Equals, i) } } // -------------------------------------------------------------------------- // Generic two-way struct marshaling tests. var bytevar = byte(8) var byteptr = &bytevar var structItems = []testItemType{ {&struct{ Ptr *byte }{nil}, "\x0Aptr\x00"}, {&struct{ Ptr *byte }{&bytevar}, "\x10ptr\x00\x08\x00\x00\x00"}, {&struct{ Ptr **byte }{&byteptr}, "\x10ptr\x00\x08\x00\x00\x00"}, {&struct{ Byte byte }{8}, "\x10byte\x00\x08\x00\x00\x00"}, {&struct{ Byte byte }{0}, "\x10byte\x00\x00\x00\x00\x00"}, {&struct { V byte "Tag" }{8}, "\x10Tag\x00\x08\x00\x00\x00"}, {&struct { V *struct { Byte byte } }{&struct{ Byte byte }{8}}, "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, {&struct{ priv byte }{}, ""}, // The order of the dumped fields should be the same in the struct. {&struct{ A, C, B, D, F, E *byte }{}, "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"}, {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}}, "\x10v\x00" + "\x00\x00\x00\x00"}, // Byte arrays. 
{&struct{ V [2]byte }{[2]byte{'y', 'o'}}, "\x05v\x00\x02\x00\x00\x00\x00yo"}, } func (s *S) TestMarshalStructItems(c *C) { for i, item := range structItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d", i)) } } func (s *S) TestUnmarshalStructItems(c *C) { for _, item := range structItems { testUnmarshal(c, wrapInDoc(item.data), item.obj) } } func (s *S) TestUnmarshalRawStructItems(c *C) { for i, item := range structItems { raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))} zero := makeZeroDoc(item.obj) err := raw.Unmarshal(zero) c.Assert(err, IsNil) c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalRawNil(c *C) { // Regression test: shouldn't try to nil out the pointer itself, // as it's not settable. raw := bson.Raw{0x0A, []byte{}} err := raw.Unmarshal(&struct{}{}) c.Assert(err, IsNil) } // -------------------------------------------------------------------------- // One-way marshaling tests. type dOnIface struct { D interface{} } type ignoreField struct { Before string Ignore string `bson:"-"` After string } var marshalItems = []testItemType{ // Ordered document dump. Will unmarshal as a dictionary by default. 
{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")}, {&ignoreField{"before", "ignore", "after"}, "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"}, // Marshalling a Raw document does nothing. {bson.Raw{0x03, []byte(wrapInDoc("anything"))}, "anything"}, {bson.Raw{Data: []byte(wrapInDoc("anything"))}, "anything"}, } func (s *S) TestMarshalOneWayItems(c *C) { for _, item := range marshalItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data)) } } // -------------------------------------------------------------------------- // One-way unmarshaling tests. var unmarshalItems = []testItemType{ // Field is private. Should not attempt to unmarshal it. {&struct{ priv byte }{}, "\x10priv\x00\x08\x00\x00\x00"}, // Wrong casing. Field names are lowercased. {&struct{ Byte byte }{}, "\x10Byte\x00\x08\x00\x00\x00"}, // Ignore non-existing field. {&struct{ Byte byte }{9}, "\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"}, // Do not unmarshal on ignored field. 
{&ignoreField{"before", "", "after"}, "\x02before\x00\a\x00\x00\x00before\x00" + "\x02-\x00\a\x00\x00\x00ignore\x00" + "\x02after\x00\x06\x00\x00\x00after\x00"}, // Ignore unsuitable types silently. {map[string]string{"str": "s"}, "\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"}, {map[string][]int{"array": []int{5, 9}}, "\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")}, // Wrong type. Shouldn't init pointer. {&struct{ Str *byte }{}, "\x02str\x00\x02\x00\x00\x00s\x00"}, {&struct{ Str *struct{ Str string } }{}, "\x02str\x00\x02\x00\x00\x00s\x00"}, // Ordered document. {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, // Raw document. {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, "\x10byte\x00\x08\x00\x00\x00"}, // RawD document. {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")}, // Decode old binary. {bson.M{"_": []byte("old")}, "\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, // Decode old binary without length. According to the spec, this shouldn't happen. {bson.M{"_": []byte("old")}, "\x05_\x00\x03\x00\x00\x00\x02old"}, // Decode a doc within a doc in to a slice within a doc; shouldn't error {&struct{ Foo []string }{}, "\x03\x66\x6f\x6f\x00\x05\x00\x00\x00\x00"}, } func (s *S) TestUnmarshalOneWayItems(c *C) { for _, item := range unmarshalItems { testUnmarshal(c, wrapInDoc(item.data), item.obj) } } func (s *S) TestUnmarshalNilInStruct(c *C) { // Nil is the default value, so we need to ensure it's indeed being set. 
b := byte(1) v := &struct{ Ptr *byte }{&b} err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v) c.Assert(err, IsNil) c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil}) } // -------------------------------------------------------------------------- // Marshalling error cases. type structWithDupKeys struct { Name byte Other byte "name" // Tag should precede. } var marshalErrorItems = []testItemType{ {bson.M{"": uint64(1 << 63)}, "BSON has no uint64 type, and value is too large to fit correctly in an int64"}, {bson.M{"": bson.ObjectId("tooshort")}, "ObjectIDs must be exactly 12 bytes long \\(got 8\\)"}, {int64(123), "Can't marshal int64 as a BSON document"}, {bson.M{"": 1i}, "Can't marshal complex128 in a BSON document"}, {&structWithDupKeys{}, "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, {bson.Raw{0xA, []byte{}}, "Attempted to marshal Raw kind 10 as a document"}, {bson.Raw{0x3, []byte{}}, "Attempted to marshal empty Raw document"}, {bson.M{"w": bson.Raw{0x3, []byte{}}}, "Attempted to marshal empty Raw document"}, {&inlineCantPtr{&struct{ A, B int }{1, 2}}, "Option ,inline needs a struct value or map field"}, {&inlineDupName{1, struct{ A, B int }{2, 3}}, "Duplicated key 'a' in struct bson_test.inlineDupName"}, {&inlineDupMap{}, "Multiple ,inline maps in struct bson_test.inlineDupMap"}, {&inlineBadKeyMap{}, "Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"}, {&inlineMap{A: 1, M: map[string]interface{}{"a": 1}}, `Can't have key "a" in inlined map; conflicts with struct field`}, } func (s *S) TestMarshalErrorItems(c *C) { for _, item := range marshalErrorItems { data, err := bson.Marshal(item.obj) c.Assert(err, ErrorMatches, item.data) c.Assert(data, IsNil) } } // -------------------------------------------------------------------------- // Unmarshalling error cases. 
type unmarshalErrorType struct { obj interface{} data string error string } var unmarshalErrorItems = []unmarshalErrorType{ // Tag name conflicts with existing parameter. {&structWithDupKeys{}, "\x10name\x00\x08\x00\x00\x00", "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, // Non-string map key. {map[int]interface{}{}, "\x10name\x00\x08\x00\x00\x00", "BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"}, {nil, "\xEEname\x00", "Unknown element kind \\(0xEE\\)"}, {struct{ Name bool }{}, "\x10name\x00\x08\x00\x00\x00", "Unmarshal can't deal with struct values. Use a pointer."}, {123, "\x10name\x00\x08\x00\x00\x00", "Unmarshal needs a map or a pointer to a struct."}, {nil, "\x08\x62\x00\x02", "encoded boolean must be 1 or 0, found 2"}, } func (s *S) TestUnmarshalErrorItems(c *C) { for _, item := range unmarshalErrorItems { data := []byte(wrapInDoc(item.data)) var value interface{} switch reflect.ValueOf(item.obj).Kind() { case reflect.Map, reflect.Ptr: value = makeZeroDoc(item.obj) case reflect.Invalid: value = bson.M{} default: value = item.obj } err := bson.Unmarshal(data, value) c.Assert(err, ErrorMatches, item.error) } } type unmarshalRawErrorType struct { obj interface{} raw bson.Raw error string } var unmarshalRawErrorItems = []unmarshalRawErrorType{ // Tag name conflicts with existing parameter. {&structWithDupKeys{}, bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")}, "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, {&struct{}{}, bson.Raw{0xEE, []byte{}}, "Unknown element kind \\(0xEE\\)"}, {struct{ Name bool }{}, bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, "Raw Unmarshal can't deal with struct values. 
Use a pointer."}, {123, bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, "Raw Unmarshal needs a map or a valid pointer."}, } func (s *S) TestUnmarshalRawErrorItems(c *C) { for i, item := range unmarshalRawErrorItems { err := item.raw.Unmarshal(item.obj) c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item)) } } var corruptedData = []string{ "\x04\x00\x00\x00\x00", // Document shorter than minimum "\x06\x00\x00\x00\x00", // Not enough data "\x05\x00\x00", // Broken length "\x05\x00\x00\x00\xff", // Corrupted termination "\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string // Array end past end of string (s[2]=0x07 is correct) wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"), // Array end within string, but past acceptable. wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"), // Document end within string, but past acceptable. wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"), // String with corrupted end. wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), // String with negative length (issue #116). "\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00", // String with zero length (must include trailing '\x00') "\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00", // Binary with negative length. "\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00", } func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { for _, data := range corruptedData { err := bson.Unmarshal([]byte(data), bson.M{}) c.Assert(err, ErrorMatches, "Document is corrupted") err = bson.Unmarshal([]byte(data), &struct{}{}) c.Assert(err, ErrorMatches, "Document is corrupted") } } // -------------------------------------------------------------------------- // Setter test cases. 
var setterResult = map[string]error{} type setterType struct { received interface{} } func (o *setterType) SetBSON(raw bson.Raw) error { err := raw.Unmarshal(&o.received) if err != nil { panic("The panic:" + err.Error()) } if s, ok := o.received.(string); ok { if result, ok := setterResult[s]; ok { return result } } return nil } type ptrSetterDoc struct { Field *setterType "_" } type valSetterDoc struct { Field setterType "_" } func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { for _, item := range allItems { for i := 0; i != 2; i++ { var field *setterType if i == 0 { obj := &ptrSetterDoc{} err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) c.Assert(err, IsNil) field = obj.Field } else { obj := &valSetterDoc{} err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) c.Assert(err, IsNil) field = &obj.Field } if item.data == "" { // Nothing to unmarshal. Should be untouched. if i == 0 { c.Assert(field, IsNil) } else { c.Assert(field.received, IsNil) } } else { expected := item.obj.(bson.M)["_"] c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected)) c.Assert(field.received, DeepEquals, expected) } } } } func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { obj := &setterType{} err := bson.Unmarshal([]byte(sampleItems[0].data), obj) c.Assert(err, IsNil) c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"}) } func (s *S) TestUnmarshalSetterOmits(c *C) { setterResult["2"] = &bson.TypeError{} setterResult["4"] = &bson.TypeError{} defer func() { delete(setterResult, "2") delete(setterResult, "4") }() m := map[string]*setterType{} data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + "\x02def\x00\x02\x00\x00\x002\x00" + "\x02ghi\x00\x02\x00\x00\x003\x00" + "\x02jkl\x00\x02\x00\x00\x004\x00") err := bson.Unmarshal([]byte(data), m) c.Assert(err, IsNil) c.Assert(m["abc"], NotNil) c.Assert(m["def"], IsNil) c.Assert(m["ghi"], NotNil) c.Assert(m["jkl"], IsNil) c.Assert(m["abc"].received, Equals, "1") c.Assert(m["ghi"].received, Equals, 
"3") } func (s *S) TestUnmarshalSetterErrors(c *C) { boom := errors.New("BOOM") setterResult["2"] = boom defer delete(setterResult, "2") m := map[string]*setterType{} data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + "\x02def\x00\x02\x00\x00\x002\x00" + "\x02ghi\x00\x02\x00\x00\x003\x00") err := bson.Unmarshal([]byte(data), m) c.Assert(err, Equals, boom) c.Assert(m["abc"], NotNil) c.Assert(m["def"], IsNil) c.Assert(m["ghi"], IsNil) c.Assert(m["abc"].received, Equals, "1") } func (s *S) TestDMap(c *C) { d := bson.D{{"a", 1}, {"b", 2}} c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) } func (s *S) TestUnmarshalSetterSetZero(c *C) { setterResult["foo"] = bson.SetZero defer delete(setterResult, "field") data, err := bson.Marshal(bson.M{"field": "foo"}) c.Assert(err, IsNil) m := map[string]*setterType{} err = bson.Unmarshal([]byte(data), m) c.Assert(err, IsNil) value, ok := m["field"] c.Assert(ok, Equals, true) c.Assert(value, IsNil) } // -------------------------------------------------------------------------- // Getter test cases. 
type typeWithGetter struct { result interface{} err error } func (t *typeWithGetter) GetBSON() (interface{}, error) { if t == nil { return "", nil } return t.result, t.err } type docWithGetterField struct { Field *typeWithGetter "_" } func (s *S) TestMarshalAllItemsWithGetter(c *C) { for i, item := range allItems { if item.data == "" { continue } obj := &docWithGetterField{} obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]} data, err := bson.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item #%d", i)) } } func (s *S) TestMarshalWholeDocumentWithGetter(c *C) { obj := &typeWithGetter{result: sampleItems[0].obj} data, err := bson.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, sampleItems[0].data) } func (s *S) TestGetterErrors(c *C) { e := errors.New("oops") obj1 := &docWithGetterField{} obj1.Field = &typeWithGetter{sampleItems[0].obj, e} data, err := bson.Marshal(obj1) c.Assert(err, ErrorMatches, "oops") c.Assert(data, IsNil) obj2 := &typeWithGetter{sampleItems[0].obj, e} data, err = bson.Marshal(obj2) c.Assert(err, ErrorMatches, "oops") c.Assert(data, IsNil) } type intGetter int64 func (t intGetter) GetBSON() (interface{}, error) { return int64(t), nil } type typeWithIntGetter struct { V intGetter ",minsize" } func (s *S) TestMarshalShortWithGetter(c *C) { obj := typeWithIntGetter{42} data, err := bson.Marshal(obj) c.Assert(err, IsNil) m := bson.M{} err = bson.Unmarshal(data, m) c.Assert(err, IsNil) c.Assert(m["v"], Equals, 42) } func (s *S) TestMarshalWithGetterNil(c *C) { obj := docWithGetterField{} data, err := bson.Marshal(obj) c.Assert(err, IsNil) m := bson.M{} err = bson.Unmarshal(data, m) c.Assert(err, IsNil) c.Assert(m, DeepEquals, bson.M{"_": ""}) } // -------------------------------------------------------------------------- // Cross-type conversion tests. 
type crossTypeItem struct { obj1 interface{} obj2 interface{} } type condStr struct { V string ",omitempty" } type condStrNS struct { V string `a:"A" bson:",omitempty" b:"B"` } type condBool struct { V bool ",omitempty" } type condInt struct { V int ",omitempty" } type condUInt struct { V uint ",omitempty" } type condFloat struct { V float64 ",omitempty" } type condIface struct { V interface{} ",omitempty" } type condPtr struct { V *bool ",omitempty" } type condSlice struct { V []string ",omitempty" } type condMap struct { V map[string]int ",omitempty" } type namedCondStr struct { V string "myv,omitempty" } type condTime struct { V time.Time ",omitempty" } type condStruct struct { V struct{ A []int } ",omitempty" } type condRaw struct { V bson.Raw ",omitempty" } type shortInt struct { V int64 ",minsize" } type shortUint struct { V uint64 ",minsize" } type shortIface struct { V interface{} ",minsize" } type shortPtr struct { V *int64 ",minsize" } type shortNonEmptyInt struct { V int64 ",minsize,omitempty" } type inlineInt struct { V struct{ A, B int } ",inline" } type inlineCantPtr struct { V *struct{ A, B int } ",inline" } type inlineDupName struct { A int V struct{ A, B int } ",inline" } type inlineMap struct { A int M map[string]interface{} ",inline" } type inlineMapInt struct { A int M map[string]int ",inline" } type inlineMapMyM struct { A int M MyM ",inline" } type inlineDupMap struct { M1 map[string]interface{} ",inline" M2 map[string]interface{} ",inline" } type inlineBadKeyMap struct { M map[int]int ",inline" } type inlineUnexported struct { M map[string]interface{} ",inline" unexported ",inline" } type unexported struct { A int } type getterSetterD bson.D func (s getterSetterD) GetBSON() (interface{}, error) { if len(s) == 0 { return bson.D{}, nil } return bson.D(s[:len(s)-1]), nil } func (s *getterSetterD) SetBSON(raw bson.Raw) error { var doc bson.D err := raw.Unmarshal(&doc) doc = append(doc, bson.DocElem{"suffix", true}) *s = getterSetterD(doc) return 
err } type getterSetterInt int func (i getterSetterInt) GetBSON() (interface{}, error) { return bson.D{{"a", int(i)}}, nil } func (i *getterSetterInt) SetBSON(raw bson.Raw) error { var doc struct{ A int } err := raw.Unmarshal(&doc) *i = getterSetterInt(doc.A) return err } type ifaceType interface { Hello() } type ifaceSlice []ifaceType func (s *ifaceSlice) SetBSON(raw bson.Raw) error { var ns []int if err := raw.Unmarshal(&ns); err != nil { return err } *s = make(ifaceSlice, ns[0]) return nil } func (s ifaceSlice) GetBSON() (interface{}, error) { return []int{len(s)}, nil } type ( MyString string MyBytes []byte MyBool bool MyD []bson.DocElem MyRawD []bson.RawDocElem MyM map[string]interface{} ) var ( truevar = true falsevar = false int64var = int64(42) int64ptr = &int64var intvar = int(42) intptr = &intvar gsintvar = getterSetterInt(42) ) func parseURL(s string) *url.URL { u, err := url.Parse(s) if err != nil { panic(err) } return u } // That's a pretty fun test. It will dump the first item, generate a zero // value equivalent to the second one, load the dumped data onto it, and then // verify that the resulting value is deep-equal to the untouched second value. // Then, it will do the same in the *opposite* direction! 
var twoWayCrossItems = []crossTypeItem{ // int<=>int {&struct{ I int }{42}, &struct{ I int8 }{42}}, {&struct{ I int }{42}, &struct{ I int32 }{42}}, {&struct{ I int }{42}, &struct{ I int64 }{42}}, {&struct{ I int8 }{42}, &struct{ I int32 }{42}}, {&struct{ I int8 }{42}, &struct{ I int64 }{42}}, {&struct{ I int32 }{42}, &struct{ I int64 }{42}}, // uint<=>uint {&struct{ I uint }{42}, &struct{ I uint8 }{42}}, {&struct{ I uint }{42}, &struct{ I uint32 }{42}}, {&struct{ I uint }{42}, &struct{ I uint64 }{42}}, {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, // float32<=>float64 {&struct{ I float32 }{42}, &struct{ I float64 }{42}}, // int<=>uint {&struct{ I uint }{42}, &struct{ I int }{42}}, {&struct{ I uint }{42}, &struct{ I int8 }{42}}, {&struct{ I uint }{42}, &struct{ I int32 }{42}}, {&struct{ I uint }{42}, &struct{ I int64 }{42}}, {&struct{ I uint8 }{42}, &struct{ I int }{42}}, {&struct{ I uint8 }{42}, &struct{ I int8 }{42}}, {&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, {&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, {&struct{ I uint32 }{42}, &struct{ I int }{42}}, {&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, {&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, {&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, {&struct{ I uint64 }{42}, &struct{ I int }{42}}, {&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, {&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, {&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, // int <=> float {&struct{ I int }{42}, &struct{ I float64 }{42}}, // int <=> bool {&struct{ I int }{1}, &struct{ I bool }{true}}, {&struct{ I int }{0}, &struct{ I bool }{false}}, // uint <=> float64 {&struct{ I uint }{42}, &struct{ I float64 }{42}}, // uint <=> bool {&struct{ I uint }{1}, &struct{ I bool }{true}}, {&struct{ I uint }{0}, &struct{ I bool }{false}}, // float64 <=> bool {&struct{ I float64 }{1}, &struct{ I bool }{true}}, {&struct{ I 
float64 }{0}, &struct{ I bool }{false}}, // string <=> string and string <=> []byte {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, // map <=> struct {&struct { A struct { B, C int } }{struct{ B, C int }{1, 2}}, map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, {&struct{ A uint }{42}, map[string]int{"a": 42}}, {&struct{ A uint }{42}, map[string]float64{"a": 42}}, {&struct{ A uint }{1}, map[string]bool{"a": true}}, {&struct{ A int }{42}, map[string]uint{"a": 42}}, {&struct{ A int }{42}, map[string]float64{"a": 42}}, {&struct{ A int }{1}, map[string]bool{"a": true}}, {&struct{ A float64 }{42}, map[string]float32{"a": 42}}, {&struct{ A float64 }{42}, map[string]int{"a": 42}}, {&struct{ A float64 }{42}, map[string]uint{"a": 42}}, {&struct{ A float64 }{1}, map[string]bool{"a": true}}, {&struct{ A bool }{true}, map[string]int{"a": 1}}, {&struct{ A bool }{true}, map[string]uint{"a": 1}}, {&struct{ A bool }{true}, map[string]float64{"a": 1}}, {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, // url.URL <=> string {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, // Slices {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, // Conditionals {&condBool{true}, map[string]bool{"v": true}}, {&condBool{}, map[string]bool{}}, {&condInt{1}, map[string]int{"v": 1}}, {&condInt{}, map[string]int{}}, {&condUInt{1}, map[string]uint{"v": 1}}, {&condUInt{}, map[string]uint{}}, 
{&condFloat{}, map[string]int{}}, {&condStr{"yo"}, map[string]string{"v": "yo"}}, {&condStr{}, map[string]string{}}, {&condStrNS{"yo"}, map[string]string{"v": "yo"}}, {&condStrNS{}, map[string]string{}}, {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, {&condSlice{}, map[string][]string{}}, {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, {&condMap{}, map[string][]string{}}, {&condIface{"yo"}, map[string]string{"v": "yo"}}, {&condIface{""}, map[string]string{"v": ""}}, {&condIface{}, map[string]string{}}, {&condPtr{&truevar}, map[string]bool{"v": true}}, {&condPtr{&falsevar}, map[string]bool{"v": false}}, {&condPtr{}, map[string]string{}}, {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, {&condTime{}, map[string]string{}}, {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, {&condStruct{struct{ A []int }{}}, bson.M{}}, {&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}}, {&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}}, {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, {&namedCondStr{}, map[string]string{}}, {&shortInt{1}, map[string]interface{}{"v": 1}}, {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, {&shortNonEmptyInt{}, map[string]interface{}{}}, {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, {&inlineMap{A: 1, M: nil}, 
map[string]interface{}{"a": 1}}, {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, {&inlineUnexported{M: map[string]interface{}{"b": 1}, unexported: unexported{A: 2}}, map[string]interface{}{"b": 1, "a": 2}}, // []byte <=> Binary {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}}, // []byte <=> MyBytes {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, {&struct{ B MyBytes }{}, map[string]bool{}}, {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, // bool <=> MyBool {&struct{ B MyBool }{true}, map[string]bool{"b": true}}, {&struct{ B MyBool }{}, map[string]bool{"b": false}}, {&struct{ B MyBool }{}, map[string]string{}}, {&struct{ B bool }{}, map[string]MyBool{"b": false}}, // arrays {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, {&struct{ V [2]byte }{[...]byte{1, 2}}, map[string][2]byte{"v": [2]byte{1, 2}}}, // zero time {&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}}, // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, // bson.D <=> []DocElem {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, {&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}}, // bson.RawD <=> []RawDocElem {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, // 
bson.M <=> map {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, // bson.M <=> map[MyString] {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, // json.Number <=> int64, float64 {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, // bson.D <=> non-struct getter/setter {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, {&bson.D{{"a", 42}}, &gsintvar}, // Interface slice setter. {&struct{ V ifaceSlice }{ifaceSlice{nil, nil, nil}}, bson.M{"v": []interface{}{3}}}, } // Same thing, but only one way (obj1 => obj2). var oneWayCrossItems = []crossTypeItem{ // map <=> struct {map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}}, // inline map elides badly typed values {map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}}, // Can't decode int into struct. {bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}}, // Would get decoded into a int32 too in the opposite direction. {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, // Ensure omitempty on struct with private fields works properly. {&struct { V struct{ v time.Time } ",omitempty" }{}, map[string]interface{}{}}, // Attempt to marshal slice into RawD (issue #120). 
{bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}}, } func testCrossPair(c *C, dump interface{}, load interface{}) { c.Logf("Dump: %#v", dump) c.Logf("Load: %#v", load) zero := makeZeroDoc(load) data, err := bson.Marshal(dump) c.Assert(err, IsNil) c.Logf("Dumped: %#v", string(data)) err = bson.Unmarshal(data, zero) c.Assert(err, IsNil) c.Logf("Loaded: %#v", zero) c.Assert(zero, DeepEquals, load) } func (s *S) TestTwoWayCrossPairs(c *C) { for _, item := range twoWayCrossItems { testCrossPair(c, item.obj1, item.obj2) testCrossPair(c, item.obj2, item.obj1) } } func (s *S) TestOneWayCrossPairs(c *C) { for _, item := range oneWayCrossItems { testCrossPair(c, item.obj1, item.obj2) } } // -------------------------------------------------------------------------- // ObjectId hex representation test. func (s *S) TestObjectIdHex(c *C) { id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`) c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9") } func (s *S) TestIsObjectIdHex(c *C) { test := []struct { id string valid bool }{ {"4d88e15b60f486e428412dc9", true}, {"4d88e15b60f486e428412dc", false}, {"4d88e15b60f486e428412dc9e", false}, {"4d88e15b60f486e428412dcx", false}, } for _, t := range test { c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid) } } // -------------------------------------------------------------------------- // ObjectId parts extraction tests. 
type objectIdParts struct { id bson.ObjectId timestamp int64 machine []byte pid uint16 counter int32 } var objectIds = []objectIdParts{ objectIdParts{ bson.ObjectIdHex("4d88e15b60f486e428412dc9"), 1300816219, []byte{0x60, 0xf4, 0x86}, 0xe428, 4271561, }, objectIdParts{ bson.ObjectIdHex("000000000000000000000000"), 0, []byte{0x00, 0x00, 0x00}, 0x0000, 0, }, objectIdParts{ bson.ObjectIdHex("00000000aabbccddee000001"), 0, []byte{0xaa, 0xbb, 0xcc}, 0xddee, 1, }, } func (s *S) TestObjectIdPartsExtraction(c *C) { for i, v := range objectIds { t := time.Unix(v.timestamp, 0) c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i)) c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i)) c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i)) c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i)) } } func (s *S) TestNow(c *C) { before := time.Now() time.Sleep(1e6) now := bson.Now() time.Sleep(1e6) after := time.Now() c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after)) } // -------------------------------------------------------------------------- // ObjectId generation tests. 
func (s *S) TestNewObjectId(c *C) { // Generate 10 ids ids := make([]bson.ObjectId, 10) for i := 0; i < 10; i++ { ids[i] = bson.NewObjectId() } for i := 1; i < 10; i++ { prevId := ids[i-1] id := ids[i] // Test for uniqueness among all other 9 generated ids for j, tid := range ids { if j != i { c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique")) } } // Check that timestamp was incremented and is within 30 seconds of the previous one secs := id.Time().Sub(prevId.Time()).Seconds() c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId")) // Check that machine ids are the same c.Assert(id.Machine(), DeepEquals, prevId.Machine()) // Check that pids are the same c.Assert(id.Pid(), Equals, prevId.Pid()) // Test for proper increment delta := int(id.Counter() - prevId.Counter()) c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId")) } } func (s *S) TestNewObjectIdWithTime(c *C) { t := time.Unix(12345678, 0) id := bson.NewObjectIdWithTime(t) c.Assert(id.Time(), Equals, t) c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00}) c.Assert(int(id.Pid()), Equals, 0) c.Assert(int(id.Counter()), Equals, 0) } // -------------------------------------------------------------------------- // ObjectId JSON marshalling. 
type jsonType struct { Id bson.ObjectId } var jsonIdTests = []struct { value jsonType json string marshal bool unmarshal bool error string }{{ value: jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")}, json: `{"Id":"4d88e15b60f486e428412dc9"}`, marshal: true, unmarshal: true, }, { value: jsonType{}, json: `{"Id":""}`, marshal: true, unmarshal: true, }, { value: jsonType{}, json: `{"Id":null}`, marshal: false, unmarshal: true, }, { json: `{"Id":"4d88e15b60f486e428412dc9A"}`, error: `invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, marshal: false, unmarshal: true, }, { json: `{"Id":"4d88e15b60f486e428412dcZ"}`, error: `invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, marshal: false, unmarshal: true, }} func (s *S) TestObjectIdJSONMarshaling(c *C) { for _, test := range jsonIdTests { if test.marshal { data, err := json.Marshal(&test.value) if test.error == "" { c.Assert(err, IsNil) c.Assert(string(data), Equals, test.json) } else { c.Assert(err, ErrorMatches, test.error) } } if test.unmarshal { var value jsonType err := json.Unmarshal([]byte(test.json), &value) if test.error == "" { c.Assert(err, IsNil) c.Assert(value, DeepEquals, test.value) } else { c.Assert(err, ErrorMatches, test.error) } } } } // -------------------------------------------------------------------------- // Spec tests type specTest struct { Description string Documents []struct { Decoded map[string]interface{} Encoded string DecodeOnly bool `yaml:"decodeOnly"` Error interface{} } } func (s *S) TestSpecTests(c *C) { for _, data := range specTests { var test specTest err := yaml.Unmarshal([]byte(data), &test) c.Assert(err, IsNil) c.Logf("Running spec test set %q", test.Description) for _, doc := range test.Documents { if doc.Error != nil { continue } c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded) var decoded map[string]interface{} encoded, err := hex.DecodeString(doc.Encoded) c.Assert(err, IsNil) err = bson.Unmarshal(encoded, &decoded) c.Assert(err, IsNil) 
c.Assert(decoded, DeepEquals, doc.Decoded) } for _, doc := range test.Documents { if doc.DecodeOnly || doc.Error != nil { continue } c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded) encoded, err := bson.Marshal(doc.Decoded) c.Assert(err, IsNil) c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded) } for _, doc := range test.Documents { if doc.Error == nil { continue } c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error) var decoded map[string]interface{} encoded, err := hex.DecodeString(doc.Encoded) c.Assert(err, IsNil) err = bson.Unmarshal(encoded, &decoded) c.Assert(err, NotNil) c.Logf("Failed with: %v", err) } } } // -------------------------------------------------------------------------- // ObjectId Text encoding.TextUnmarshaler. var textIdTests = []struct { value bson.ObjectId text string marshal bool unmarshal bool error string }{{ value: bson.ObjectIdHex("4d88e15b60f486e428412dc9"), text: "4d88e15b60f486e428412dc9", marshal: true, unmarshal: true, }, { text: "", marshal: true, unmarshal: true, }, { text: "4d88e15b60f486e428412dc9A", marshal: false, unmarshal: true, error: `invalid ObjectId: 4d88e15b60f486e428412dc9A`, }, { text: "4d88e15b60f486e428412dcZ", marshal: false, unmarshal: true, error: `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`, }} func (s *S) TestObjectIdTextMarshaling(c *C) { for _, test := range textIdTests { if test.marshal { data, err := test.value.MarshalText() if test.error == "" { c.Assert(err, IsNil) c.Assert(string(data), Equals, test.text) } else { c.Assert(err, ErrorMatches, test.error) } } if test.unmarshal { err := test.value.UnmarshalText([]byte(test.text)) if test.error == "" { c.Assert(err, IsNil) if test.value != "" { value := bson.ObjectIdHex(test.text) c.Assert(value, DeepEquals, test.value) } } else { c.Assert(err, ErrorMatches, test.error) } } } } // -------------------------------------------------------------------------- // ObjectId XML marshalling. 
type xmlType struct { Id bson.ObjectId } var xmlIdTests = []struct { value xmlType xml string marshal bool unmarshal bool error string }{{ value: xmlType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")}, xml: "4d88e15b60f486e428412dc9", marshal: true, unmarshal: true, }, { value: xmlType{}, xml: "", marshal: true, unmarshal: true, }, { xml: "4d88e15b60f486e428412dc9A", marshal: false, unmarshal: true, error: `invalid ObjectId: 4d88e15b60f486e428412dc9A`, }, { xml: "4d88e15b60f486e428412dcZ", marshal: false, unmarshal: true, error: `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`, }} func (s *S) TestObjectIdXMLMarshaling(c *C) { for _, test := range xmlIdTests { if test.marshal { data, err := xml.Marshal(&test.value) if test.error == "" { c.Assert(err, IsNil) c.Assert(string(data), Equals, test.xml) } else { c.Assert(err, ErrorMatches, test.error) } } if test.unmarshal { var value xmlType err := xml.Unmarshal([]byte(test.xml), &value) if test.error == "" { c.Assert(err, IsNil) c.Assert(value, DeepEquals, test.value) } else { c.Assert(err, ErrorMatches, test.error) } } } } // -------------------------------------------------------------------------- // Some simple benchmarks. 
type BenchT struct { A, B, C, D, E, F string } type BenchRawT struct { A string B int C bson.M D []float64 } func (s *S) BenchmarkUnmarhsalStruct(c *C) { v := BenchT{A: "A", D: "D", E: "E"} data, err := bson.Marshal(&v) if err != nil { panic(err) } c.ResetTimer() for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &v) } if err != nil { panic(err) } } func (s *S) BenchmarkUnmarhsalMap(c *C) { m := bson.M{"a": "a", "d": "d", "e": "e"} data, err := bson.Marshal(&m) if err != nil { panic(err) } c.ResetTimer() for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &m) } if err != nil { panic(err) } } func (s *S) BenchmarkUnmarshalRaw(c *C) { var err error m := BenchRawT{ A: "test_string", B: 123, C: bson.M{ "subdoc_int": 12312, "subdoc_doc": bson.M{"1": 1}, }, D: []float64{0.0, 1.3333, -99.9997, 3.1415}, } data, err := bson.Marshal(&m) if err != nil { panic(err) } raw := bson.Raw{} c.ResetTimer() for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &raw) } if err != nil { panic(err) } } func (s *S) BenchmarkNewObjectId(c *C) { for i := 0; i < c.N; i++ { bson.NewObjectId() } } mgo-r2016.08.01/bson/decimal.go000066400000000000000000000154761274772341400157730ustar00rootroot00000000000000// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package bson

import (
	"fmt"
	"strconv"
	"strings"
)

// Decimal128 holds decimal128 BSON values.
// The 128-bit value is stored as two uint64 halves (h = most significant).
// NOTE(review): the field layout matches the IEEE 754-2008 BID encoding as
// used by the code below — sign/combination bits live in the top of h.
type Decimal128 struct {
	h, l uint64
}

// String renders the value in the canonical decimal128 string form used by
// the BSON corpus tests: plain decimal for small exponents, otherwise
// scientific notation with an explicit "E+"/"E" exponent.
func (d Decimal128) String() string {
	var pos int     // positive sign
	var e int       // exponent
	var h, l uint64 // significand high/low

	// Top bit of h is the sign; pos==1 slices the leading '-' off the
	// string constants below for non-negative values.
	if d.h>>63&1 == 0 {
		pos = 1
	}

	// Five combination bits distinguish NaN (all ones) and infinity.
	switch d.h >> 58 & (1<<5 - 1) {
	case 0x1F:
		return "NaN"
	case 0x1E:
		return "-Inf"[pos:]
	}

	l = d.l
	if d.h>>61&3 == 3 {
		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
		// Implicit 0b100 prefix in significand.
		e = int(d.h>>47&(1<<14-1)) - 6176
		//h = 4<<47 | d.h&(1<<47-1)
		// Spec says all of these values are out of range.
		h, l = 0, 0
	} else {
		// Bits: 1*sign 14*exponent 113*significand
		e = int(d.h>>49&(1<<14-1)) - 6176
		h = d.h & (1<<49 - 1)
	}

	// Would be handled by the logic below, but that's trivial and common.
	if h == 0 && l == 0 && e == 0 {
		return "-0"[pos:]
	}

	// Digits are produced right-to-left into repr; i walks down, last marks
	// the leftmost character kept, dot marks where '.' must be inserted.
	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
	var last = len(repr)
	var i = len(repr)
	var dot = len(repr) + e
	var rem uint32
Loop:
	for d9 := 0; d9 < 5; d9++ {
		// Peel off nine decimal digits at a time from the 128-bit significand.
		h, l, rem = divmod(h, l, 1e9)
		for d1 := 0; d1 < 9; d1++ {
			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
				e += len(repr) - i
				i--
				repr[i] = '.'
				last = i - 1
				dot = len(repr) // Unmark.
			}
			c := '0' + byte(rem%10)
			rem /= 10
			i--
			repr[i] = c
			// Handle "0E+3", "1E+3", etc.
			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
				last = i
				break Loop
			}
			if c != '0' {
				last = i
			}
			// Break early. Works without it, but why.
			if dot > i && l == 0 && h == 0 && rem == 0 {
				break Loop
			}
		}
	}
	// The '-' is written unconditionally; pos==1 slices it away below.
	repr[last-1] = '-'
	last--

	if e > 0 {
		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
	}
	if e < 0 {
		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
	}
	return string(repr[last+pos:])
}

// divmod divides the 128-bit value (h, l) by the 32-bit div, returning the
// 128-bit quotient (qh, ql) and the 32-bit remainder. It proceeds in four
// 32-bit limbs from most to least significant, carrying each remainder down.
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
	div64 := uint64(div)
	a := h >> 32
	aq := a / div64
	ar := a % div64
	b := ar<<32 + h&(1<<32-1)
	bq := b / div64
	br := b % div64
	c := br<<32 + l>>32
	cq := c / div64
	cr := c % div64
	d := cr<<32 + l&(1<<32-1)
	dq := d / div64
	dr := d % div64
	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
}

// Canonical special values (combination bits in the top of h).
var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}

// dErr builds the uniform parse error; NaN is returned as the (meaningless)
// value half of the pair.
func dErr(s string) (Decimal128, error) {
	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}

// ParseDecimal128 parses a decimal string (optionally signed, with optional
// fraction and E/e exponent, plus the NaN/Inf/Infinity spellings) into a
// Decimal128. Values needing more than 34 significant digits, or exponents
// outside [-6176, 6111] after subnormal/clamping adjustment, are rejected.
func ParseDecimal128(s string) (Decimal128, error) {
	orig := s
	if s == "" {
		return dErr(orig)
	}
	neg := s[0] == '-'
	if neg || s[0] == '+' {
		s = s[1:]
	}

	// Fast-path the textual specials; length 3 or 8 covers nan/inf/infinity.
	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
			return dNaN, nil
		}
		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
			if neg {
				return dNegInf, nil
			}
			return dPosInf, nil
		}
		return dErr(orig)
	}

	var h, l uint64 // 128-bit significand being accumulated
	var e int       // decimal exponent

	// Digits are batched: up to nine are folded into (mul, add) and then
	// committed to (h, l) via one 128-bit muladd.
	var add, ovr uint32
	var mul uint32 = 1
	var dot = -1
	var digits = 0
	var i = 0
	for i < len(s) {
		c := s[i]
		if mul == 1e9 {
			h, l, ovr = muladd(h, l, mul, add)
			mul, add = 1, 0
			// Overflow, or significand spilling into the 14 exponent bits.
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if c >= '0' && c <= '9' {
			i++
			if c > '0' || digits > 0 {
				digits++
			}
			if digits > 34 {
				if c == '0' {
					// Exact rounding.
					e++
					continue
				}
				return dErr(orig)
			}
			mul *= 10
			add *= 10
			add += uint32(c - '0')
			continue
		}
		if c == '.' {
			i++
			// Reject a second dot, or a lone "." input.
			if dot >= 0 || i == 1 && len(s) == 1 {
				return dErr(orig)
			}
			if i == len(s) {
				break
			}
			if s[i] < '0' || s[i] > '9' || e > 0 {
				return dErr(orig)
			}
			dot = i
			continue
		}
		break
	}
	if i == 0 {
		return dErr(orig)
	}
	// Commit any digits still pending in (mul, add).
	if mul > 1 {
		h, l, ovr = muladd(h, l, mul, add)
		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
			return dErr(orig)
		}
	}
	// Fractional digits shift the exponent down by their count.
	if dot >= 0 {
		e += dot - i
	}
	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
		i++
		eneg := s[i] == '-'
		if eneg || s[i] == '+' {
			i++
			if i == len(s) {
				return dErr(orig)
			}
		}
		n := 0
		for i < len(s) && n < 1e4 {
			c := s[i]
			i++
			if c < '0' || c > '9' {
				return dErr(orig)
			}
			n *= 10
			n += int(c - '0')
		}
		if eneg {
			n = -n
		}
		e += n
		for e < -6176 {
			// Subnormal.
			var div uint32 = 1
			for div < 1e9 && e < -6176 {
				div *= 10
				e++
			}
			var rem uint32
			h, l, rem = divmod(h, l, div)
			// A nonzero remainder would lose precision: reject.
			if rem > 0 {
				return dErr(orig)
			}
		}
		for e > 6111 {
			// Clamped.
			var mul uint32 = 1
			for mul < 1e9 && e > 6111 {
				mul *= 10
				e--
			}
			h, l, ovr = muladd(h, l, mul, 0)
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if e < -6176 || e > 6111 {
			return dErr(orig)
		}
	}
	// Trailing garbage after the number.
	if i < len(s) {
		return dErr(orig)
	}

	// Pack the biased exponent into bits 49..62. Note: & and << share the
	// same precedence level in Go, so this is ((e+6176) & mask) << 49.
	h |= uint64(e+6176) & uint64(1<<14-1) << 49
	if neg {
		h |= 1 << 63
	}
	return Decimal128{h, l}, nil
}

// muladd computes (h, l)*mul + add over 128 bits in four 32-bit limbs,
// returning the 128-bit result and any bits that overflowed past bit 127.
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
	mul64 := uint64(mul)
	a := mul64 * (l & (1<<32 - 1))
	b := a>>32 + mul64*(l>>32)
	c := b>>32 + mul64*(h&(1<<32-1))
	d := c>>32 + mul64*(h>>32)
	a = a&(1<<32-1) + uint64(add)
	b = b&(1<<32-1) + a>>32
	c = c&(1<<32-1) + b>>32
	d = d&(1<<32-1) + c>>32
	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
}
mgo-r2016.08.01/bson/decimal_test.go000066400000000000000000005556061274772341400170260ustar00rootroot00000000000000// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package bson_test import ( "encoding/hex" "encoding/json" "fmt" "regexp" "strings" "gopkg.in/mgo.v2/bson" . "gopkg.in/check.v1" ) // -------------------------------------------------------------------------- // Decimal tests type decimalTests struct { Valid []struct { Description string `json:"description"` BSON string `json:"bson"` CanonicalBSON string `json:"canonical_bson"` ExtJSON string `json:"extjson"` CanonicalExtJSON string `json:"canonical_extjson"` Lossy bool `json:"lossy"` } `json:"valid"` ParseErrors []struct { Description string `json:"description"` String string `json:"string"` } `json:"parseErrors"` } func extJSONRepr(s string) string { var value struct { D struct { Repr string `json:"$numberDecimal"` } `json:"d"` } err := json.Unmarshal([]byte(s), &value) if err != nil { panic(err) } return value.D.Repr } func (s *S) TestDecimalTests(c *C) { // These also conform to the spec and are used by Go elsewhere. // (e.g. math/big won't parse "Infinity"). goStr := func(s string) string { switch s { case "Infinity": return "Inf" case "-Infinity": return "-Inf" } return s } for _, testEntry := range decimalTestsJSON { testFile := testEntry.file var tests decimalTests err := json.Unmarshal([]byte(testEntry.json), &tests) c.Assert(err, IsNil) for _, test := range tests.Valid { c.Logf("Running %s test: %s", testFile, test.Description) test.BSON = strings.ToLower(test.BSON) // Unmarshal value from BSON data. 
bsonData, err := hex.DecodeString(test.BSON) var bsonValue struct{ D interface{} } err = bson.Unmarshal(bsonData, &bsonValue) c.Assert(err, IsNil) dec128, ok := bsonValue.D.(bson.Decimal128) c.Assert(ok, Equals, true) // Extract ExtJSON representations (canonical and not). extjRepr := extJSONRepr(test.ExtJSON) cextjRepr := extjRepr if test.CanonicalExtJSON != "" { cextjRepr = extJSONRepr(test.CanonicalExtJSON) } wantRepr := goStr(cextjRepr) // Generate canonical representation. c.Assert(dec128.String(), Equals, wantRepr) // Parse original canonical representation. parsed, err := bson.ParseDecimal128(cextjRepr) c.Assert(err, IsNil) c.Assert(parsed.String(), Equals, wantRepr) // Parse non-canonical representation. parsed, err = bson.ParseDecimal128(extjRepr) c.Assert(err, IsNil) c.Assert(parsed.String(), Equals, wantRepr) // Parse Go canonical representation (Inf vs. Infinity). parsed, err = bson.ParseDecimal128(wantRepr) c.Assert(err, IsNil) c.Assert(parsed.String(), Equals, wantRepr) // Marshal original value back into BSON data. data, err := bson.Marshal(bsonValue) c.Assert(err, IsNil) c.Assert(hex.EncodeToString(data), Equals, test.BSON) if test.Lossy { continue } // Marshal the parsed canonical representation. 
var parsedValue struct{ D interface{} } parsedValue.D = parsed data, err = bson.Marshal(parsedValue) c.Assert(err, IsNil) c.Assert(hex.EncodeToString(data), Equals, test.BSON) } for _, test := range tests.ParseErrors { c.Logf("Running %s parse error test: %s (string %q)", testFile, test.Description, test.String) _, err := bson.ParseDecimal128(test.String) quoted := regexp.QuoteMeta(fmt.Sprintf("%q", test.String)) c.Assert(err, ErrorMatches, `cannot parse `+quoted+` as a decimal128`) } } } const decBenchNum = "9.999999999999999999999999999999999E+6144" func (s *S) BenchmarkDecimal128String(c *C) { d, err := bson.ParseDecimal128(decBenchNum) c.Assert(err, IsNil) c.Assert(d.String(), Equals, decBenchNum) c.ResetTimer() for i := 0; i < c.N; i++ { d.String() } } func (s *S) BenchmarkDecimal128Parse(c *C) { var err error c.ResetTimer() for i := 0; i < c.N; i++ { _, err = bson.ParseDecimal128(decBenchNum) } if err != nil { panic(err) } } var decimalTestsJSON = []struct{ file, json string }{ {"decimal128-1.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "valid": [ { "description": "Special - Canonical NaN", "bson": "180000001364000000000000000000000000000000007C00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Special - Negative NaN", "bson": "18000000136400000000000000000000000000000000FC00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", "lossy": true }, { "description": "Special - Negative NaN", "bson": "18000000136400000000000000000000000000000000FC00", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-NaN\"}}", "lossy": true }, { "description": "Special - Canonical SNaN", "bson": "180000001364000000000000000000000000000000007E00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", "lossy": true }, { "description": "Special - Negative SNaN", "bson": "18000000136400000000000000000000000000000000FE00", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"NaN\"}}", "lossy": true }, { "description": "Special - NaN with a payload", "bson": "180000001364001200000000000000000000000000007E00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", "lossy": true }, { "description": "Special - Canonical Positive Infinity", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Special - Canonical Negative Infinity", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Special - Invalid representation treated as 0", "bson": "180000001364000000000000000000000000000000106C00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}", "lossy": true }, { "description": "Special - Invalid representation treated as -0", "bson": "18000000136400DCBA9876543210DEADBEEF00000010EC00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}", "lossy": true }, { "description": "Special - Invalid representation treated as 0E3", "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}", "lossy": true }, { "description": "Regular - Adjusted Exponent Limit", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", "extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}" }, { "description": "Regular - Smallest", "bson": "18000000136400D204000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}" }, { "description": "Regular - Smallest with Trailing Zeros", "bson": "1800000013640040EF5A07000000000000000000002A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}" }, { "description": "Regular - 0.1", "bson": "1800000013640001000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}" }, { "description": "Regular - 0.1234567890123456789012345678901234", "bson": 
"18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}" }, { "description": "Regular - 0", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "Regular - -0", "bson": "18000000136400000000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "Regular - -0.0", "bson": "1800000013640000000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" }, { "description": "Regular - 2", "bson": "180000001364000200000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}" }, { "description": "Regular - 2.000", "bson": "18000000136400D0070000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}" }, { "description": "Regular - Largest", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" }, { "description": "Scientific - Tiniest", "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}" }, { "description": "Scientific - Tiny", "bson": "180000001364000100000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" }, { "description": "Scientific - Negative Tiny", "bson": "180000001364000100000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" }, { "description": "Scientific - Adjusted Exponent Limit", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", "extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}" }, { "description": "Scientific - Fractional", "bson": "1800000013640064000000000000000000000000002CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" }, { "description": 
"Scientific - 0 with Exponent", "bson": "180000001364000000000000000000000000000000205F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}" }, { "description": "Scientific - 0 with Negative Exponent", "bson": "1800000013640000000000000000000000000000007A2B00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}" }, { "description": "Scientific - No Decimal with Signed Exponent", "bson": "180000001364000100000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" }, { "description": "Scientific - Trailing Zero", "bson": "180000001364001A04000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}" }, { "description": "Scientific - With Decimal", "bson": "180000001364006900000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.05E+3\"}}" }, { "description": "Scientific - Full", "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}" }, { "description": "Scientific - Large", "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" }, { "description": "Scientific - Largest", "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" }, { "description": "Non-Canonical Parsing - Exponent Normalization", "bson": "1800000013640064000000000000000000000000002CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" }, { "description": "Non-Canonical Parsing - Unsigned Positive Exponent", "bson": "180000001364000100000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" }, { "description": "Non-Canonical Parsing - Lowercase Exponent 
Identifier", "bson": "180000001364000100000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" }, { "description": "Non-Canonical Parsing - Long Significand with Exponent", "bson": "1800000013640079D9E0F9763ADA429D0200000000583000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.2345689012345789012345E+34\"}}" }, { "description": "Non-Canonical Parsing - Positive Sign", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" }, { "description": "Non-Canonical Parsing - Long Decimal String", "bson": "180000001364000100000000000000000000000000722800", "extjson": "{\"d\" : {\"$numberDecimal\" : \".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}", 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-999\"}}" }, { "description": "Non-Canonical Parsing - nan", "bson": "180000001364000000000000000000000000000000007C00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Non-Canonical Parsing - nAn", "bson": "180000001364000000000000000000000000000000007C00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Non-Canonical Parsing - +infinity", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Non-Canonical Parsing - infinity", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Non-Canonical Parsing - infiniTY", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Non-Canonical Parsing - inf", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Non-Canonical Parsing - inF", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Non-Canonical Parsing - -infinity", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { 
"description": "Non-Canonical Parsing - -infiniTy", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Non-Canonical Parsing - -Inf", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Non-Canonical Parsing - -inf", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Non-Canonical Parsing - -inF", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Rounded Subnormal number", "bson": "180000001364000100000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E-6177\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" }, { "description": "Clamped", "bson": "180000001364000a00000000000000000000000000fe5f00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E6112\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" }, { "description": "Exact rounding", "bson": "18000000136400000000000a5bc138938d44c64d31cc3700", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}" } ] } `}, {"decimal128-2.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "valid": [ { "description": "[decq021] Normality", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C40B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234567890123456789012345678901234\"}}" }, { "description": "[decq823] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400010000800000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483649\"}}" }, { "description": "[decq822] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400000000800000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483648\"}}" }, { "description": "[decq821] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400FFFFFF7F0000000000000000000040B000", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"-2147483647\"}}" }, { "description": "[decq820] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400FEFFFF7F0000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483646\"}}" }, { "description": "[decq152] fold-downs (more below)", "bson": "18000000136400393000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12345\"}}" }, { "description": "[decq154] fold-downs (more below)", "bson": "18000000136400D20400000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234\"}}" }, { "description": "[decq006] derivative canonical plain strings", "bson": "18000000136400EE0200000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-750\"}}" }, { "description": "[decq164] fold-downs (more below)", "bson": "1800000013640039300000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123.45\"}}" }, { "description": "[decq156] fold-downs (more below)", "bson": "180000001364007B0000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123\"}}" }, { "description": "[decq008] derivative canonical plain strings", "bson": "18000000136400EE020000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-75.0\"}}" }, { "description": "[decq158] fold-downs (more below)", "bson": "180000001364000C0000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12\"}}" }, { "description": "[decq122] Nmax and similar", "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFFDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999999999999999999999999999999999E+6144\"}}" }, { "description": "[decq002] (mostly derived from the Strawman 4 document and examples)", "bson": "18000000136400EE020000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50\"}}" }, { "description": "[decq004] derivative canonical plain strings", "bson": 
"18000000136400EE0200000000000000000000000042B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E+3\"}}" }, { "description": "[decq018] derivative canonical plain strings", "bson": "18000000136400EE020000000000000000000000002EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E-7\"}}" }, { "description": "[decq125] Nmax and similar", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.234567890123456789012345678901234E+6144\"}}" }, { "description": "[decq131] fold-downs (more below)", "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" }, { "description": "[decq162] fold-downs (more below)", "bson": "180000001364007B000000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23\"}}" }, { "description": "[decq176] Nmin and below", "bson": "18000000136400010000000A5BC138938D44C64D31008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000001E-6143\"}}" }, { "description": "[decq174] Nmin and below", "bson": "18000000136400000000000A5BC138938D44C64D31008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E-6143\"}}" }, { "description": "[decq133] fold-downs (more below)", "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" }, { "description": "[decq160] fold-downs (more below)", "bson": "18000000136400010000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" }, { "description": "[decq172] Nmin and below", "bson": "180000001364000100000000000000000000000000428000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6143\"}}" }, { "description": "[decq010] derivative canonical plain strings", "bson": "18000000136400EE020000000000000000000000003AB000", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"-0.750\"}}" }, { "description": "[decq012] derivative canonical plain strings", "bson": "18000000136400EE0200000000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0750\"}}" }, { "description": "[decq014] derivative canonical plain strings", "bson": "18000000136400EE0200000000000000000000000034B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000750\"}}" }, { "description": "[decq016] derivative canonical plain strings", "bson": "18000000136400EE0200000000000000000000000030B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000750\"}}" }, { "description": "[decq404] zeros", "bson": "180000001364000000000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" }, { "description": "[decq424] negative zeros", "bson": "180000001364000000000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" }, { "description": "[decq407] zeros", "bson": "1800000013640000000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" }, { "description": "[decq427] negative zeros", "bson": "1800000013640000000000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" }, { "description": "[decq409] zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[decq428] negative zeros", "bson": "18000000136400000000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "[decq700] Selected DPD codes", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[decq406] zeros", "bson": "1800000013640000000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" }, { "description": "[decq426] negative zeros", "bson": "1800000013640000000000000000000000000000003CB000", "extjson": "{\"d\" 
: {\"$numberDecimal\" : \"-0.00\"}}" }, { "description": "[decq410] zeros", "bson": "180000001364000000000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" }, { "description": "[decq431] negative zeros", "bson": "18000000136400000000000000000000000000000046B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+3\"}}" }, { "description": "[decq419] clamped zeros...", "bson": "180000001364000000000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" }, { "description": "[decq432] negative zeros", "bson": "180000001364000000000000000000000000000000FEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" }, { "description": "[decq405] zeros", "bson": "180000001364000000000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" }, { "description": "[decq425] negative zeros", "bson": "180000001364000000000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" }, { "description": "[decq508] Specials", "bson": "180000001364000000000000000000000000000000007800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "[decq528] Specials", "bson": "18000000136400000000000000000000000000000000F800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "[decq541] Specials", "bson": "180000001364000000000000000000000000000000007C00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "[decq074] Nmin and below", "bson": "18000000136400000000000A5BC138938D44C64D31000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E-6143\"}}" }, { "description": "[decq602] fold-down full sequence", "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" }, { "description": "[decq604] fold-down full sequence", "bson": 
"180000001364000000000081EFAC855B416D2DEE04FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" }, { "description": "[decq606] fold-down full sequence", "bson": "1800000013640000000080264B91C02220BE377E00FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" }, { "description": "[decq608] fold-down full sequence", "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" }, { "description": "[decq610] fold-down full sequence", "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" }, { "description": "[decq612] fold-down full sequence", "bson": "18000000136400000000106102253E5ECE4F200000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" }, { "description": "[decq614] fold-down full sequence", "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" }, { "description": "[decq616] fold-down full sequence", "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" }, { "description": "[decq618] fold-down full sequence", "bson": "180000001364000000004A48011416954508000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" }, { "description": "[decq620] fold-down full sequence", "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" }, { "description": "[decq622] fold-down full sequence", "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" }, { "description": 
"[decq624] fold-down full sequence", "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" }, { "description": "[decq626] fold-down full sequence", "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" }, { "description": "[decq628] fold-down full sequence", "bson": "18000000136400000010632D5EC76B050000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" }, { "description": "[decq630] fold-down full sequence", "bson": "180000001364000000E8890423C78A000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" }, { "description": "[decq632] fold-down full sequence", "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" }, { "description": "[decq634] fold-down full sequence", "bson": "1800000013640000008A5D78456301000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" }, { "description": "[decq636] fold-down full sequence", "bson": "180000001364000000C16FF2862300000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" }, { "description": "[decq638] fold-down full sequence", "bson": "180000001364000080C6A47E8D0300000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" }, { "description": "[decq640] fold-down full sequence", "bson": "1800000013640000407A10F35A0000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" }, { "description": "[decq642] fold-down full sequence", "bson": "1800000013640000A0724E18090000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" }, { "description": "[decq644] fold-down full sequence", "bson": 
"180000001364000010A5D4E8000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" }, { "description": "[decq646] fold-down full sequence", "bson": "1800000013640000E8764817000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" }, { "description": "[decq648] fold-down full sequence", "bson": "1800000013640000E40B5402000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" }, { "description": "[decq650] fold-down full sequence", "bson": "1800000013640000CA9A3B00000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" }, { "description": "[decq652] fold-down full sequence", "bson": "1800000013640000E1F50500000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" }, { "description": "[decq654] fold-down full sequence", "bson": "180000001364008096980000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" }, { "description": "[decq656] fold-down full sequence", "bson": "1800000013640040420F0000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" }, { "description": "[decq658] fold-down full sequence", "bson": "18000000136400A086010000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" }, { "description": "[decq660] fold-down full sequence", "bson": "180000001364001027000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" }, { "description": "[decq662] fold-down full sequence", "bson": "18000000136400E803000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" }, { "description": "[decq664] fold-down full sequence", "bson": "180000001364006400000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" }, { "description": "[decq666] fold-down full 
sequence", "bson": "180000001364000A00000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" }, { "description": "[decq060] fold-downs (more below)", "bson": "180000001364000100000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" }, { "description": "[decq670] fold-down full sequence", "bson": "180000001364000100000000000000000000000000FC5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6110\"}}" }, { "description": "[decq668] fold-down full sequence", "bson": "180000001364000100000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6111\"}}" }, { "description": "[decq072] Nmin and below", "bson": "180000001364000100000000000000000000000000420000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6143\"}}" }, { "description": "[decq076] Nmin and below", "bson": "18000000136400010000000A5BC138938D44C64D31000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000001E-6143\"}}" }, { "description": "[decq036] fold-downs (more below)", "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" }, { "description": "[decq062] fold-downs (more below)", "bson": "180000001364007B000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23\"}}" }, { "description": "[decq034] Nmax and similar", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234567890123456789012345678901234E+6144\"}}" }, { "description": "[decq441] exponent lengths", "bson": "180000001364000700000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" }, { "description": "[decq449] exponent lengths", "bson": "1800000013640007000000000000000000000000001E5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5999\"}}" }, { "description": "[decq447] exponent lengths", "bson": 
"1800000013640007000000000000000000000000000E3800", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+999\"}}" }, { "description": "[decq445] exponent lengths", "bson": "180000001364000700000000000000000000000000063100", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+99\"}}" }, { "description": "[decq443] exponent lengths", "bson": "180000001364000700000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" }, { "description": "[decq842] VG testcase", "bson": "180000001364000000FED83F4E7C9FE4E269E38A5BCD1700", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7.049000000000010795488000000000000E-3097\"}}" }, { "description": "[decq841] VG testcase", "bson": "180000001364000000203B9DB5056F000000000000002400", "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.000000000000000000E-1550\"}}" }, { "description": "[decq840] VG testcase", "bson": "180000001364003C17258419D710C42F0000000000002400", "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.81125000000001349436E-1548\"}}" }, { "description": "[decq701] Selected DPD codes", "bson": "180000001364000900000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"9\"}}" }, { "description": "[decq032] Nmax and similar", "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" }, { "description": "[decq702] Selected DPD codes", "bson": "180000001364000A00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" }, { "description": "[decq057] fold-downs (more below)", "bson": "180000001364000C00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" }, { "description": "[decq703] Selected DPD codes", "bson": "180000001364001300000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"19\"}}" }, { "description": "[decq704] Selected DPD codes", "bson": "180000001364001400000000000000000000000000403000", "extjson": "{\"d\" 
: {\"$numberDecimal\" : \"20\"}}" }, { "description": "[decq705] Selected DPD codes", "bson": "180000001364001D00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"29\"}}" }, { "description": "[decq706] Selected DPD codes", "bson": "180000001364001E00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"30\"}}" }, { "description": "[decq707] Selected DPD codes", "bson": "180000001364002700000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"39\"}}" }, { "description": "[decq708] Selected DPD codes", "bson": "180000001364002800000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"40\"}}" }, { "description": "[decq709] Selected DPD codes", "bson": "180000001364003100000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"49\"}}" }, { "description": "[decq710] Selected DPD codes", "bson": "180000001364003200000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"50\"}}" }, { "description": "[decq711] Selected DPD codes", "bson": "180000001364003B00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"59\"}}" }, { "description": "[decq712] Selected DPD codes", "bson": "180000001364003C00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"60\"}}" }, { "description": "[decq713] Selected DPD codes", "bson": "180000001364004500000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"69\"}}" }, { "description": "[decq714] Selected DPD codes", "bson": "180000001364004600000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"70\"}}" }, { "description": "[decq715] Selected DPD codes", "bson": "180000001364004700000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"71\"}}" }, { "description": "[decq716] Selected DPD codes", "bson": "180000001364004800000000000000000000000000403000", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"72\"}}" }, { "description": "[decq717] Selected DPD codes", "bson": "180000001364004900000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"73\"}}" }, { "description": "[decq718] Selected DPD codes", "bson": "180000001364004A00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"74\"}}" }, { "description": "[decq719] Selected DPD codes", "bson": "180000001364004B00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"75\"}}" }, { "description": "[decq720] Selected DPD codes", "bson": "180000001364004C00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"76\"}}" }, { "description": "[decq721] Selected DPD codes", "bson": "180000001364004D00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"77\"}}" }, { "description": "[decq722] Selected DPD codes", "bson": "180000001364004E00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"78\"}}" }, { "description": "[decq723] Selected DPD codes", "bson": "180000001364004F00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"79\"}}" }, { "description": "[decq056] fold-downs (more below)", "bson": "180000001364007B00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"123\"}}" }, { "description": "[decq064] fold-downs (more below)", "bson": "1800000013640039300000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"123.45\"}}" }, { "description": "[decq732] Selected DPD codes", "bson": "180000001364000802000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"520\"}}" }, { "description": "[decq733] Selected DPD codes", "bson": "180000001364000902000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"521\"}}" }, { "description": "[decq740] DPD: one of each of the huffman groups", "bson": "180000001364000903000000000000000000000000403000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"777\"}}" }, { "description": "[decq741] DPD: one of each of the huffman groups", "bson": "180000001364000A03000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"778\"}}" }, { "description": "[decq742] DPD: one of each of the huffman groups", "bson": "180000001364001303000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"787\"}}" }, { "description": "[decq746] DPD: one of each of the huffman groups", "bson": "180000001364001F03000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"799\"}}" }, { "description": "[decq743] DPD: one of each of the huffman groups", "bson": "180000001364006D03000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"877\"}}" }, { "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)", "bson": "180000001364007803000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"888\"}}" }, { "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)", "bson": "180000001364007903000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"889\"}}" }, { "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)", "bson": "180000001364008203000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"898\"}}" }, { "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)", "bson": "180000001364008303000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"899\"}}" }, { "description": "[decq745] DPD: one of each of the huffman groups", "bson": "18000000136400D303000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"979\"}}" }, { "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)", "bson": "18000000136400DC03000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"988\"}}" }, { 
"description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)", "bson": "18000000136400DD03000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"989\"}}" }, { "description": "[decq730] Selected DPD codes", "bson": "18000000136400E203000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"994\"}}" }, { "description": "[decq731] Selected DPD codes", "bson": "18000000136400E303000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"995\"}}" }, { "description": "[decq744] DPD: one of each of the huffman groups", "bson": "18000000136400E503000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"997\"}}" }, { "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)", "bson": "18000000136400E603000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"998\"}}" }, { "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)", "bson": "18000000136400E703000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"999\"}}" }, { "description": "[decq053] fold-downs (more below)", "bson": "18000000136400D204000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234\"}}" }, { "description": "[decq052] fold-downs (more below)", "bson": "180000001364003930000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345\"}}" }, { "description": "[decq792] Miscellaneous (testers' queries, etc.)", "bson": "180000001364003075000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"30000\"}}" }, { "description": "[decq793] Miscellaneous (testers' queries, etc.)", "bson": "1800000013640090940D0000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"890000\"}}" }, { "description": "[decq824] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400FEFFFF7F00000000000000000000403000", "extjson": "{\"d\" 
: {\"$numberDecimal\" : \"2147483646\"}}" }, { "description": "[decq825] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400FFFFFF7F00000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483647\"}}" }, { "description": "[decq826] values around [u]int32 edges (zeros done earlier)", "bson": "180000001364000000008000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483648\"}}" }, { "description": "[decq827] values around [u]int32 edges (zeros done earlier)", "bson": "180000001364000100008000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483649\"}}" }, { "description": "[decq828] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400FEFFFFFF00000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967294\"}}" }, { "description": "[decq829] values around [u]int32 edges (zeros done earlier)", "bson": "18000000136400FFFFFFFF00000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967295\"}}" }, { "description": "[decq830] values around [u]int32 edges (zeros done earlier)", "bson": "180000001364000000000001000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967296\"}}" }, { "description": "[decq831] values around [u]int32 edges (zeros done earlier)", "bson": "180000001364000100000001000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967297\"}}" }, { "description": "[decq022] Normality", "bson": "18000000136400C7711CC7B548F377DC80A131C836403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1111111111111111111111111111111111\"}}" }, { "description": "[decq020] Normality", "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" }, { "description": "[decq550] Specials", "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED413000", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"9999999999999999999999999999999999\"}}" } ] } `}, {"decimal128-3.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "valid": [ { "description": "[basx066] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE0000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00345678.5432\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" }, { "description": "[basx065] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE0000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0345678.5432\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" }, { "description": "[basx064] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE0000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" }, { "description": "[basx041] strings without E cannot generate E in result", "bson": "180000001364004C0000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-76\"}}" }, { "description": "[basx027] conform to rules and exponent will be in permitted range).", "bson": "180000001364000F270000000000000000000000003AB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999\"}}" }, { "description": "[basx026] conform to rules and exponent will be in permitted range).", "bson": "180000001364009F230000000000000000000000003AB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.119\"}}" }, { "description": "[basx025] conform to rules and exponent will be in permitted range).", "bson": "180000001364008F030000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.11\"}}" }, { "description": "[basx024] conform to rules and exponent will be in permitted range).", "bson": "180000001364005B000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.1\"}}" }, { "description": "[dqbsr531] negatives (Rounded)", "bson": 
"1800000013640099761CC7B548F377DC80A131C836FEAF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.1111111111111111111111111111123450\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.111111111111111111111111111112345\"}}" }, { "description": "[basx022] conform to rules and exponent will be in permitted range).", "bson": "180000001364000A000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0\"}}" }, { "description": "[basx021] conform to rules and exponent will be in permitted range).", "bson": "18000000136400010000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" }, { "description": "[basx601] Zeros", "bson": "1800000013640000000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" }, { "description": "[basx622] Zeros", "bson": "1800000013640000000000000000000000000000002EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-9\"}}" }, { "description": "[basx602] Zeros", "bson": "180000001364000000000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" }, { "description": "[basx621] Zeros", "bson": "18000000136400000000000000000000000000000030B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8\"}}" }, { "description": "[basx603] Zeros", "bson": "180000001364000000000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" }, { "description": "[basx620] Zeros", "bson": "18000000136400000000000000000000000000000032B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"-0E-7\"}}" }, { "description": "[basx604] Zeros", "bson": "180000001364000000000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" }, { "description": "[basx619] Zeros", "bson": "18000000136400000000000000000000000000000034B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" }, { "description": "[basx605] Zeros", "bson": "180000001364000000000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" }, { "description": "[basx618] Zeros", "bson": "18000000136400000000000000000000000000000036B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" }, { "description": "[basx680] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"000000.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx606] Zeros", "bson": "180000001364000000000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" }, { "description": "[basx617] Zeros", "bson": "18000000136400000000000000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" }, { "description": "[basx681] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"00000.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx686] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00000.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx687] Zeros", "bson": "18000000136400000000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00000.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "[basx019] conform to rules and exponent will be in permitted range).", "bson": "1800000013640000000000000000000000000000003CB000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"-00.00\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" }, { "description": "[basx607] Zeros", "bson": "1800000013640000000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" }, { "description": "[basx616] Zeros", "bson": "1800000013640000000000000000000000000000003AB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" }, { "description": "[basx682] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0000.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx155] Numbers with E", "bson": "1800000013640000000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000e+0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" }, { "description": "[basx130] Numbers with E", "bson": "180000001364000000000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" }, { "description": "[basx290] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" }, { "description": "[basx131] Numbers with E", "bson": "180000001364000000000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" }, { "description": "[basx291] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000036B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" }, { "description": "[basx132] Numbers with E", "bson": 
"180000001364000000000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" }, { "description": "[basx292] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000034B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" }, { "description": "[basx133] Numbers with E", "bson": "180000001364000000000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" }, { "description": "[basx293] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000032B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" }, { "description": "[basx608] Zeros", "bson": "1800000013640000000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" }, { "description": "[basx615] Zeros", "bson": "1800000013640000000000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" }, { "description": "[basx683] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"000.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx630] Zeros", "bson": "1800000013640000000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" }, { "description": "[basx670] Zeros", "bson": "1800000013640000000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" }, { "description": "[basx631] Zeros", "bson": 
"1800000013640000000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" }, { "description": "[basx671] Zeros", "bson": "1800000013640000000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" }, { "description": "[basx134] Numbers with E", "bson": "180000001364000000000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" }, { "description": "[basx294] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" }, { "description": "[basx632] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx672] Zeros", "bson": "180000001364000000000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" }, { "description": "[basx135] Numbers with E", "bson": "180000001364000000000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" }, { "description": "[basx295] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000036B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" }, { "description": "[basx633] Zeros", "bson": "180000001364000000000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0.00E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" }, { "description": "[basx673] Zeros", "bson": "180000001364000000000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" }, { "description": "[basx136] Numbers with E", "bson": "180000001364000000000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" }, { "description": "[basx674] Zeros", "bson": "180000001364000000000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" }, { "description": "[basx634] Zeros", "bson": "180000001364000000000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" }, { "description": "[basx137] Numbers with E", "bson": "180000001364000000000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" }, { "description": "[basx635] Zeros", "bson": "180000001364000000000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" }, { "description": "[basx675] Zeros", "bson": "180000001364000000000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" }, { "description": "[basx636] Zeros", "bson": "180000001364000000000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" }, { "description": "[basx676] Zeros", "bson": "180000001364000000000000000000000000000000303000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" }, { "description": "[basx637] Zeros", "bson": "1800000013640000000000000000000000000000004A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" }, { "description": "[basx677] Zeros", "bson": "1800000013640000000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" }, { "description": "[basx638] Zeros", "bson": "1800000013640000000000000000000000000000004C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" }, { "description": "[basx678] Zeros", "bson": "1800000013640000000000000000000000000000002C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" }, { "description": "[basx149] Numbers with E", "bson": "180000001364000000000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"000E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" }, { "description": "[basx639] Zeros", "bson": "1800000013640000000000000000000000000000004E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" }, { "description": "[basx679] Zeros", "bson": "1800000013640000000000000000000000000000002A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-11\"}}" }, { "description": "[basx063] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE00000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00345678.5432\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" }, { "description": 
"[basx018] conform to rules and exponent will be in permitted range).", "bson": "1800000013640000000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" }, { "description": "[basx609] Zeros", "bson": "1800000013640000000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" }, { "description": "[basx614] Zeros", "bson": "1800000013640000000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" }, { "description": "[basx684] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx640] Zeros", "bson": "1800000013640000000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" }, { "description": "[basx660] Zeros", "bson": "1800000013640000000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" }, { "description": "[basx641] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx661] Zeros", "bson": "1800000013640000000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" }, { "description": "[basx296] some more negative zeros [systematic tests below]", "bson": "1800000013640000000000000000000000000000003AB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" }, { "description": "[basx642] Zeros", "bson": "180000001364000000000000000000000000000000423000", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"0.0E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" }, { "description": "[basx662] Zeros", "bson": "1800000013640000000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" }, { "description": "[basx297] some more negative zeros [systematic tests below]", "bson": "18000000136400000000000000000000000000000038B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" }, { "description": "[basx643] Zeros", "bson": "180000001364000000000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" }, { "description": "[basx663] Zeros", "bson": "180000001364000000000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" }, { "description": "[basx644] Zeros", "bson": "180000001364000000000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" }, { "description": "[basx664] Zeros", "bson": "180000001364000000000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" }, { "description": "[basx645] Zeros", "bson": "180000001364000000000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" }, { "description": "[basx665] Zeros", "bson": "180000001364000000000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" }, { "description": "[basx646] Zeros", "bson": 
"1800000013640000000000000000000000000000004A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" }, { "description": "[basx666] Zeros", "bson": "180000001364000000000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" }, { "description": "[basx647] Zeros", "bson": "1800000013640000000000000000000000000000004C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" }, { "description": "[basx667] Zeros", "bson": "180000001364000000000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" }, { "description": "[basx648] Zeros", "bson": "1800000013640000000000000000000000000000004E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" }, { "description": "[basx668] Zeros", "bson": "1800000013640000000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" }, { "description": "[basx160] Numbers with E", "bson": "180000001364000000000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" }, { "description": "[basx161] Numbers with E", "bson": "1800000013640000000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E-9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" }, { "description": "[basx649] Zeros", "bson": "180000001364000000000000000000000000000000503000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" }, { "description": "[basx669] 
Zeros", "bson": "1800000013640000000000000000000000000000002C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" }, { "description": "[basx062] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE00000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0345678.5432\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" }, { "description": "[basx001] conform to rules and exponent will be in permitted range).", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx017] conform to rules and exponent will be in permitted range).", "bson": "18000000136400000000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "[basx611] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx613] Zeros", "bson": "18000000136400000000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "[basx685] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx688] Zeros", "bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx689] Zeros", "bson": "18000000136400000000000000000000000000000040B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "[basx650] Zeros", 
"bson": "180000001364000000000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "[basx651] Zeros", "bson": "180000001364000000000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" }, { "description": "[basx298] some more negative zeros [systematic tests below]", "bson": "1800000013640000000000000000000000000000003CB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" }, { "description": "[basx652] Zeros", "bson": "180000001364000000000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" }, { "description": "[basx299] some more negative zeros [systematic tests below]", "bson": "1800000013640000000000000000000000000000003AB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" }, { "description": "[basx653] Zeros", "bson": "180000001364000000000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" }, { "description": "[basx654] Zeros", "bson": "180000001364000000000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" }, { "description": "[basx655] Zeros", "bson": "1800000013640000000000000000000000000000004A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" }, { "description": "[basx656] Zeros", "bson": "1800000013640000000000000000000000000000004C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" }, { "description": "[basx657] Zeros", "bson": "1800000013640000000000000000000000000000004E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" }, { "description": "[basx658] Zeros", "bson": "180000001364000000000000000000000000000000503000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" }, { "description": "[basx138] Numbers with E", "bson": 
"180000001364000000000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" }, { "description": "[basx139] Numbers with E", "bson": "18000000136400000000000000000000000000000052B000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+9\"}}" }, { "description": "[basx144] Numbers with E", "bson": "180000001364000000000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" }, { "description": "[basx154] Numbers with E", "bson": "180000001364000000000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" }, { "description": "[basx659] Zeros", "bson": "180000001364000000000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" }, { "description": "[basx042] strings without E cannot generate E in result", "bson": "18000000136400FC040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" }, { "description": "[basx143] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1E+009\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx061] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE00000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+345678.5432\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" }, { "description": "[basx036] conform to rules and exponent will be in permitted range).", "bson": "1800000013640015CD5B0700000000000000000000203000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000123456789\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-8\"}}" }, { "description": "[basx035] conform to rules 
and exponent will be in permitted range).", "bson": "1800000013640015CD5B0700000000000000000000223000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000123456789\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-7\"}}" }, { "description": "[basx034] conform to rules and exponent will be in permitted range).", "bson": "1800000013640015CD5B0700000000000000000000243000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000123456789\"}}" }, { "description": "[basx053] strings without E cannot generate E in result", "bson": "180000001364003200000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" }, { "description": "[basx033] conform to rules and exponent will be in permitted range).", "bson": "1800000013640015CD5B0700000000000000000000263000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000123456789\"}}" }, { "description": "[basx016] conform to rules and exponent will be in permitted range).", "bson": "180000001364000C000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.012\"}}" }, { "description": "[basx015] conform to rules and exponent will be in permitted range).", "bson": "180000001364007B000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123\"}}" }, { "description": "[basx037] conform to rules and exponent will be in permitted range).", "bson": "1800000013640078DF0D8648700000000000000000223000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012344\"}}" }, { "description": "[basx038] conform to rules and exponent will be in permitted range).", "bson": "1800000013640079DF0D8648700000000000000000223000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012345\"}}" }, { "description": "[basx250] Numbers with E", "bson": "18000000136400F104000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" }, { "description": "[basx257] Numbers with E", "bson": 
"18000000136400F104000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" }, { "description": "[basx256] Numbers with E", "bson": "18000000136400F104000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" }, { "description": "[basx258] Numbers with E", "bson": "18000000136400F1040000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" }, { "description": "[basx251] Numbers with E", "bson": "18000000136400F104000000000000000000000000103000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-21\"}}" }, { "description": "[basx263] Numbers with E", "bson": "18000000136400F104000000000000000000000000603000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+19\"}}" }, { "description": "[basx255] Numbers with E", "bson": "18000000136400F104000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" }, { "description": "[basx259] Numbers with E", "bson": "18000000136400F1040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" }, { "description": "[basx254] Numbers with E", "bson": "18000000136400F104000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" }, { "description": "[basx260] Numbers with E", "bson": "18000000136400F1040000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0.1265E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" }, { "description": "[basx253] Numbers with E", "bson": "18000000136400F104000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" }, { "description": "[basx261] Numbers with E", "bson": "18000000136400F104000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" }, { "description": "[basx252] Numbers with E", "bson": "18000000136400F104000000000000000000000000283000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-9\"}}" }, { "description": "[basx262] Numbers with E", "bson": "18000000136400F104000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" }, { "description": "[basx159] Numbers with E", "bson": "1800000013640049000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.73e-7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.3E-8\"}}" }, { "description": "[basx004] conform to rules and exponent will be in permitted range).", "bson": "1800000013640064000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00\"}}" }, { "description": "[basx003] conform to rules and exponent will be in permitted range).", "bson": "180000001364000A000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" }, { "description": "[basx002] conform to rules and exponent will be in permitted range).", "bson": "180000001364000100000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" }, { "description": "[basx148] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+009\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx153] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E009\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx141] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+09\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx146] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+09\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx151] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e09\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx142] Numbers with E", "bson": "180000001364000100000000000000000000000000F43000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" }, { "description": "[basx147] Numbers with E", "bson": "180000001364000100000000000000000000000000F43000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+90\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" }, { "description": "[basx152] Numbers with E", "bson": "180000001364000100000000000000000000000000F43000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E90\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" }, { "description": "[basx140] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx150] Numbers with E", "bson": "180000001364000100000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E9\"}}", 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" }, { "description": "[basx014] conform to rules and exponent will be in permitted range).", "bson": "18000000136400D2040000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234\"}}" }, { "description": "[basx170] Numbers with E", "bson": "18000000136400F1040000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" }, { "description": "[basx177] Numbers with E", "bson": "18000000136400F1040000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" }, { "description": "[basx176] Numbers with E", "bson": "18000000136400F104000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" }, { "description": "[basx178] Numbers with E", "bson": "18000000136400F1040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" }, { "description": "[basx171] Numbers with E", "bson": "18000000136400F104000000000000000000000000123000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-20\"}}" }, { "description": "[basx183] Numbers with E", "bson": "18000000136400F104000000000000000000000000623000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+20\"}}" }, { "description": "[basx175] Numbers with E", "bson": "18000000136400F104000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" }, { "description": "[basx179] Numbers with E", "bson": "18000000136400F1040000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" }, { "description": "[basx174] Numbers with E", 
"bson": "18000000136400F104000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" }, { "description": "[basx180] Numbers with E", "bson": "18000000136400F104000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" }, { "description": "[basx173] Numbers with E", "bson": "18000000136400F104000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" }, { "description": "[basx181] Numbers with E", "bson": "18000000136400F104000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" }, { "description": "[basx172] Numbers with E", "bson": "18000000136400F1040000000000000000000000002A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-8\"}}" }, { "description": "[basx182] Numbers with E", "bson": "18000000136400F1040000000000000000000000004A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+8\"}}" }, { "description": "[basx157] Numbers with E", "bson": "180000001364000400000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"4E+9\"}}" }, { "description": "[basx067] examples", "bson": "180000001364000500000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" }, { "description": "[basx069] examples", "bson": "180000001364000500000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" }, { "description": "[basx385] Engineering notation tests", "bson": "180000001364000700000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" }, { "description": "[basx365] Engineering notation 
tests", "bson": "180000001364000700000000000000000000000000543000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E10\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+10\"}}" }, { "description": "[basx405] Engineering notation tests", "bson": "1800000013640007000000000000000000000000002C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-10\"}}" }, { "description": "[basx363] Engineering notation tests", "bson": "180000001364000700000000000000000000000000563000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E11\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+11\"}}" }, { "description": "[basx407] Engineering notation tests", "bson": "1800000013640007000000000000000000000000002A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-11\"}}" }, { "description": "[basx361] Engineering notation tests", "bson": "180000001364000700000000000000000000000000583000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E12\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+12\"}}" }, { "description": "[basx409] Engineering notation tests", "bson": "180000001364000700000000000000000000000000283000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-12\"}}" }, { "description": "[basx411] Engineering notation tests", "bson": "180000001364000700000000000000000000000000263000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-13\"}}" }, { "description": "[basx383] Engineering notation tests", "bson": "180000001364000700000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+1\"}}" }, { "description": "[basx387] Engineering notation tests", "bson": "1800000013640007000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.7\"}}" }, { "description": "[basx381] Engineering notation tests", "bson": "180000001364000700000000000000000000000000443000", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"7E2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+2\"}}" }, { "description": "[basx389] Engineering notation tests", "bson": "1800000013640007000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.07\"}}" }, { "description": "[basx379] Engineering notation tests", "bson": "180000001364000700000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+3\"}}" }, { "description": "[basx391] Engineering notation tests", "bson": "1800000013640007000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.007\"}}" }, { "description": "[basx377] Engineering notation tests", "bson": "180000001364000700000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+4\"}}" }, { "description": "[basx393] Engineering notation tests", "bson": "180000001364000700000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0007\"}}" }, { "description": "[basx375] Engineering notation tests", "bson": "1800000013640007000000000000000000000000004A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5\"}}" }, { "description": "[basx395] Engineering notation tests", "bson": "180000001364000700000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00007\"}}" }, { "description": "[basx373] Engineering notation tests", "bson": "1800000013640007000000000000000000000000004C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E6\"}}", "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"7E+6\"}}" }, { "description": "[basx397] Engineering notation tests", "bson": "180000001364000700000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000007\"}}" }, { "description": "[basx371] Engineering notation tests", "bson": "1800000013640007000000000000000000000000004E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+7\"}}" }, { "description": "[basx399] Engineering notation tests", "bson": "180000001364000700000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-7\"}}" }, { "description": "[basx369] Engineering notation tests", "bson": "180000001364000700000000000000000000000000503000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+8\"}}" }, { "description": "[basx401] Engineering notation tests", "bson": "180000001364000700000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-8\"}}" }, { "description": "[basx367] Engineering notation tests", "bson": "180000001364000700000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" }, { "description": "[basx403] Engineering notation tests", "bson": "1800000013640007000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-9\"}}" }, { "description": "[basx007] conform to rules and exponent will be in permitted range).", "bson": "1800000013640064000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.0\"}}" }, { "description": "[basx005] conform to rules and exponent will be in permitted range).", "bson": "180000001364000A00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" }, { "description": "[basx165] Numbers with E", "bson": 
"180000001364000A00000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+009\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" }, { "description": "[basx163] Numbers with E", "bson": "180000001364000A00000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+09\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" }, { "description": "[basx325] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" }, { "description": "[basx305] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000543000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e10\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+11\"}}" }, { "description": "[basx345] Engineering notation tests", "bson": "180000001364000A000000000000000000000000002C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-10\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-9\"}}" }, { "description": "[basx303] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000563000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e11\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+12\"}}" }, { "description": "[basx347] Engineering notation tests", "bson": "180000001364000A000000000000000000000000002A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-11\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-10\"}}" }, { "description": "[basx301] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000583000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e12\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+13\"}}" }, { "description": "[basx349] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000283000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-12\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-11\"}}" }, { "description": "[basx351] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000263000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-13\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-12\"}}" }, { "description": "[basx323] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+2\"}}" }, { "description": "[basx327] Engineering notation tests", "bson": "180000001364000A000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" }, { "description": "[basx321] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+3\"}}" }, { "description": "[basx329] Engineering notation tests", "bson": "180000001364000A000000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.10\"}}" }, { "description": "[basx319] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+4\"}}" }, { "description": "[basx331] Engineering notation tests", "bson": "180000001364000A000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.010\"}}" }, { "description": "[basx317] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"10e4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+5\"}}" }, { "description": "[basx333] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0010\"}}" }, { "description": "[basx315] Engineering notation tests", "bson": "180000001364000A000000000000000000000000004A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6\"}}" }, { "description": "[basx335] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00010\"}}" }, { "description": "[basx313] Engineering notation tests", "bson": "180000001364000A000000000000000000000000004C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+7\"}}" }, { "description": "[basx337] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-6\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000010\"}}" }, { "description": "[basx311] Engineering notation tests", "bson": "180000001364000A000000000000000000000000004E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+8\"}}" }, { "description": "[basx339] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000010\"}}" }, { "description": "[basx309] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000503000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e8\"}}", "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1.0E+9\"}}" }, { "description": "[basx341] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-7\"}}" }, { "description": "[basx164] Numbers with E", "bson": "180000001364000A00000000000000000000000000F43000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e+90\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+91\"}}" }, { "description": "[basx162] Numbers with E", "bson": "180000001364000A00000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" }, { "description": "[basx307] Engineering notation tests", "bson": "180000001364000A00000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" }, { "description": "[basx343] Engineering notation tests", "bson": "180000001364000A000000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-8\"}}" }, { "description": "[basx008] conform to rules and exponent will be in permitted range).", "bson": "1800000013640065000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.1\"}}" }, { "description": "[basx009] conform to rules and exponent will be in permitted range).", "bson": "1800000013640068000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.4\"}}" }, { "description": "[basx010] conform to rules and exponent will be in permitted range).", "bson": "1800000013640069000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.5\"}}" }, { "description": "[basx011] conform to rules and exponent will be in permitted range).", "bson": 
"180000001364006A000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.6\"}}" }, { "description": "[basx012] conform to rules and exponent will be in permitted range).", "bson": "180000001364006D000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.9\"}}" }, { "description": "[basx013] conform to rules and exponent will be in permitted range).", "bson": "180000001364006E000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"11.0\"}}" }, { "description": "[basx040] strings without E cannot generate E in result", "bson": "180000001364000C00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" }, { "description": "[basx190] Numbers with E", "bson": "18000000136400F1040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" }, { "description": "[basx197] Numbers with E", "bson": "18000000136400F1040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" }, { "description": "[basx196] Numbers with E", "bson": "18000000136400F1040000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" }, { "description": "[basx198] Numbers with E", "bson": "18000000136400F1040000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" }, { "description": "[basx191] Numbers with E", "bson": "18000000136400F104000000000000000000000000143000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-19\"}}" }, { "description": "[basx203] Numbers with E", "bson": "18000000136400F104000000000000000000000000643000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+20\"}}", 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+21\"}}" }, { "description": "[basx195] Numbers with E", "bson": "18000000136400F104000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" }, { "description": "[basx199] Numbers with E", "bson": "18000000136400F104000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" }, { "description": "[basx194] Numbers with E", "bson": "18000000136400F104000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" }, { "description": "[basx200] Numbers with E", "bson": "18000000136400F104000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" }, { "description": "[basx193] Numbers with E", "bson": "18000000136400F104000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" }, { "description": "[basx201] Numbers with E", "bson": "18000000136400F104000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" }, { "description": "[basx192] Numbers with E", "bson": "18000000136400F1040000000000000000000000002C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-7\"}}" }, { "description": "[basx202] Numbers with E", "bson": "18000000136400F1040000000000000000000000004C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+9\"}}" }, { "description": "[basx044] strings 
without E cannot generate E in result", "bson": "18000000136400FC040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"012.76\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" }, { "description": "[basx042] strings without E cannot generate E in result", "bson": "18000000136400FC040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" }, { "description": "[basx046] strings without E cannot generate E in result", "bson": "180000001364001100000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"17.\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"17\"}}" }, { "description": "[basx049] strings without E cannot generate E in result", "bson": "180000001364002C00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0044\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" }, { "description": "[basx048] strings without E cannot generate E in result", "bson": "180000001364002C00000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"044\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" }, { "description": "[basx158] Numbers with E", "bson": "180000001364002C00000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"44E+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4.4E+10\"}}" }, { "description": "[basx068] examples", "bson": "180000001364003200000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"50E-7\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" }, { "description": "[basx169] Numbers with E", "bson": "180000001364006400000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+009\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" }, { "description": "[basx167] Numbers with E", "bson": "180000001364006400000000000000000000000000523000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+09\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" }, { "description": "[basx168] Numbers with E", "bson": "180000001364006400000000000000000000000000F43000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"100E+90\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+92\"}}" }, { "description": "[basx166] Numbers with E", "bson": "180000001364006400000000000000000000000000523000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+9\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" }, { "description": "[basx210] Numbers with E", "bson": "18000000136400F1040000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" }, { "description": "[basx217] Numbers with E", "bson": "18000000136400F1040000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" }, { "description": "[basx216] Numbers with E", "bson": "18000000136400F1040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" }, { "description": "[basx218] Numbers with E", "bson": "18000000136400F104000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" }, { "description": "[basx211] Numbers with E", "bson": "18000000136400F104000000000000000000000000163000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-18\"}}" }, { "description": "[basx223] Numbers with E", "bson": "18000000136400F104000000000000000000000000663000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+22\"}}" }, { "description": "[basx215] Numbers with E", "bson": 
"18000000136400F1040000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" }, { "description": "[basx219] Numbers with E", "bson": "18000000136400F104000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" }, { "description": "[basx214] Numbers with E", "bson": "18000000136400F104000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" }, { "description": "[basx220] Numbers with E", "bson": "18000000136400F104000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" }, { "description": "[basx213] Numbers with E", "bson": "18000000136400F104000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" }, { "description": "[basx221] Numbers with E", "bson": "18000000136400F104000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" }, { "description": "[basx212] Numbers with E", "bson": "18000000136400F1040000000000000000000000002E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000001265\"}}" }, { "description": "[basx222] Numbers with E", "bson": "18000000136400F1040000000000000000000000004E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+10\"}}" }, { "description": "[basx006] conform to rules and exponent will be in permitted range).", "bson": "18000000136400E803000000000000000000000000403000", 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"1000\"}}" }, { "description": "[basx230] Numbers with E", "bson": "18000000136400F104000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" }, { "description": "[basx237] Numbers with E", "bson": "18000000136400F104000000000000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" }, { "description": "[basx236] Numbers with E", "bson": "18000000136400F1040000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" }, { "description": "[basx238] Numbers with E", "bson": "18000000136400F104000000000000000000000000423000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+1\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" }, { "description": "[basx231] Numbers with E", "bson": "18000000136400F104000000000000000000000000183000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-17\"}}" }, { "description": "[basx243] Numbers with E", "bson": "18000000136400F104000000000000000000000000683000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+20\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+23\"}}" }, { "description": "[basx235] Numbers with E", "bson": "18000000136400F1040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" }, { "description": "[basx239] Numbers with E", "bson": "18000000136400F104000000000000000000000000443000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+2\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" }, { "description": "[basx234] Numbers with E", "bson": "18000000136400F1040000000000000000000000003A3000", "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1265E-3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" }, { "description": "[basx240] Numbers with E", "bson": "18000000136400F104000000000000000000000000463000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+3\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" }, { "description": "[basx233] Numbers with E", "bson": "18000000136400F104000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" }, { "description": "[basx241] Numbers with E", "bson": "18000000136400F104000000000000000000000000483000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+4\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" }, { "description": "[basx232] Numbers with E", "bson": "18000000136400F104000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" }, { "description": "[basx242] Numbers with E", "bson": "18000000136400F104000000000000000000000000503000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+8\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+11\"}}" }, { "description": "[basx060] strings without E cannot generate E in result", "bson": "18000000136400185C0ACE00000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" }, { "description": "[basx059] strings without E cannot generate E in result", "bson": "18000000136400F198670C08000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0345678.54321\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.54321\"}}" }, { "description": "[basx058] strings without E cannot generate E in result", "bson": "180000001364006AF90B7C50000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.543210\"}}" }, { "description": "[basx057] 
strings without E cannot generate E in result", "bson": "180000001364006A19562522020000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"2345678.543210\"}}" }, { "description": "[basx056] strings without E cannot generate E in result", "bson": "180000001364006AB9C8733A0B0000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345678.543210\"}}" }, { "description": "[basx031] conform to rules and exponent will be in permitted range).", "bson": "1800000013640040AF0D8648700000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.000000\"}}" }, { "description": "[basx030] conform to rules and exponent will be in permitted range).", "bson": "1800000013640080910F8648700000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.123456\"}}" }, { "description": "[basx032] conform to rules and exponent will be in permitted range).", "bson": "1800000013640080910F8648700000000000000000403000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789123456\"}}" } ] } `}, {"decimal128-4.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "valid": [ { "description": "[basx023] conform to rules and exponent will be in permitted range).", "bson": "1800000013640001000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.1\"}}" }, { "description": "[basx045] strings without E cannot generate E in result", "bson": "1800000013640003000000000000000000000000003A3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.003\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.003\"}}" }, { "description": "[basx610] Zeros", "bson": "1800000013640000000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \".0\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" }, { "description": "[basx612] Zeros", "bson": "1800000013640000000000000000000000000000003EB000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-.0\"}}", 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" }, { "description": "[basx043] strings without E cannot generate E in result", "bson": "18000000136400FC040000000000000000000000003C3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" }, { "description": "[basx055] strings without E cannot generate E in result", "bson": "180000001364000500000000000000000000000000303000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000005\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-8\"}}" }, { "description": "[basx054] strings without E cannot generate E in result", "bson": "180000001364000500000000000000000000000000323000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000005\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" }, { "description": "[basx052] strings without E cannot generate E in result", "bson": "180000001364000500000000000000000000000000343000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" }, { "description": "[basx051] strings without E cannot generate E in result", "bson": "180000001364000500000000000000000000000000363000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.00005\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00005\"}}" }, { "description": "[basx050] strings without E cannot generate E in result", "bson": "180000001364000500000000000000000000000000383000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0005\"}}" }, { "description": "[basx047] strings without E cannot generate E in result", "bson": "1800000013640005000000000000000000000000003E3000", "extjson": "{\"d\" : {\"$numberDecimal\" : \".5\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.5\"}}" }, { "description": "[dqbsr431] check rounding modes heeded (Rounded)", "bson": "1800000013640099761CC7B548F377DC80A131C836FE2F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.1111111111111111111111111111123450\"}}", 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.111111111111111111111111111112345\"}}" }, { "description": "OK2", "bson": "18000000136400000000000A5BC138938D44C64D31FC2F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \".100000000000000000000000000000000000000000000000000000000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1000000000000000000000000000000000\"}}" } ], "parseErrors": [ { "description": "[basx564] Near-specials (Conversion_syntax)", "string": "Infi" }, { "description": "[basx565] Near-specials (Conversion_syntax)", "string": "Infin" }, { "description": "[basx566] Near-specials (Conversion_syntax)", "string": "Infini" }, { "description": "[basx567] Near-specials (Conversion_syntax)", "string": "Infinit" }, { "description": "[basx568] Near-specials (Conversion_syntax)", "string": "-Infinit" }, { "description": "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": ".Infinity" }, { "description": "[basx562] Near-specials (Conversion_syntax)", "string": "NaNq" }, { "description": "[basx563] Near-specials (Conversion_syntax)", "string": "NaNs" }, { "description": "[dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded)", "string": "-7e10000" }, { "description": "[dqbsr534] negatives (Rounded & Inexact)", "string": "-1.11111111111111111111111111111234650" }, { "description": "[dqbsr535] negatives (Rounded & Inexact)", "string": "-1.11111111111111111111111111111234551" }, { "description": "[dqbsr533] negatives (Rounded & Inexact)", "string": "-1.11111111111111111111111111111234550" }, { "description": "[dqbsr532] negatives (Rounded & Inexact)", "string": "-1.11111111111111111111111111111234549" }, { "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)", "string": "1.11111111111111111111111111111234549" }, { "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)", "string": "1.11111111111111111111111111111234550" }, { 
"description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)", "string": "1.11111111111111111111111111111234551" }, { "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)", "string": "1.11111111111111111111111111111234650" }, { "description": "[dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded)", "string": "7e10000" }, { "description": "Inexact rounding#1", "string": "100000000000000000000000000000000000000000000000000000000001" }, { "description": "Inexact rounding#2", "string": "1E-6177" } ] } `}, {"decimal128-5.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "valid": [ { "description": "[decq035] fold-downs (more below) (Clamped)", "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" }, { "description": "[decq037] fold-downs (more below) (Clamped)", "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" }, { "description": "[decq077] Nmin and below (Subnormal)", "bson": "180000001364000000000081EFAC855B416D2DEE04000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.100000000000000000000000000000000E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" }, { "description": "[decq078] Nmin and below (Subnormal)", "bson": "180000001364000000000081EFAC855B416D2DEE04000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" }, { "description": "[decq079] Nmin and below (Subnormal)", "bson": "180000001364000A00000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000010E-6143\"}}", 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" }, { "description": "[decq080] Nmin and below (Subnormal)", "bson": "180000001364000A00000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" }, { "description": "[decq081] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000020000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000000000000000000000000001E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" }, { "description": "[decq082] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000020000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" }, { "description": "[decq083] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000001E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" }, { "description": "[decq084] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" }, { "description": "[decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal)", "bson": "180000001364000100000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e-6176\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" }, { "description": "[decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal)", "bson": "18000000136400FFFFFFFF095BC138938D44C64D31000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"999999999999999999999999999999999e-6176\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.99999999999999999999999999999999E-6144\"}}" }, { "description": "[decq130] fold-downs (more below) (Clamped)", "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"-1.23E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" }, { "description": "[decq132] fold-downs (more below) (Clamped)", "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" }, { "description": "[decq177] Nmin and below (Subnormal)", "bson": "180000001364000000000081EFAC855B416D2DEE04008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.100000000000000000000000000000000E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" }, { "description": "[decq178] Nmin and below (Subnormal)", "bson": "180000001364000000000081EFAC855B416D2DEE04008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" }, { "description": "[decq179] Nmin and below (Subnormal)", "bson": "180000001364000A00000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000010E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" }, { "description": "[decq180] Nmin and below (Subnormal)", "bson": "180000001364000A00000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" }, { "description": "[decq181] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000028000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000000000000000000000000001E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" }, { "description": "[decq182] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000028000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" }, { "description": "[decq183] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000008000", "extjson": 
"{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000001E-6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" }, { "description": "[decq184] Nmin and below (Subnormal)", "bson": "180000001364000100000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" }, { "description": "[decq190] underflow edge cases (Subnormal)", "bson": "180000001364000100000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1e-6176\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" }, { "description": "[decq200] underflow edge cases (Subnormal)", "bson": "18000000136400FFFFFFFF095BC138938D44C64D31008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-999999999999999999999999999999999e-6176\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.99999999999999999999999999999999E-6144\"}}" }, { "description": "[decq400] zeros (Clamped)", "bson": "180000001364000000000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" }, { "description": "[decq401] zeros (Clamped)", "bson": "180000001364000000000000000000000000000000000000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6177\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" }, { "description": "[decq414] clamped zeros... (Clamped)", "bson": "180000001364000000000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6112\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" }, { "description": "[decq416] clamped zeros... (Clamped)", "bson": "180000001364000000000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" }, { "description": "[decq418] clamped zeros... 
(Clamped)", "bson": "180000001364000000000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" }, { "description": "[decq420] negative zeros (Clamped)", "bson": "180000001364000000000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" }, { "description": "[decq421] negative zeros (Clamped)", "bson": "180000001364000000000000000000000000000000008000", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6177\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" }, { "description": "[decq434] clamped zeros... (Clamped)", "bson": "180000001364000000000000000000000000000000FEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6112\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" }, { "description": "[decq436] clamped zeros... (Clamped)", "bson": "180000001364000000000000000000000000000000FEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" }, { "description": "[decq438] clamped zeros... 
(Clamped)", "bson": "180000001364000000000000000000000000000000FEDF00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+8000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" }, { "description": "[decq601] fold-down full sequence (Clamped)", "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" }, { "description": "[decq603] fold-down full sequence (Clamped)", "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6143\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" }, { "description": "[decq605] fold-down full sequence (Clamped)", "bson": "1800000013640000000080264B91C02220BE377E00FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6142\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" }, { "description": "[decq607] fold-down full sequence (Clamped)", "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6141\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" }, { "description": "[decq609] fold-down full sequence (Clamped)", "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6140\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" }, { "description": "[decq611] fold-down full sequence (Clamped)", "bson": "18000000136400000000106102253E5ECE4F200000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6139\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" }, { "description": "[decq613] fold-down full sequence (Clamped)", "bson": 
"18000000136400000000E83C80D09F3C2E3B030000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6138\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" }, { "description": "[decq615] fold-down full sequence (Clamped)", "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6137\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" }, { "description": "[decq617] fold-down full sequence (Clamped)", "bson": "180000001364000000004A48011416954508000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6136\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" }, { "description": "[decq619] fold-down full sequence (Clamped)", "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6135\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" }, { "description": "[decq621] fold-down full sequence (Clamped)", "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6134\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" }, { "description": "[decq623] fold-down full sequence (Clamped)", "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6133\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" }, { "description": "[decq625] fold-down full sequence (Clamped)", "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6132\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" }, { "description": "[decq627] fold-down full sequence (Clamped)", "bson": 
"18000000136400000010632D5EC76B050000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6131\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" }, { "description": "[decq629] fold-down full sequence (Clamped)", "bson": "180000001364000000E8890423C78A000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6130\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" }, { "description": "[decq631] fold-down full sequence (Clamped)", "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6129\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" }, { "description": "[decq633] fold-down full sequence (Clamped)", "bson": "1800000013640000008A5D78456301000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6128\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" }, { "description": "[decq635] fold-down full sequence (Clamped)", "bson": "180000001364000000C16FF2862300000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6127\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" }, { "description": "[decq637] fold-down full sequence (Clamped)", "bson": "180000001364000080C6A47E8D0300000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6126\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" }, { "description": "[decq639] fold-down full sequence (Clamped)", "bson": "1800000013640000407A10F35A0000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6125\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" }, { "description": "[decq641] fold-down full sequence (Clamped)", "bson": "1800000013640000A0724E18090000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1E+6124\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" }, { "description": "[decq643] fold-down full sequence (Clamped)", "bson": "180000001364000010A5D4E8000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6123\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" }, { "description": "[decq645] fold-down full sequence (Clamped)", "bson": "1800000013640000E8764817000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6122\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" }, { "description": "[decq647] fold-down full sequence (Clamped)", "bson": "1800000013640000E40B5402000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6121\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" }, { "description": "[decq649] fold-down full sequence (Clamped)", "bson": "1800000013640000CA9A3B00000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6120\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" }, { "description": "[decq651] fold-down full sequence (Clamped)", "bson": "1800000013640000E1F50500000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6119\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" }, { "description": "[decq653] fold-down full sequence (Clamped)", "bson": "180000001364008096980000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6118\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" }, { "description": "[decq655] fold-down full sequence (Clamped)", "bson": "1800000013640040420F0000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6117\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" }, { "description": "[decq657] fold-down full 
sequence (Clamped)", "bson": "18000000136400A086010000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6116\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" }, { "description": "[decq659] fold-down full sequence (Clamped)", "bson": "180000001364001027000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6115\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" }, { "description": "[decq661] fold-down full sequence (Clamped)", "bson": "18000000136400E803000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6114\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" }, { "description": "[decq663] fold-down full sequence (Clamped)", "bson": "180000001364006400000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6113\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" }, { "description": "[decq665] fold-down full sequence (Clamped)", "bson": "180000001364000A00000000000000000000000000FE5F00", "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6112\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" } ] } `}, {"decimal128-6.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "parseErrors": [ { "description": "Incomplete Exponent", "string": "1e" }, { "description": "Exponent at the beginning", "string": "E01" }, { "description": "Just a decimal place", "string": "." }, { "description": "2 decimal places", "string": "..3" }, { "description": "2 decimal places", "string": ".13.3" }, { "description": "2 decimal places", "string": "1..3" }, { "description": "2 decimal places", "string": "1.3.4" }, { "description": "2 decimal places", "string": "1.34." 
}, { "description": "Decimal with no digits", "string": ".e" }, { "description": "2 signs", "string": "+-32.4" }, { "description": "2 signs", "string": "-+32.4" }, { "description": "2 negative signs", "string": "--32.4" }, { "description": "2 negative signs", "string": "-32.-4" }, { "description": "End in negative sign", "string": "32.0-" }, { "description": "2 negative signs", "string": "32.4E--21" }, { "description": "2 negative signs", "string": "32.4E-2-1" }, { "description": "2 signs", "string": "32.4E+-21" }, { "description": "Empty string", "string": "" }, { "description": "leading white space positive number", "string": " 1" }, { "description": "leading white space negative number", "string": " -1" }, { "description": "trailing white space", "string": "1 " }, { "description": "Invalid", "string": "E" }, { "description": "Invalid", "string": "invalid" }, { "description": "Invalid", "string": "i" }, { "description": "Invalid", "string": "in" }, { "description": "Invalid", "string": "-in" }, { "description": "Invalid", "string": "Na" }, { "description": "Invalid", "string": "-Na" }, { "description": "Invalid", "string": "1.23abc" }, { "description": "Invalid", "string": "1.23abcE+02" }, { "description": "Invalid", "string": "1.23E+0aabs2" } ] } `}, {"decimal128-7.json", ` { "description": "Decimal128", "bson_type": "0x13", "test_key": "d", "parseErrors": [ { "description": "[basx572] Near-specials (Conversion_syntax)", "string": "-9Inf" }, { "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "-1-" }, { "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "0000.." }, { "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": ".0000." 
}, { "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "00..00" }, { "description": "[basx569] Near-specials (Conversion_syntax)", "string": "0Inf" }, { "description": "[basx571] Near-specials (Conversion_syntax)", "string": "-0Inf" }, { "description": "[basx575] Near-specials (Conversion_syntax)", "string": "0sNaN" }, { "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "++1" }, { "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "--1" }, { "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "-+1" }, { "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "+-1" }, { "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": " +1" }, { "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": " + 1" }, { "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": " - 1" }, { "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "." }, { "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": ".." 
}, { "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "" }, { "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "e100" }, { "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "e+1" }, { "description": "[basx577] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": ".e+1" }, { "description": "[basx578] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "+.e+1" }, { "description": "[basx581] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "E+1" }, { "description": "[basx582] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": ".E+1" }, { "description": "[basx583] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "+.E+1" }, { "description": "[basx579] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "-.e+" }, { "description": "[basx580] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "-.e" }, { "description": "[basx584] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "-.E+" }, { "description": "[basx585] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "-.E" }, { "description": "[basx589] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "+.Inf" }, { "description": "[basx586] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": ".NaN" }, { "description": "[basx587] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "-.NaN" }, { "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "ONE" }, { "description": 
"[basx561] Near-specials (Conversion_syntax)", "string": "qNaN" }, { "description": "[basx573] Near-specials (Conversion_syntax)", "string": "-sNa" }, { "description": "[basx588] some baddies with dots and Es and dots and specials (Conversion_syntax)", "string": "+.sNaN" }, { "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "ten" }, { "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "u0b65" }, { "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "u0e5a" }, { "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "x" }, { "description": "[basx574] Near-specials (Conversion_syntax)", "string": "xNaN" }, { "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": ".123.5" }, { "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1..2" }, { "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1e1.0" }, { "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E+1.2.3" }, { "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1e123e" }, { "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E+1.2" }, { "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1e.1" }, { "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1e1." 
}, { "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E++1" }, { "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E--1" }, { "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E+-1" }, { "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E-+1" }, { "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E'1" }, { "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E\"1" }, { "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1e-" }, { "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1E" }, { "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1ee" }, { "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1.2.1" }, { "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1.23.4" }, { "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "1.34.5" }, { "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "01.35." 
}, { "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "01.35-" }, { "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "3+" }, { "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "7e99999a" }, { "description": "[basx570] Near-specials (Conversion_syntax)", "string": "9Inf" }, { "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "12 " }, { "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "12-" }, { "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "12e" }, { "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "12e++" }, { "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "12f4" }, { "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "111e*123" }, { "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "111e123-" }, { "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "111e1*23" }, { "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "111e+12+" }, { "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "111e1-3-" }, { "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "111E1e+3" }, { "description": "[basx528] The 'baddies' tests from 
DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "123,65" }, { "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "7e12356789012x" }, { "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", "string": "7e123567890x" } ] } `}, } mgo-r2016.08.01/bson/decode.go000066400000000000000000000454131274772341400156120ustar00rootroot00000000000000// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // gobson - BSON library for Go. 
package bson import ( "fmt" "math" "net/url" "reflect" "strconv" "sync" "time" ) type decoder struct { in []byte i int docType reflect.Type } var typeM = reflect.TypeOf(M{}) func newDecoder(in []byte) *decoder { return &decoder{in, 0, typeM} } // -------------------------------------------------------------------------- // Some helper functions. func corrupted() { panic("Document is corrupted") } func settableValueOf(i interface{}) reflect.Value { v := reflect.ValueOf(i) sv := reflect.New(v.Type()).Elem() sv.Set(v) return sv } // -------------------------------------------------------------------------- // Unmarshaling of documents. const ( setterUnknown = iota setterNone setterType setterAddr ) var setterStyles map[reflect.Type]int var setterIface reflect.Type var setterMutex sync.RWMutex func init() { var iface Setter setterIface = reflect.TypeOf(&iface).Elem() setterStyles = make(map[reflect.Type]int) } func setterStyle(outt reflect.Type) int { setterMutex.RLock() style := setterStyles[outt] setterMutex.RUnlock() if style == setterUnknown { setterMutex.Lock() defer setterMutex.Unlock() if outt.Implements(setterIface) { setterStyles[outt] = setterType } else if reflect.PtrTo(outt).Implements(setterIface) { setterStyles[outt] = setterAddr } else { setterStyles[outt] = setterNone } style = setterStyles[outt] } return style } func getSetter(outt reflect.Type, out reflect.Value) Setter { style := setterStyle(outt) if style == setterNone { return nil } if style == setterAddr { if !out.CanAddr() { return nil } out = out.Addr() } else if outt.Kind() == reflect.Ptr && out.IsNil() { out.Set(reflect.New(outt.Elem())) } return out.Interface().(Setter) } func clearMap(m reflect.Value) { var none reflect.Value for _, k := range m.MapKeys() { m.SetMapIndex(k, none) } } func (d *decoder) readDocTo(out reflect.Value) { var elemType reflect.Type outt := out.Type() outk := outt.Kind() for { if outk == reflect.Ptr && out.IsNil() { out.Set(reflect.New(outt.Elem())) } if setter := 
getSetter(outt, out); setter != nil { var raw Raw d.readDocTo(reflect.ValueOf(&raw)) err := setter.SetBSON(raw) if _, ok := err.(*TypeError); err != nil && !ok { panic(err) } return } if outk == reflect.Ptr { out = out.Elem() outt = out.Type() outk = out.Kind() continue } break } var fieldsMap map[string]fieldInfo var inlineMap reflect.Value start := d.i origout := out if outk == reflect.Interface { if d.docType.Kind() == reflect.Map { mv := reflect.MakeMap(d.docType) out.Set(mv) out = mv } else { dv := reflect.New(d.docType).Elem() out.Set(dv) out = dv } outt = out.Type() outk = outt.Kind() } docType := d.docType keyType := typeString convertKey := false switch outk { case reflect.Map: keyType = outt.Key() if keyType.Kind() != reflect.String { panic("BSON map must have string keys. Got: " + outt.String()) } if keyType != typeString { convertKey = true } elemType = outt.Elem() if elemType == typeIface { d.docType = outt } if out.IsNil() { out.Set(reflect.MakeMap(out.Type())) } else if out.Len() > 0 { clearMap(out) } case reflect.Struct: if outt != typeRaw { sinfo, err := getStructInfo(out.Type()) if err != nil { panic(err) } fieldsMap = sinfo.FieldsMap out.Set(sinfo.Zero) if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) if !inlineMap.IsNil() && inlineMap.Len() > 0 { clearMap(inlineMap) } elemType = inlineMap.Type().Elem() if elemType == typeIface { d.docType = inlineMap.Type() } } } case reflect.Slice: switch outt.Elem() { case typeDocElem: origout.Set(d.readDocElems(outt)) return case typeRawDocElem: origout.Set(d.readRawDocElems(outt)) return } fallthrough default: panic("Unsupported document type for unmarshalling: " + out.Type().String()) } end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } for d.in[d.i] != '\x00' { kind := d.readByte() name := d.readCStr() if d.i >= end { corrupted() } switch outk { case reflect.Map: e := reflect.New(elemType).Elem() if d.readElemTo(e, kind) { k 
:= reflect.ValueOf(name) if convertKey { k = k.Convert(keyType) } out.SetMapIndex(k, e) } case reflect.Struct: if outt == typeRaw { d.dropElem(kind) } else { if info, ok := fieldsMap[name]; ok { if info.Inline == nil { d.readElemTo(out.Field(info.Num), kind) } else { d.readElemTo(out.FieldByIndex(info.Inline), kind) } } else if inlineMap.IsValid() { if inlineMap.IsNil() { inlineMap.Set(reflect.MakeMap(inlineMap.Type())) } e := reflect.New(elemType).Elem() if d.readElemTo(e, kind) { inlineMap.SetMapIndex(reflect.ValueOf(name), e) } } else { d.dropElem(kind) } } case reflect.Slice: } if d.i >= end { corrupted() } } d.i++ // '\x00' if d.i != end { corrupted() } d.docType = docType if outt == typeRaw { out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]})) } } func (d *decoder) readArrayDocTo(out reflect.Value) { end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } i := 0 l := out.Len() for d.in[d.i] != '\x00' { if i >= l { panic("Length mismatch on array field") } kind := d.readByte() for d.i < end && d.in[d.i] != '\x00' { d.i++ } if d.i >= end { corrupted() } d.i++ d.readElemTo(out.Index(i), kind) if d.i >= end { corrupted() } i++ } if i != l { panic("Length mismatch on array field") } d.i++ // '\x00' if d.i != end { corrupted() } } func (d *decoder) readSliceDoc(t reflect.Type) interface{} { tmp := make([]reflect.Value, 0, 8) elemType := t.Elem() if elemType == typeRawDocElem { d.dropElem(0x04) return reflect.Zero(t).Interface() } end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } for d.in[d.i] != '\x00' { kind := d.readByte() for d.i < end && d.in[d.i] != '\x00' { d.i++ } if d.i >= end { corrupted() } d.i++ e := reflect.New(elemType).Elem() if d.readElemTo(e, kind) { tmp = append(tmp, e) } if d.i >= end { corrupted() } } d.i++ // '\x00' if d.i != end { corrupted() } n := len(tmp) slice := reflect.MakeSlice(t, n, n) for i := 0; i != n; i++ { 
slice.Index(i).Set(tmp[i]) } return slice.Interface() } var typeSlice = reflect.TypeOf([]interface{}{}) var typeIface = typeSlice.Elem() func (d *decoder) readDocElems(typ reflect.Type) reflect.Value { docType := d.docType d.docType = typ slice := make([]DocElem, 0, 8) d.readDocWith(func(kind byte, name string) { e := DocElem{Name: name} v := reflect.ValueOf(&e.Value) if d.readElemTo(v.Elem(), kind) { slice = append(slice, e) } }) slicev := reflect.New(typ).Elem() slicev.Set(reflect.ValueOf(slice)) d.docType = docType return slicev } func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value { docType := d.docType d.docType = typ slice := make([]RawDocElem, 0, 8) d.readDocWith(func(kind byte, name string) { e := RawDocElem{Name: name} v := reflect.ValueOf(&e.Value) if d.readElemTo(v.Elem(), kind) { slice = append(slice, e) } }) slicev := reflect.New(typ).Elem() slicev.Set(reflect.ValueOf(slice)) d.docType = docType return slicev } func (d *decoder) readDocWith(f func(kind byte, name string)) { end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } for d.in[d.i] != '\x00' { kind := d.readByte() name := d.readCStr() if d.i >= end { corrupted() } f(kind, name) if d.i >= end { corrupted() } } d.i++ // '\x00' if d.i != end { corrupted() } } // -------------------------------------------------------------------------- // Unmarshaling of individual elements within a document. var blackHole = settableValueOf(struct{}{}) func (d *decoder) dropElem(kind byte) { d.readElemTo(blackHole, kind) } // Attempt to decode an element from the document and put it into out. // If the types are not compatible, the returned ok value will be // false and out will be unchanged. func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { start := d.i if kind == 0x03 { // Delegate unmarshaling of documents. 
outt := out.Type() outk := out.Kind() switch outk { case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: d.readDocTo(out) return true } if setterStyle(outt) != setterNone { d.readDocTo(out) return true } if outk == reflect.Slice { switch outt.Elem() { case typeDocElem: out.Set(d.readDocElems(outt)) case typeRawDocElem: out.Set(d.readRawDocElems(outt)) default: d.readDocTo(blackHole) } return true } d.readDocTo(blackHole) return true } var in interface{} switch kind { case 0x01: // Float64 in = d.readFloat64() case 0x02: // UTF-8 string in = d.readStr() case 0x03: // Document panic("Can't happen. Handled above.") case 0x04: // Array outt := out.Type() if setterStyle(outt) != setterNone { // Skip the value so its data is handed to the setter below. d.dropElem(kind) break } for outt.Kind() == reflect.Ptr { outt = outt.Elem() } switch outt.Kind() { case reflect.Array: d.readArrayDocTo(out) return true case reflect.Slice: in = d.readSliceDoc(outt) default: in = d.readSliceDoc(typeSlice) } case 0x05: // Binary b := d.readBinary() if b.Kind == 0x00 || b.Kind == 0x02 { in = b.Data } else { in = b } case 0x06: // Undefined (obsolete, but still seen in the wild) in = Undefined case 0x07: // ObjectId in = ObjectId(d.readBytes(12)) case 0x08: // Bool in = d.readBool() case 0x09: // Timestamp // MongoDB handles timestamps as milliseconds. i := d.readInt64() if i == -62135596800000 { in = time.Time{} // In UTC for convenience. 
} else { in = time.Unix(i/1e3, i%1e3*1e6) } case 0x0A: // Nil in = nil case 0x0B: // RegEx in = d.readRegEx() case 0x0C: in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))} case 0x0D: // JavaScript without scope in = JavaScript{Code: d.readStr()} case 0x0E: // Symbol in = Symbol(d.readStr()) case 0x0F: // JavaScript with scope d.i += 4 // Skip length js := JavaScript{d.readStr(), make(M)} d.readDocTo(reflect.ValueOf(js.Scope)) in = js case 0x10: // Int32 in = int(d.readInt32()) case 0x11: // Mongo-specific timestamp in = MongoTimestamp(d.readInt64()) case 0x12: // Int64 in = d.readInt64() case 0x13: // Decimal128 in = Decimal128{ l: uint64(d.readInt64()), h: uint64(d.readInt64()), } case 0x7F: // Max key in = MaxKey case 0xFF: // Min key in = MinKey default: panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) } outt := out.Type() if outt == typeRaw { out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]})) return true } if setter := getSetter(outt, out); setter != nil { err := setter.SetBSON(Raw{kind, d.in[start:d.i]}) if err == SetZero { out.Set(reflect.Zero(outt)) return true } if err == nil { return true } if _, ok := err.(*TypeError); !ok { panic(err) } return false } if in == nil { out.Set(reflect.Zero(outt)) return true } outk := outt.Kind() // Dereference and initialize pointer if necessary. first := true for outk == reflect.Ptr { if !out.IsNil() { out = out.Elem() } else { elem := reflect.New(outt.Elem()) if first { // Only set if value is compatible. 
first = false defer func(out, elem reflect.Value) { if good { out.Set(elem) } }(out, elem) } else { out.Set(elem) } out = elem } outt = out.Type() outk = outt.Kind() } inv := reflect.ValueOf(in) if outt == inv.Type() { out.Set(inv) return true } switch outk { case reflect.Interface: out.Set(inv) return true case reflect.String: switch inv.Kind() { case reflect.String: out.SetString(inv.String()) return true case reflect.Slice: if b, ok := in.([]byte); ok { out.SetString(string(b)) return true } case reflect.Int, reflect.Int64: if outt == typeJSONNumber { out.SetString(strconv.FormatInt(inv.Int(), 10)) return true } case reflect.Float64: if outt == typeJSONNumber { out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64)) return true } } case reflect.Slice, reflect.Array: // Remember, array (0x04) slices are built with the correct // element type. If we are here, must be a cross BSON kind // conversion (e.g. 0x05 unmarshalling on string). if outt.Elem().Kind() != reflect.Uint8 { break } switch inv.Kind() { case reflect.String: slice := []byte(inv.String()) out.Set(reflect.ValueOf(slice)) return true case reflect.Slice: switch outt.Kind() { case reflect.Array: reflect.Copy(out, inv) case reflect.Slice: out.SetBytes(inv.Bytes()) } return true } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch inv.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetInt(inv.Int()) return true case reflect.Float32, reflect.Float64: out.SetInt(int64(inv.Float())) return true case reflect.Bool: if inv.Bool() { out.SetInt(1) } else { out.SetInt(0) } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("can't happen: no uint types in BSON (!?)") } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: switch inv.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 
out.SetUint(uint64(inv.Int())) return true case reflect.Float32, reflect.Float64: out.SetUint(uint64(inv.Float())) return true case reflect.Bool: if inv.Bool() { out.SetUint(1) } else { out.SetUint(0) } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("Can't happen. No uint types in BSON.") } case reflect.Float32, reflect.Float64: switch inv.Kind() { case reflect.Float32, reflect.Float64: out.SetFloat(inv.Float()) return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetFloat(float64(inv.Int())) return true case reflect.Bool: if inv.Bool() { out.SetFloat(1) } else { out.SetFloat(0) } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("Can't happen. No uint types in BSON?") } case reflect.Bool: switch inv.Kind() { case reflect.Bool: out.SetBool(inv.Bool()) return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetBool(inv.Int() != 0) return true case reflect.Float32, reflect.Float64: out.SetBool(inv.Float() != 0) return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("Can't happen. No uint types in BSON?") } case reflect.Struct: if outt == typeURL && inv.Kind() == reflect.String { u, err := url.Parse(inv.String()) if err != nil { panic(err) } out.Set(reflect.ValueOf(u).Elem()) return true } if outt == typeBinary { if b, ok := in.([]byte); ok { out.Set(reflect.ValueOf(Binary{Data: b})) return true } } } return false } // -------------------------------------------------------------------------- // Parsers of basic types. 
func (d *decoder) readRegEx() RegEx { re := RegEx{} re.Pattern = d.readCStr() re.Options = d.readCStr() return re } func (d *decoder) readBinary() Binary { l := d.readInt32() b := Binary{} b.Kind = d.readByte() b.Data = d.readBytes(l) if b.Kind == 0x02 && len(b.Data) >= 4 { // Weird obsolete format with redundant length. b.Data = b.Data[4:] } return b } func (d *decoder) readStr() string { l := d.readInt32() b := d.readBytes(l - 1) if d.readByte() != '\x00' { corrupted() } return string(b) } func (d *decoder) readCStr() string { start := d.i end := start l := len(d.in) for ; end != l; end++ { if d.in[end] == '\x00' { break } } d.i = end + 1 if d.i > l { corrupted() } return string(d.in[start:end]) } func (d *decoder) readBool() bool { b := d.readByte() if b == 0 { return false } if b == 1 { return true } panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) } func (d *decoder) readFloat64() float64 { return math.Float64frombits(uint64(d.readInt64())) } func (d *decoder) readInt32() int32 { b := d.readBytes(4) return int32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) } func (d *decoder) readInt64() int64 { b := d.readBytes(8) return int64((uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) | (uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56)) } func (d *decoder) readByte() byte { i := d.i d.i++ if d.i > len(d.in) { corrupted() } return d.in[i] } func (d *decoder) readBytes(length int32) []byte { if length < 0 { corrupted() } start := d.i d.i += int(length) if d.i < start || d.i > len(d.in) { corrupted() } return d.in[start : start+int(length)] } mgo-r2016.08.01/bson/encode.go000066400000000000000000000303131274772341400156150ustar00rootroot00000000000000// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // gobson - BSON library for Go. package bson import ( "encoding/json" "fmt" "math" "net/url" "reflect" "strconv" "time" ) // -------------------------------------------------------------------------- // Some internal infrastructure. 
var ( typeBinary = reflect.TypeOf(Binary{}) typeObjectId = reflect.TypeOf(ObjectId("")) typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")}) typeSymbol = reflect.TypeOf(Symbol("")) typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0)) typeOrderKey = reflect.TypeOf(MinKey) typeDocElem = reflect.TypeOf(DocElem{}) typeRawDocElem = reflect.TypeOf(RawDocElem{}) typeRaw = reflect.TypeOf(Raw{}) typeURL = reflect.TypeOf(url.URL{}) typeTime = reflect.TypeOf(time.Time{}) typeString = reflect.TypeOf("") typeJSONNumber = reflect.TypeOf(json.Number("")) ) const itoaCacheSize = 32 var itoaCache []string func init() { itoaCache = make([]string, itoaCacheSize) for i := 0; i != itoaCacheSize; i++ { itoaCache[i] = strconv.Itoa(i) } } func itoa(i int) string { if i < itoaCacheSize { return itoaCache[i] } return strconv.Itoa(i) } // -------------------------------------------------------------------------- // Marshaling of the document value itself. type encoder struct { out []byte } func (e *encoder) addDoc(v reflect.Value) { for { if vi, ok := v.Interface().(Getter); ok { getv, err := vi.GetBSON() if err != nil { panic(err) } v = reflect.ValueOf(getv) continue } if v.Kind() == reflect.Ptr { v = v.Elem() continue } break } if v.Type() == typeRaw { raw := v.Interface().(Raw) if raw.Kind != 0x03 && raw.Kind != 0x00 { panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") } if len(raw.Data) == 0 { panic("Attempted to marshal empty Raw document") } e.addBytes(raw.Data...) 
return } start := e.reserveInt32() switch v.Kind() { case reflect.Map: e.addMap(v) case reflect.Struct: e.addStruct(v) case reflect.Array, reflect.Slice: e.addSlice(v) default: panic("Can't marshal " + v.Type().String() + " as a BSON document") } e.addBytes(0) e.setInt32(start, int32(len(e.out)-start)) } func (e *encoder) addMap(v reflect.Value) { for _, k := range v.MapKeys() { e.addElem(k.String(), v.MapIndex(k), false) } } func (e *encoder) addStruct(v reflect.Value) { sinfo, err := getStructInfo(v.Type()) if err != nil { panic(err) } var value reflect.Value if sinfo.InlineMap >= 0 { m := v.Field(sinfo.InlineMap) if m.Len() > 0 { for _, k := range m.MapKeys() { ks := k.String() if _, found := sinfo.FieldsMap[ks]; found { panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks)) } e.addElem(ks, m.MapIndex(k), false) } } } for _, info := range sinfo.FieldsList { if info.Inline == nil { value = v.Field(info.Num) } else { value = v.FieldByIndex(info.Inline) } if info.OmitEmpty && isZero(value) { continue } e.addElem(info.Key, value, info.MinSize) } } func isZero(v reflect.Value) bool { switch v.Kind() { case reflect.String: return len(v.String()) == 0 case reflect.Ptr, reflect.Interface: return v.IsNil() case reflect.Slice: return v.Len() == 0 case reflect.Map: return v.Len() == 0 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Bool: return !v.Bool() case reflect.Struct: vt := v.Type() if vt == typeTime { return v.Interface().(time.Time).IsZero() } for i := 0; i < v.NumField(); i++ { if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { continue // Private field } if !isZero(v.Field(i)) { return false } } return true } return false } func (e *encoder) addSlice(v reflect.Value) { vi := 
v.Interface() if d, ok := vi.(D); ok { for _, elem := range d { e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) } return } if d, ok := vi.(RawD); ok { for _, elem := range d { e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) } return } l := v.Len() et := v.Type().Elem() if et == typeDocElem { for i := 0; i < l; i++ { elem := v.Index(i).Interface().(DocElem) e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) } return } if et == typeRawDocElem { for i := 0; i < l; i++ { elem := v.Index(i).Interface().(RawDocElem) e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) } return } for i := 0; i < l; i++ { e.addElem(itoa(i), v.Index(i), false) } } // -------------------------------------------------------------------------- // Marshaling of elements in a document. func (e *encoder) addElemName(kind byte, name string) { e.addBytes(kind) e.addBytes([]byte(name)...) e.addBytes(0) } func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { if !v.IsValid() { e.addElemName(0x0A, name) return } if getter, ok := v.Interface().(Getter); ok { getv, err := getter.GetBSON() if err != nil { panic(err) } e.addElem(name, reflect.ValueOf(getv), minSize) return } switch v.Kind() { case reflect.Interface: e.addElem(name, v.Elem(), minSize) case reflect.Ptr: e.addElem(name, v.Elem(), minSize) case reflect.String: s := v.String() switch v.Type() { case typeObjectId: if len(s) != 12 { panic("ObjectIDs must be exactly 12 bytes long (got " + strconv.Itoa(len(s)) + ")") } e.addElemName(0x07, name) e.addBytes([]byte(s)...) 
case typeSymbol: e.addElemName(0x0E, name) e.addStr(s) case typeJSONNumber: n := v.Interface().(json.Number) if i, err := n.Int64(); err == nil { e.addElemName(0x12, name) e.addInt64(i) } else if f, err := n.Float64(); err == nil { e.addElemName(0x01, name) e.addFloat64(f) } else { panic("failed to convert json.Number to a number: " + s) } default: e.addElemName(0x02, name) e.addStr(s) } case reflect.Float32, reflect.Float64: e.addElemName(0x01, name) e.addFloat64(v.Float()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: u := v.Uint() if int64(u) < 0 { panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { e.addElemName(0x10, name) e.addInt32(int32(u)) } else { e.addElemName(0x12, name) e.addInt64(int64(u)) } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch v.Type() { case typeMongoTimestamp: e.addElemName(0x11, name) e.addInt64(v.Int()) case typeOrderKey: if v.Int() == int64(MaxKey) { e.addElemName(0x7F, name) } else { e.addElemName(0xFF, name) } default: i := v.Int() if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { // It fits into an int32, encode as such. 
e.addElemName(0x10, name) e.addInt32(int32(i)) } else { e.addElemName(0x12, name) e.addInt64(i) } } case reflect.Bool: e.addElemName(0x08, name) if v.Bool() { e.addBytes(1) } else { e.addBytes(0) } case reflect.Map: e.addElemName(0x03, name) e.addDoc(v) case reflect.Slice: vt := v.Type() et := vt.Elem() if et.Kind() == reflect.Uint8 { e.addElemName(0x05, name) e.addBinary(0x00, v.Bytes()) } else if et == typeDocElem || et == typeRawDocElem { e.addElemName(0x03, name) e.addDoc(v) } else { e.addElemName(0x04, name) e.addDoc(v) } case reflect.Array: et := v.Type().Elem() if et.Kind() == reflect.Uint8 { e.addElemName(0x05, name) if v.CanAddr() { e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte)) } else { n := v.Len() e.addInt32(int32(n)) e.addBytes(0x00) for i := 0; i < n; i++ { el := v.Index(i) e.addBytes(byte(el.Uint())) } } } else { e.addElemName(0x04, name) e.addDoc(v) } case reflect.Struct: switch s := v.Interface().(type) { case Raw: kind := s.Kind if kind == 0x00 { kind = 0x03 } if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F { panic("Attempted to marshal empty Raw document") } e.addElemName(kind, name) e.addBytes(s.Data...) case Binary: e.addElemName(0x05, name) e.addBinary(s.Kind, s.Data) case Decimal128: e.addElemName(0x13, name) e.addInt64(int64(s.l)) e.addInt64(int64(s.h)) case DBPointer: e.addElemName(0x0C, name) e.addStr(s.Namespace) if len(s.Id) != 12 { panic("ObjectIDs must be exactly 12 bytes long (got " + strconv.Itoa(len(s.Id)) + ")") } e.addBytes([]byte(s.Id)...) case RegEx: e.addElemName(0x0B, name) e.addCStr(s.Pattern) e.addCStr(s.Options) case JavaScript: if s.Scope == nil { e.addElemName(0x0D, name) e.addStr(s.Code) } else { e.addElemName(0x0F, name) start := e.reserveInt32() e.addStr(s.Code) e.addDoc(reflect.ValueOf(s.Scope)) e.setInt32(start, int32(len(e.out)-start)) } case time.Time: // MongoDB handles timestamps as milliseconds. 
e.addElemName(0x09, name) e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) case url.URL: e.addElemName(0x02, name) e.addStr(s.String()) case undefined: e.addElemName(0x06, name) default: e.addElemName(0x03, name) e.addDoc(v) } default: panic("Can't marshal " + v.Type().String() + " in a BSON document") } } // -------------------------------------------------------------------------- // Marshaling of base types. func (e *encoder) addBinary(subtype byte, v []byte) { if subtype == 0x02 { // Wonder how that brilliant idea came to life. Obsolete, luckily. e.addInt32(int32(len(v) + 4)) e.addBytes(subtype) e.addInt32(int32(len(v))) } else { e.addInt32(int32(len(v))) e.addBytes(subtype) } e.addBytes(v...) } func (e *encoder) addStr(v string) { e.addInt32(int32(len(v) + 1)) e.addCStr(v) } func (e *encoder) addCStr(v string) { e.addBytes([]byte(v)...) e.addBytes(0) } func (e *encoder) reserveInt32() (pos int) { pos = len(e.out) e.addBytes(0, 0, 0, 0) return pos } func (e *encoder) setInt32(pos int, v int32) { e.out[pos+0] = byte(v) e.out[pos+1] = byte(v >> 8) e.out[pos+2] = byte(v >> 16) e.out[pos+3] = byte(v >> 24) } func (e *encoder) addInt32(v int32) { u := uint32(v) e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) } func (e *encoder) addInt64(v int64) { u := uint64(v) e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24), byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) } func (e *encoder) addFloat64(v float64) { e.addInt64(int64(math.Float64bits(v))) } func (e *encoder) addBytes(v ...byte) { e.out = append(e.out, v...) } mgo-r2016.08.01/bson/json.go000066400000000000000000000212561274772341400153370ustar00rootroot00000000000000package bson import ( "bytes" "encoding/base64" "fmt" "gopkg.in/mgo.v2/internal/json" "strconv" "time" ) // UnmarshalJSON unmarshals a JSON value that may hold non-standard // syntax as defined in BSON's extended JSON specification. 
func UnmarshalJSON(data []byte, value interface{}) error { d := json.NewDecoder(bytes.NewBuffer(data)) d.Extend(&jsonExt) return d.Decode(value) } // MarshalJSON marshals a JSON value that may hold non-standard // syntax as defined in BSON's extended JSON specification. func MarshalJSON(value interface{}) ([]byte, error) { var buf bytes.Buffer e := json.NewEncoder(&buf) e.Extend(&jsonExt) err := e.Encode(value) if err != nil { return nil, err } return buf.Bytes(), nil } // jdec is used internally by the JSON decoding functions // so they may unmarshal functions without getting into endless // recursion due to keyed objects. func jdec(data []byte, value interface{}) error { d := json.NewDecoder(bytes.NewBuffer(data)) d.Extend(&funcExt) return d.Decode(value) } var jsonExt json.Extension var funcExt json.Extension // TODO // - Shell regular expressions ("/regexp/opts") func init() { jsonExt.DecodeUnquotedKeys(true) jsonExt.DecodeTrailingCommas(true) funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary") jsonExt.DecodeKeyed("$binary", jdecBinary) jsonExt.DecodeKeyed("$binaryFunc", jdecBinary) jsonExt.EncodeType([]byte(nil), jencBinarySlice) jsonExt.EncodeType(Binary{}, jencBinaryType) funcExt.DecodeFunc("ISODate", "$dateFunc", "S") funcExt.DecodeFunc("new Date", "$dateFunc", "S") jsonExt.DecodeKeyed("$date", jdecDate) jsonExt.DecodeKeyed("$dateFunc", jdecDate) jsonExt.EncodeType(time.Time{}, jencDate) funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i") jsonExt.DecodeKeyed("$timestamp", jdecTimestamp) jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp) funcExt.DecodeConst("undefined", Undefined) jsonExt.DecodeKeyed("$regex", jdecRegEx) jsonExt.EncodeType(RegEx{}, jencRegEx) funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id") jsonExt.DecodeKeyed("$oid", jdecObjectId) jsonExt.DecodeKeyed("$oidFunc", jdecObjectId) jsonExt.EncodeType(ObjectId(""), jencObjectId) funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id") jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef) 
funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N") jsonExt.DecodeKeyed("$numberLong", jdecNumberLong) jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong) jsonExt.EncodeType(int64(0), jencNumberLong) jsonExt.EncodeType(int(0), jencInt) funcExt.DecodeConst("MinKey", MinKey) funcExt.DecodeConst("MaxKey", MaxKey) jsonExt.DecodeKeyed("$minKey", jdecMinKey) jsonExt.DecodeKeyed("$maxKey", jdecMaxKey) jsonExt.EncodeType(orderKey(0), jencMinMaxKey) jsonExt.DecodeKeyed("$undefined", jdecUndefined) jsonExt.EncodeType(Undefined, jencUndefined) jsonExt.Extend(&funcExt) } func fbytes(format string, args ...interface{}) []byte { var buf bytes.Buffer fmt.Fprintf(&buf, format, args...) return buf.Bytes() } func jdecBinary(data []byte) (interface{}, error) { var v struct { Binary []byte `json:"$binary"` Type string `json:"$type"` Func struct { Binary []byte `json:"$binary"` Type int64 `json:"$type"` } `json:"$binaryFunc"` } err := jdec(data, &v) if err != nil { return nil, err } var binData []byte var binKind int64 if v.Type == "" && v.Binary == nil { binData = v.Func.Binary binKind = v.Func.Type } else if v.Type == "" { return v.Binary, nil } else { binData = v.Binary binKind, err = strconv.ParseInt(v.Type, 0, 64) if err != nil { binKind = -1 } } if binKind == 0 { return binData, nil } if binKind < 0 || binKind > 255 { return nil, fmt.Errorf("invalid type in binary object: %s", data) } return Binary{Kind: byte(binKind), Data: binData}, nil } func jencBinarySlice(v interface{}) ([]byte, error) { in := v.([]byte) out := make([]byte, base64.StdEncoding.EncodedLen(len(in))) base64.StdEncoding.Encode(out, in) return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil } func jencBinaryType(v interface{}) ([]byte, error) { in := v.(Binary) out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data))) base64.StdEncoding.Encode(out, in.Data) return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil } const jdateFormat = "2006-01-02T15:04:05.999Z" func jdecDate(data 
[]byte) (interface{}, error) { var v struct { S string `json:"$date"` Func struct { S string } `json:"$dateFunc"` } _ = jdec(data, &v) if v.S == "" { v.S = v.Func.S } if v.S != "" { for _, format := range []string{jdateFormat, "2006-01-02"} { t, err := time.Parse(format, v.S) if err == nil { return t, nil } } return nil, fmt.Errorf("cannot parse date: %q", v.S) } var vn struct { Date struct { N int64 `json:"$numberLong,string"` } `json:"$date"` Func struct { S int64 } `json:"$dateFunc"` } err := jdec(data, &vn) if err != nil { return nil, fmt.Errorf("cannot parse date: %q", data) } n := vn.Date.N if n == 0 { n = vn.Func.S } return time.Unix(n/1000, n%1000*1e6).UTC(), nil } func jencDate(v interface{}) ([]byte, error) { t := v.(time.Time) return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil } func jdecTimestamp(data []byte) (interface{}, error) { var v struct { Func struct { T int32 `json:"t"` I int32 `json:"i"` } `json:"$timestamp"` } err := jdec(data, &v) if err != nil { return nil, err } return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil } func jencTimestamp(v interface{}) ([]byte, error) { ts := uint64(v.(MongoTimestamp)) return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil } func jdecRegEx(data []byte) (interface{}, error) { var v struct { Regex string `json:"$regex"` Options string `json:"$options"` } err := jdec(data, &v) if err != nil { return nil, err } return RegEx{v.Regex, v.Options}, nil } func jencRegEx(v interface{}) ([]byte, error) { re := v.(RegEx) type regex struct { Regex string `json:"$regex"` Options string `json:"$options"` } return json.Marshal(regex{re.Pattern, re.Options}) } func jdecObjectId(data []byte) (interface{}, error) { var v struct { Id string `json:"$oid"` Func struct { Id string } `json:"$oidFunc"` } err := jdec(data, &v) if err != nil { return nil, err } if v.Id == "" { v.Id = v.Func.Id } return ObjectIdHex(v.Id), nil } func jencObjectId(v interface{}) ([]byte, error) { return 
fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil } func jdecDBRef(data []byte) (interface{}, error) { // TODO Support unmarshaling $ref and $id into the input value. var v struct { Obj map[string]interface{} `json:"$dbrefFunc"` } // TODO Fix this. Must not be required. v.Obj = make(map[string]interface{}) err := jdec(data, &v) if err != nil { return nil, err } return v.Obj, nil } func jdecNumberLong(data []byte) (interface{}, error) { var v struct { N int64 `json:"$numberLong,string"` Func struct { N int64 `json:",string"` } `json:"$numberLongFunc"` } var vn struct { N int64 `json:"$numberLong"` Func struct { N int64 } `json:"$numberLongFunc"` } err := jdec(data, &v) if err != nil { err = jdec(data, &vn) v.N = vn.N v.Func.N = vn.Func.N } if err != nil { return nil, err } if v.N != 0 { return v.N, nil } return v.Func.N, nil } func jencNumberLong(v interface{}) ([]byte, error) { n := v.(int64) f := `{"$numberLong":"%d"}` if n <= 1<<53 { f = `{"$numberLong":%d}` } return fbytes(f, n), nil } func jencInt(v interface{}) ([]byte, error) { n := v.(int) f := `{"$numberLong":"%d"}` if n <= 1<<53 { f = `%d` } return fbytes(f, n), nil } func jdecMinKey(data []byte) (interface{}, error) { var v struct { N int64 `json:"$minKey"` } err := jdec(data, &v) if err != nil { return nil, err } if v.N != 1 { return nil, fmt.Errorf("invalid $minKey object: %s", data) } return MinKey, nil } func jdecMaxKey(data []byte) (interface{}, error) { var v struct { N int64 `json:"$maxKey"` } err := jdec(data, &v) if err != nil { return nil, err } if v.N != 1 { return nil, fmt.Errorf("invalid $maxKey object: %s", data) } return MaxKey, nil } func jencMinMaxKey(v interface{}) ([]byte, error) { switch v.(orderKey) { case MinKey: return []byte(`{"$minKey":1}`), nil case MaxKey: return []byte(`{"$maxKey":1}`), nil } panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v)) } func jdecUndefined(data []byte) (interface{}, error) { var v struct { B bool `json:"$undefined"` } err := jdec(data, &v) if err 
!= nil { return nil, err } if !v.B { return nil, fmt.Errorf("invalid $undefined object: %s", data) } return Undefined, nil } func jencUndefined(v interface{}) ([]byte, error) { return []byte(`{"$undefined":true}`), nil } mgo-r2016.08.01/bson/json_test.go000066400000000000000000000074301274772341400163740ustar00rootroot00000000000000package bson_test import ( "gopkg.in/mgo.v2/bson" . "gopkg.in/check.v1" "reflect" "strings" "time" ) type jsonTest struct { a interface{} // value encoded into JSON (optional) b string // JSON expected as output of , and used as input to c interface{} // Value expected from decoding , defaults to e string // error string, if decoding (b) should fail } var jsonTests = []jsonTest{ // $binary { a: []byte("foo"), b: `{"$binary":"Zm9v","$type":"0x0"}`, }, { a: bson.Binary{Kind: 2, Data: []byte("foo")}, b: `{"$binary":"Zm9v","$type":"0x2"}`, }, { b: `BinData(2,"Zm9v")`, c: bson.Binary{Kind: 2, Data: []byte("foo")}, }, // $date { a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC), b: `{"$date":"2016-05-15T01:02:03.004Z"}`, }, { b: `{"$date": {"$numberLong": "1002"}}`, c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC), }, { b: `ISODate("2016-05-15T01:02:03.004Z")`, c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC), }, { b: `new Date(1000)`, c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), }, { b: `new Date("2016-05-15")`, c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC), }, // $timestamp { a: bson.MongoTimestamp(4294967298), b: `{"$timestamp":{"t":1,"i":2}}`, }, { b: `Timestamp(1, 2)`, c: bson.MongoTimestamp(4294967298), }, // $regex { a: bson.RegEx{"pattern", "options"}, b: `{"$regex":"pattern","$options":"options"}`, }, // $oid { a: bson.ObjectIdHex("0123456789abcdef01234567"), b: `{"$oid":"0123456789abcdef01234567"}`, }, { b: `ObjectId("0123456789abcdef01234567")`, c: bson.ObjectIdHex("0123456789abcdef01234567"), }, // $ref (no special type) { b: `DBRef("name", "id")`, c: map[string]interface{}{"$ref": "name", "$id": "id"}, }, // 
$numberLong { a: 123, b: `123`, }, { a: int64(9007199254740992), b: `{"$numberLong":9007199254740992}`, }, { a: int64(1<<53 + 1), b: `{"$numberLong":"9007199254740993"}`, }, { a: 1<<53 + 1, b: `{"$numberLong":"9007199254740993"}`, c: int64(9007199254740993), }, { b: `NumberLong(9007199254740992)`, c: int64(1 << 53), }, { b: `NumberLong("9007199254740993")`, c: int64(1<<53 + 1), }, // $minKey, $maxKey { a: bson.MinKey, b: `{"$minKey":1}`, }, { a: bson.MaxKey, b: `{"$maxKey":1}`, }, { b: `MinKey`, c: bson.MinKey, }, { b: `MaxKey`, c: bson.MaxKey, }, { b: `{"$minKey":0}`, e: `invalid $minKey object: {"$minKey":0}`, }, { b: `{"$maxKey":0}`, e: `invalid $maxKey object: {"$maxKey":0}`, }, { a: bson.Undefined, b: `{"$undefined":true}`, }, { b: `undefined`, c: bson.Undefined, }, { b: `{"v": undefined}`, c: struct{ V interface{} }{bson.Undefined}, }, // Unquoted keys and trailing commas { b: `{$foo: ["bar",],}`, c: map[string]interface{}{"$foo": []interface{}{"bar"}}, }, } func (s *S) TestJSON(c *C) { for i, item := range jsonTests { c.Logf("------------ (#%d)", i) c.Logf("A: %#v", item.a) c.Logf("B: %#v", item.b) if item.c == nil { item.c = item.a } else { c.Logf("C: %#v", item.c) } if item.e != "" { c.Logf("E: %s", item.e) } if item.a != nil { data, err := bson.MarshalJSON(item.a) c.Assert(err, IsNil) c.Logf("Dumped: %#v", string(data)) c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b) } var zero interface{} if item.c == nil { zero = &struct{}{} } else { zero = reflect.New(reflect.TypeOf(item.c)).Interface() } err := bson.UnmarshalJSON([]byte(item.b), zero) if item.e != "" { c.Assert(err, NotNil) c.Assert(err.Error(), Equals, item.e) continue } c.Assert(err, IsNil) zerov := reflect.ValueOf(zero) value := zerov.Interface() if zerov.Kind() == reflect.Ptr { value = zerov.Elem().Interface() } c.Logf("Loaded: %#v", value) c.Assert(value, DeepEquals, item.c) } } 
mgo-r2016.08.01/bson/slice-getter-setter.patch000066400000000000000000000530561274772341400207560ustar00rootroot00000000000000diff --git a/bson/bson_test.go b/bson/bson_test.go index 5c1b869..630292b 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1056,6 +1056,25 @@ func (i *getterSetterInt) SetBSON(raw bson.Raw) error { return err } +type ifaceType interface { + Hello() +} + +type ifaceSlice []ifaceType + +func (s *ifaceSlice) SetBSON(raw bson.Raw) error { + var ns []int + if err := raw.Unmarshal(&ns); err != nil { + return err + } + *s = make(ifaceSlice, ns[0]) + return nil +} + +func (s ifaceSlice) GetBSON() (interface{}, error) { + return []int{len(s)}, nil +} + type ( MyString string MyBytes []byte @@ -1090,197 +1109,200 @@ func parseURL(s string) *url.URL { // verify that the resulting value is deep-equal to the untouched second value. // Then, it will do the same in the *opposite* direction! var twoWayCrossItems = []crossTypeItem{ - // int<=>int - {&struct{ I int }{42}, &struct{ I int8 }{42}}, - {&struct{ I int }{42}, &struct{ I int32 }{42}}, - {&struct{ I int }{42}, &struct{ I int64 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I int32 }{42}, &struct{ I int64 }{42}}, - - // uint<=>uint - {&struct{ I uint }{42}, &struct{ I uint8 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, - - // float32<=>float64 - {&struct{ I float32 }{42}, &struct{ I float64 }{42}}, - - // int<=>uint - {&struct{ I uint }{42}, &struct{ I int }{42}}, - {&struct{ I uint }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int }{42}}, - {&struct{ I uint8 }{42}, 
&struct{ I int8 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, - - // int <=> float - {&struct{ I int }{42}, &struct{ I float64 }{42}}, - - // int <=> bool - {&struct{ I int }{1}, &struct{ I bool }{true}}, - {&struct{ I int }{0}, &struct{ I bool }{false}}, - - // uint <=> float64 - {&struct{ I uint }{42}, &struct{ I float64 }{42}}, - - // uint <=> bool - {&struct{ I uint }{1}, &struct{ I bool }{true}}, - {&struct{ I uint }{0}, &struct{ I bool }{false}}, - - // float64 <=> bool - {&struct{ I float64 }{1}, &struct{ I bool }{true}}, - {&struct{ I float64 }{0}, &struct{ I bool }{false}}, - - // string <=> string and string <=> []byte - {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, - {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, - {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, - - // map <=> struct - {&struct { - A struct { - B, C int - } - }{struct{ B, C int }{1, 2}}, - map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, - - {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, - {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, - {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, - {&struct{ A uint }{42}, map[string]int{"a": 42}}, - {&struct{ A uint }{42}, map[string]float64{"a": 42}}, - {&struct{ A uint }{1}, map[string]bool{"a": true}}, - {&struct{ A int }{42}, map[string]uint{"a": 42}}, - {&struct{ A int }{42}, map[string]float64{"a": 42}}, - {&struct{ A int }{1}, 
map[string]bool{"a": true}}, - {&struct{ A float64 }{42}, map[string]float32{"a": 42}}, - {&struct{ A float64 }{42}, map[string]int{"a": 42}}, - {&struct{ A float64 }{42}, map[string]uint{"a": 42}}, - {&struct{ A float64 }{1}, map[string]bool{"a": true}}, - {&struct{ A bool }{true}, map[string]int{"a": 1}}, - {&struct{ A bool }{true}, map[string]uint{"a": 1}}, - {&struct{ A bool }{true}, map[string]float64{"a": 1}}, - {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, - - // url.URL <=> string - {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - - // Slices - {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - - // Conditionals - {&condBool{true}, map[string]bool{"v": true}}, - {&condBool{}, map[string]bool{}}, - {&condInt{1}, map[string]int{"v": 1}}, - {&condInt{}, map[string]int{}}, - {&condUInt{1}, map[string]uint{"v": 1}}, - {&condUInt{}, map[string]uint{}}, - {&condFloat{}, map[string]int{}}, - {&condStr{"yo"}, map[string]string{"v": "yo"}}, - {&condStr{}, map[string]string{}}, - {&condStrNS{"yo"}, map[string]string{"v": "yo"}}, - {&condStrNS{}, map[string]string{}}, - {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, - {&condSlice{}, map[string][]string{}}, - {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, - {&condMap{}, map[string][]string{}}, - {&condIface{"yo"}, map[string]string{"v": "yo"}}, - {&condIface{""}, map[string]string{"v": ""}}, - {&condIface{}, map[string]string{}}, - {&condPtr{&truevar}, map[string]bool{"v": true}}, - {&condPtr{&falsevar}, map[string]bool{"v": false}}, - {&condPtr{}, map[string]string{}}, - - {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, - {&condTime{}, map[string]string{}}, - - 
{&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, - {&condStruct{struct{ A []int }{}}, bson.M{}}, - - {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, - {&namedCondStr{}, map[string]string{}}, - - {&shortInt{1}, map[string]interface{}{"v": 1}}, - {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, - - {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, - {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortNonEmptyInt{}, map[string]interface{}{}}, - - {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}}, - {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, - {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, - {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, - - // []byte <=> MyBytes - {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, - {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, - {&struct{ B MyBytes }{}, map[string]bool{}}, - {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, - - // bool <=> MyBool - {&struct{ B MyBool }{true}, map[string]bool{"b": true}}, - {&struct{ B MyBool }{}, map[string]bool{"b": false}}, - {&struct{ B MyBool }{}, map[string]string{}}, - {&struct{ B bool }{}, map[string]MyBool{"b": false}}, - - // arrays - {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": 
[2]int{1, 2}}}, - - // zero time - {&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}}, - - // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds - {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, - map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, - - // bson.D <=> []DocElem - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, - {&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}}, - - // bson.RawD <=> []RawDocElem - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - - // bson.M <=> map - {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, - - // bson.M <=> map[MyString] - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, - - // json.Number <=> int64, float64 - {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, - {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, - {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, - - // bson.D <=> non-struct getter/setter - {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, - {&bson.D{{"a", 42}}, &gsintvar}, + //// int<=>int + //{&struct{ I int }{42}, &struct{ I int8 }{42}}, + //{&struct{ I int }{42}, &struct{ I int32 }{42}}, + //{&struct{ I int }{42}, &struct{ I int64 }{42}}, + //{&struct{ I int8 }{42}, &struct{ I int32 }{42}}, + //{&struct{ I int8 }{42}, &struct{ I int64 }{42}}, + //{&struct{ I int32 }{42}, &struct{ I int64 }{42}}, + + //// uint<=>uint + //{&struct{ I uint }{42}, &struct{ I 
uint8 }{42}}, + //{&struct{ I uint }{42}, &struct{ I uint32 }{42}}, + //{&struct{ I uint }{42}, &struct{ I uint64 }{42}}, + //{&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, + //{&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, + //{&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, + + //// float32<=>float64 + //{&struct{ I float32 }{42}, &struct{ I float64 }{42}}, + + //// int<=>uint + //{&struct{ I uint }{42}, &struct{ I int }{42}}, + //{&struct{ I uint }{42}, &struct{ I int8 }{42}}, + //{&struct{ I uint }{42}, &struct{ I int32 }{42}}, + //{&struct{ I uint }{42}, &struct{ I int64 }{42}}, + //{&struct{ I uint8 }{42}, &struct{ I int }{42}}, + //{&struct{ I uint8 }{42}, &struct{ I int8 }{42}}, + //{&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, + //{&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, + //{&struct{ I uint32 }{42}, &struct{ I int }{42}}, + //{&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, + //{&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, + //{&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, + //{&struct{ I uint64 }{42}, &struct{ I int }{42}}, + //{&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, + //{&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, + //{&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, + + //// int <=> float + //{&struct{ I int }{42}, &struct{ I float64 }{42}}, + + //// int <=> bool + //{&struct{ I int }{1}, &struct{ I bool }{true}}, + //{&struct{ I int }{0}, &struct{ I bool }{false}}, + + //// uint <=> float64 + //{&struct{ I uint }{42}, &struct{ I float64 }{42}}, + + //// uint <=> bool + //{&struct{ I uint }{1}, &struct{ I bool }{true}}, + //{&struct{ I uint }{0}, &struct{ I bool }{false}}, + + //// float64 <=> bool + //{&struct{ I float64 }{1}, &struct{ I bool }{true}}, + //{&struct{ I float64 }{0}, &struct{ I bool }{false}}, + + //// string <=> string and string <=> []byte + //{&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, + //{&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, + 
//{&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, + + //// map <=> struct + //{&struct { + // A struct { + // B, C int + // } + //}{struct{ B, C int }{1, 2}}, + // map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, + + //{&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, + //{&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, + //{&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, + //{&struct{ A uint }{42}, map[string]int{"a": 42}}, + //{&struct{ A uint }{42}, map[string]float64{"a": 42}}, + //{&struct{ A uint }{1}, map[string]bool{"a": true}}, + //{&struct{ A int }{42}, map[string]uint{"a": 42}}, + //{&struct{ A int }{42}, map[string]float64{"a": 42}}, + //{&struct{ A int }{1}, map[string]bool{"a": true}}, + //{&struct{ A float64 }{42}, map[string]float32{"a": 42}}, + //{&struct{ A float64 }{42}, map[string]int{"a": 42}}, + //{&struct{ A float64 }{42}, map[string]uint{"a": 42}}, + //{&struct{ A float64 }{1}, map[string]bool{"a": true}}, + //{&struct{ A bool }{true}, map[string]int{"a": 1}}, + //{&struct{ A bool }{true}, map[string]uint{"a": 1}}, + //{&struct{ A bool }{true}, map[string]float64{"a": 1}}, + //{&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, + + //// url.URL <=> string + //{&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, + //{&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, + + //// Slices + //{&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, + //{&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, + + //// Conditionals + //{&condBool{true}, map[string]bool{"v": true}}, + //{&condBool{}, map[string]bool{}}, + //{&condInt{1}, map[string]int{"v": 1}}, + //{&condInt{}, map[string]int{}}, + //{&condUInt{1}, map[string]uint{"v": 1}}, + //{&condUInt{}, map[string]uint{}}, + //{&condFloat{}, map[string]int{}}, + //{&condStr{"yo"}, 
map[string]string{"v": "yo"}}, + //{&condStr{}, map[string]string{}}, + //{&condStrNS{"yo"}, map[string]string{"v": "yo"}}, + //{&condStrNS{}, map[string]string{}}, + //{&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, + //{&condSlice{}, map[string][]string{}}, + //{&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, + //{&condMap{}, map[string][]string{}}, + //{&condIface{"yo"}, map[string]string{"v": "yo"}}, + //{&condIface{""}, map[string]string{"v": ""}}, + //{&condIface{}, map[string]string{}}, + //{&condPtr{&truevar}, map[string]bool{"v": true}}, + //{&condPtr{&falsevar}, map[string]bool{"v": false}}, + //{&condPtr{}, map[string]string{}}, + + //{&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, + //{&condTime{}, map[string]string{}}, + + //{&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, + //{&condStruct{struct{ A []int }{}}, bson.M{}}, + + //{&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, + //{&namedCondStr{}, map[string]string{}}, + + //{&shortInt{1}, map[string]interface{}{"v": 1}}, + //{&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, + //{&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + //{&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, + //{&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + //{&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + //{&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, + + //{&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, + //{&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + //{&shortNonEmptyInt{}, map[string]interface{}{}}, + + //{&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, + //{&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, + //{&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}}, + 
//{&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, + //{&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, + //{&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, + + //// []byte <=> MyBytes + //{&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, + //{&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, + //{&struct{ B MyBytes }{}, map[string]bool{}}, + //{&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, + + //// bool <=> MyBool + //{&struct{ B MyBool }{true}, map[string]bool{"b": true}}, + //{&struct{ B MyBool }{}, map[string]bool{"b": false}}, + //{&struct{ B MyBool }{}, map[string]string{}}, + //{&struct{ B bool }{}, map[string]MyBool{"b": false}}, + + //// arrays + //{&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, + + //// zero time + //{&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}}, + + //// zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds + //{&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, + // map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, + + //// bson.D <=> []DocElem + //{&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, + //{&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, + //{&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}}, + + //// bson.RawD <=> []RawDocElem + //{&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, + //{&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, + + //// bson.M <=> map + //{bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, + //{bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, + + //// bson.M <=> map[MyString] + 
//{bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, + + //// json.Number <=> int64, float64 + //{&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, + //{&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, + //{&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, + + //// bson.D <=> non-struct getter/setter + //{&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, + //{&bson.D{{"a", 42}}, &gsintvar}, + + // Interface slice setter. + {&struct{ V ifaceSlice }{ifaceSlice{nil, nil}}, bson.M{"v": []interface{}{2}}}, } // Same thing, but only one way (obj1 => obj2). diff --git a/bson/decode.go b/bson/decode.go index 782e933..bdd2e02 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. 
+// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -474,6 +474,11 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { panic("Can't happen. Handled above.") case 0x04: // Array outt := out.Type() + if setterStyle(outt) != setterNone { + // Skip the value so its data is handed to the setter below. + d.dropElem(kind) + break + } for outt.Kind() == reflect.Ptr { outt = outt.Elem() } diff --git a/bson/encode.go b/bson/encode.go index 81a13ad..8599f03 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. 
+// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -212,7 +212,7 @@ func (e *encoder) addSlice(v reflect.Value) { return } l := v.Len() - et := v.Type().Elem() + et := v.Type().Elem() if et == typeDocElem { for i := 0; i < l; i++ { elem := v.Index(i).Interface().(DocElem) @@ -415,7 +415,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { case time.Time: // MongoDB handles timestamps as milliseconds. e.addElemName('\x09', name) - e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6)) + e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) case url.URL: e.addElemName('\x02', name) mgo-r2016.08.01/bson/specdata/000077500000000000000000000000001274772341400156155ustar00rootroot00000000000000mgo-r2016.08.01/bson/specdata/update.sh000077500000000000000000000005701274772341400174400ustar00rootroot00000000000000#!/bin/sh set -e if [ ! 
-d specifications ]; then git clone -b bson git@github.com:jyemin/specifications fi TESTFILE="../specdata_test.go" cat < $TESTFILE package bson_test var specTests = []string{ END for file in specifications/source/bson/tests/*.yml; do ( echo '`' cat $file echo -n '`,' ) >> $TESTFILE done echo '}' >> $TESTFILE gofmt -w $TESTFILE mgo-r2016.08.01/bson/specdata_test.go000066400000000000000000000131101274772341400171770ustar00rootroot00000000000000package bson_test var specTests = []string{ ` --- description: "Array type" documents: - decoded: a : [] encoded: 0D000000046100050000000000 - decoded: a: [10] encoded: 140000000461000C0000001030000A0000000000 - # Decode an array that uses an empty string as the key decodeOnly : true decoded: a: [10] encoded: 130000000461000B00000010000A0000000000 - # Decode an array that uses a non-numeric string as the key decodeOnly : true decoded: a: [10] encoded: 150000000461000D000000106162000A0000000000 `, ` --- description: "Boolean type" documents: - encoded: "090000000862000100" decoded: { "b" : true } - encoded: "090000000862000000" decoded: { "b" : false } `, ` --- description: "Corrupted BSON" documents: - encoded: "09000000016600" error: "truncated double" - encoded: "09000000026600" error: "truncated string" - encoded: "09000000036600" error: "truncated document" - encoded: "09000000046600" error: "truncated array" - encoded: "09000000056600" error: "truncated binary" - encoded: "09000000076600" error: "truncated objectid" - encoded: "09000000086600" error: "truncated boolean" - encoded: "09000000096600" error: "truncated date" - encoded: "090000000b6600" error: "truncated regex" - encoded: "090000000c6600" error: "truncated db pointer" - encoded: "0C0000000d6600" error: "truncated javascript" - encoded: "0C0000000e6600" error: "truncated symbol" - encoded: "0C0000000f6600" error: "truncated javascript with scope" - encoded: "0C000000106600" error: "truncated int32" - encoded: "0C000000116600" error: "truncated timestamp" - 
encoded: "0C000000126600" error: "truncated int64" - encoded: "0400000000" error: basic - encoded: "0500000001" error: basic - encoded: "05000000" error: basic - encoded: "0700000002610078563412" error: basic - encoded: "090000001061000500" error: basic - encoded: "00000000000000000000" error: basic - encoded: "1300000002666f6f00040000006261720000" error: "basic" - encoded: "1800000003666f6f000f0000001062617200ffffff7f0000" error: basic - encoded: "1500000003666f6f000c0000000862617200010000" error: basic - encoded: "1c00000003666f6f001200000002626172000500000062617a000000" error: basic - encoded: "1000000002610004000000616263ff00" error: string is not null-terminated - encoded: "0c0000000200000000000000" error: bad_string_length - encoded: "120000000200ffffffff666f6f6261720000" error: bad_string_length - encoded: "0c0000000e00000000000000" error: bad_string_length - encoded: "120000000e00ffffffff666f6f6261720000" error: bad_string_length - encoded: "180000000c00fa5bd841d6585d9900" error: "" - encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900" error: bad_string_length - encoded: "0c0000000d00000000000000" error: bad_string_length - encoded: "0c0000000d00ffffffff0000" error: bad_string_length - encoded: "1c0000000f001500000000000000000c000000020001000000000000" error: bad_string_length - encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000" error: bad_string_length - encoded: "1c0000000f001500000001000000000c000000020000000000000000" error: bad_string_length - encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000" error: bad_string_length - encoded: "0E00000008616263646566676869707172737475" error: "Run-on CString" - encoded: "0100000000" error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)" - encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000" error: "One object, but with object size listed smaller than it is in 
the data" - encoded: "05000000" error: "One object, missing the EOO at the end" - encoded: "0500000001" error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01" - encoded: "05000000ff" error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff" - encoded: "0500000070" error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70" - encoded: "07000000000000" error: "Invalid BSON type low range" - encoded: "07000000800000" error: "Invalid BSON type high range" - encoded: "090000000862000200" error: "Invalid boolean value of 2" - encoded: "09000000086200ff00" error: "Invalid boolean value of -1" `, ` --- description: "Int32 type" documents: - decoded: i: -2147483648 encoded: 0C0000001069000000008000 - decoded: i: 2147483647 encoded: 0C000000106900FFFFFF7F00 - decoded: i: -1 encoded: 0C000000106900FFFFFFFF00 - decoded: i: 0 encoded: 0C0000001069000000000000 - decoded: i: 1 encoded: 0C0000001069000100000000 `, ` --- description: "String type" documents: - decoded: s : "" encoded: 0D000000027300010000000000 - decoded: s: "a" encoded: 0E00000002730002000000610000 - decoded: s: "This is a string" encoded: 1D0000000273001100000054686973206973206120737472696E670000 - decoded: s: "κόσμε" encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000 `} mgo-r2016.08.01/bson/specifications/000077500000000000000000000000001274772341400170345ustar00rootroot00000000000000mgo-r2016.08.01/bulk.go000066400000000000000000000234151274772341400143610ustar00rootroot00000000000000package mgo import ( "bytes" "sort" "gopkg.in/mgo.v2/bson" ) // Bulk represents an operation that can be prepared with several // orthogonal changes before being delivered to the server. // // MongoDB servers older than version 2.6 do not have proper support for bulk // operations, so the driver attempts to map its API as much as possible into // the functionality that works. 
In particular, in those releases updates and // removals are sent individually, and inserts are sent in bulk but have // suboptimal error reporting compared to more recent versions of the server. // See the documentation of BulkErrorCase for details on that. // // Relevant documentation: // // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api // type Bulk struct { c *Collection opcount int actions []bulkAction ordered bool } type bulkOp int const ( bulkInsert bulkOp = iota + 1 bulkUpdate bulkUpdateAll bulkRemove ) type bulkAction struct { op bulkOp docs []interface{} idxs []int } type bulkUpdateOp []interface{} type bulkDeleteOp []interface{} // BulkResult holds the results for a bulk operation. type BulkResult struct { Matched int Modified int // Available only for MongoDB 2.6+ // Be conservative while we understand exactly how to report these // results in a useful and convenient way, and also how to emulate // them with prior servers. private bool } // BulkError holds an error returned from running a Bulk operation. // Individual errors may be obtained and inspected via the Cases method. 
type BulkError struct { ecases []BulkErrorCase } func (e *BulkError) Error() string { if len(e.ecases) == 0 { return "invalid BulkError instance: no errors" } if len(e.ecases) == 1 { return e.ecases[0].Err.Error() } msgs := make([]string, 0, len(e.ecases)) seen := make(map[string]bool) for _, ecase := range e.ecases { msg := ecase.Err.Error() if !seen[msg] { seen[msg] = true msgs = append(msgs, msg) } } if len(msgs) == 1 { return msgs[0] } var buf bytes.Buffer buf.WriteString("multiple errors in bulk operation:\n") for _, msg := range msgs { buf.WriteString(" - ") buf.WriteString(msg) buf.WriteByte('\n') } return buf.String() } type bulkErrorCases []BulkErrorCase func (slice bulkErrorCases) Len() int { return len(slice) } func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index } func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } // BulkErrorCase holds an individual error found while attempting a single change // within a bulk operation, and the position in which it was enqueued. // // MongoDB servers older than version 2.6 do not have proper support for bulk // operations, so the driver attempts to map its API as much as possible into // the functionality that works. In particular, only the last error is reported // for bulk inserts and without any positional information, so the Index // field is set to -1 in these cases. type BulkErrorCase struct { Index int // Position of operation that failed, or -1 if unknown. Err error } // Cases returns all individual errors found while attempting the requested changes. // // See the documentation of BulkErrorCase for limitations in older MongoDB releases. func (e *BulkError) Cases() []BulkErrorCase { return e.ecases } // Bulk returns a value to prepare the execution of a bulk operation. func (c *Collection) Bulk() *Bulk { return &Bulk{c: c, ordered: true} } // Unordered puts the bulk operation in unordered mode. 
// // In unordered mode the indvidual operations may be sent // out of order, which means latter operations may proceed // even if prior ones have failed. func (b *Bulk) Unordered() { b.ordered = false } func (b *Bulk) action(op bulkOp, opcount int) *bulkAction { var action *bulkAction if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { action = &b.actions[len(b.actions)-1] } else if !b.ordered { for i := range b.actions { if b.actions[i].op == op { action = &b.actions[i] break } } } if action == nil { b.actions = append(b.actions, bulkAction{op: op}) action = &b.actions[len(b.actions)-1] } for i := 0; i < opcount; i++ { action.idxs = append(action.idxs, b.opcount) b.opcount++ } return action } // Insert queues up the provided documents for insertion. func (b *Bulk) Insert(docs ...interface{}) { action := b.action(bulkInsert, len(docs)) action.docs = append(action.docs, docs...) } // Remove queues up the provided selectors for removing matching documents. // Each selector will remove only a single matching document. func (b *Bulk) Remove(selectors ...interface{}) { action := b.action(bulkRemove, len(selectors)) for _, selector := range selectors { if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &deleteOp{ Collection: b.c.FullName, Selector: selector, Flags: 1, Limit: 1, }) } } // RemoveAll queues up the provided selectors for removing all matching documents. // Each selector will remove all matching documents. func (b *Bulk) RemoveAll(selectors ...interface{}) { action := b.action(bulkRemove, len(selectors)) for _, selector := range selectors { if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &deleteOp{ Collection: b.c.FullName, Selector: selector, Flags: 0, Limit: 0, }) } } // Update queues up the provided pairs of updating instructions. // The first element of each pair selects which documents must be // updated, and the second element defines how to update it. 
// Each pair matches exactly one document for updating at most. func (b *Bulk) Update(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.Update requires an even number of parameters") } action := b.action(bulkUpdate, len(pairs)/2) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, Selector: selector, Update: pairs[i+1], }) } } // UpdateAll queues up the provided pairs of updating instructions. // The first element of each pair selects which documents must be // updated, and the second element defines how to update it. // Each pair updates all documents matching the selector. func (b *Bulk) UpdateAll(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.UpdateAll requires an even number of parameters") } action := b.action(bulkUpdate, len(pairs)/2) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, Selector: selector, Update: pairs[i+1], Flags: 2, Multi: true, }) } } // Upsert queues up the provided pairs of upserting instructions. // The first element of each pair selects which documents must be // updated, and the second element defines how to update it. // Each pair matches exactly one document for updating at most. func (b *Bulk) Upsert(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.Update requires an even number of parameters") } action := b.action(bulkUpdate, len(pairs)/2) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, Selector: selector, Update: pairs[i+1], Flags: 1, Upsert: true, }) } } // Run runs all the operations queued up. // // If an error is reported on an unordered bulk operation, the error value may // be an aggregation of all issues observed. 
As an exception to that, Insert // operations running on MongoDB versions prior to 2.6 will report the last // error only due to a limitation in the wire protocol. func (b *Bulk) Run() (*BulkResult, error) { var result BulkResult var berr BulkError var failed bool for i := range b.actions { action := &b.actions[i] var ok bool switch action.op { case bulkInsert: ok = b.runInsert(action, &result, &berr) case bulkUpdate: ok = b.runUpdate(action, &result, &berr) case bulkRemove: ok = b.runRemove(action, &result, &berr) default: panic("unknown bulk operation") } if !ok { failed = true if b.ordered { break } } } if failed { sort.Sort(bulkErrorCases(berr.ecases)) return nil, &berr } return &result, nil } func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool { op := &insertOp{b.c.FullName, action.docs, 0} if !b.ordered { op.flags = 1 // ContinueOnError } lerr, err := b.c.writeOp(op, b.ordered) return b.checkSuccess(action, berr, lerr, err) } func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool { lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) if lerr != nil { result.Matched += lerr.N result.Modified += lerr.modified } return b.checkSuccess(action, berr, lerr, err) } func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool { lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) if lerr != nil { result.Matched += lerr.N result.Modified += lerr.modified } return b.checkSuccess(action, berr, lerr, err) } func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool { if lerr != nil && len(lerr.ecases) > 0 { for i := 0; i < len(lerr.ecases); i++ { // Map back from the local error index into the visible one. 
ecase := lerr.ecases[i] idx := ecase.Index if idx >= 0 { idx = action.idxs[idx] } berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err}) } return false } else if err != nil { for i := 0; i < len(action.idxs); i++ { berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err}) } return false } return true } mgo-r2016.08.01/bulk_test.go000066400000000000000000000333611274772341400154210ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2015 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( . 
"gopkg.in/check.v1" "gopkg.in/mgo.v2" ) func (s *S) TestBulkInsert(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Insert(M{"n": 1}) bulk.Insert(M{"n": 2}, M{"n": 3}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) } func (s *S) TestBulkInsertError(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") c.Assert(mgo.IsDup(err), Equals, true) type doc struct { N int `_id` } var res []doc err = coll.Find(nil).Sort("_id").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}}) } func (s *S) TestBulkInsertErrorUnordered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") type doc struct { N int `_id` } var res []doc err = coll.Find(nil).Sort("_id").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) } func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { // The server has a batch limit of 1000 documents when using write commands. // This artificial limit did not exist with the old wire protocol, so to // avoid compatibility issues the implementation internally split batches // into the proper size and delivers them one by one. 
This test ensures that // the behavior of unordered (that is, continue on error) remains correct // when errors happen and there are batches left. session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Unordered() const total = 4096 type doc struct { Id int `_id` } docs := make([]interface{}, total) for i := 0; i < total; i++ { docs[i] = doc{i} } docs[1] = doc{0} bulk.Insert(docs...) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") n, err := coll.Count() c.Assert(err, IsNil) c.Assert(n, Equals, total-1) var res doc err = coll.FindId(1500).One(&res) c.Assert(err, IsNil) c.Assert(res.Id, Equals, 1500) } func (s *S) TestBulkErrorString(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // If it's just the same string multiple times, join it into a single message. bulk := coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key") c.Assert(mgo.IsDup(err), Equals, true) // With matching errors but different messages, present them all. bulk = coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"}) _, err = bulk.Run() if s.versionAtLeast(2, 6) { c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$") c.Assert(err, ErrorMatches, "(?s).*dupone.*") c.Assert(err, ErrorMatches, "(?s).*duptwo.*") } else { // Wire protocol query doesn't return all errors. c.Assert(err, ErrorMatches, ".*duplicate.*") } c.Assert(mgo.IsDup(err), Equals, true) // With mixed errors, present them all. 
bulk = coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": []int{2}}) _, err = bulk.Run() if s.versionAtLeast(2, 6) { c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$") } else { // Wire protocol query doesn't return all errors. c.Assert(err, ErrorMatches, ".*array.*") } c.Assert(mgo.IsDup(err), Equals, false) } func (s *S) TestBulkErrorCases_2_6(c *C) { if !s.versionAtLeast(2, 6) { c.Skip("2.4- has poor bulk reporting") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Unordered() // There's a limit of 1000 operations per command, so // this forces the more complex indexing logic to act. for i := 0; i < 1010; i++ { switch i { case 3, 14: bulk.Insert(M{"_id": "dupone"}) case 5, 106: bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}}) case 7, 1008: bulk.Insert(M{"_id": "duptwo"}) default: bulk.Insert(M{"_id": i}) } } _, err = bulk.Run() ecases := err.(*mgo.BulkError).Cases() c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*") c.Check(ecases[0].Index, Equals, 14) c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*") c.Check(ecases[1].Index, Equals, 106) c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*") c.Check(ecases[2].Index, Equals, 1008) } func (s *S) TestBulkErrorCases_2_4(c *C) { if s.versionAtLeast(2, 6) { c.Skip("2.6+ has better reporting") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Unordered() // There's a limit of 1000 operations per command, so // this forces the more complex indexing logic to act. 
for i := 0; i < 1010; i++ { switch i { case 3, 14: bulk.Insert(M{"_id": "dupone"}) case 5: bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}}) case 106: bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}}) case 7, 1008: bulk.Insert(M{"_id": "duptwo"}) default: bulk.Insert(M{"_id": i}) } } _, err = bulk.Run() ecases := err.(*mgo.BulkError).Cases() c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*") c.Check(ecases[0].Index, Equals, -1) c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`) c.Check(ecases[1].Index, Equals, 106) } func (s *S) TestBulkErrorCasesOrdered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() // There's a limit of 1000 operations per command, so // this forces the more complex indexing logic to act. for i := 0; i < 20; i++ { switch i { case 3, 14: bulk.Insert(M{"_id": "dupone"}) case 7, 17: bulk.Insert(M{"_id": "duptwo"}) default: bulk.Insert(M{"_id": i}) } } _, err = bulk.Run() ecases := err.(*mgo.BulkError).Cases() c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*") if s.versionAtLeast(2, 6) { c.Check(ecases[0].Index, Equals, 14) } else { c.Check(ecases[0].Index, Equals, -1) } c.Check(ecases, HasLen, 1) } func (s *S) TestBulkUpdate(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}}) bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}}) bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. 
bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 4) if s.versionAtLeast(2, 6) { c.Assert(r.Modified, Equals, 3) } type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}}) } func (s *S) TestBulkUpdateError(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Update( M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, M{"n": 3}, M{"$set": M{"n": 30}}, ) r, err := bulk.Run() c.Assert(err, ErrorMatches, ".*_id.*") c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}}) } func (s *S) TestBulkUpdateErrorUnordered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Unordered() bulk.Update( M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, M{"n": 3}, M{"$set": M{"n": 30}}, ) r, err := bulk.Run() c.Assert(err, ErrorMatches, ".*_id.*") c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}}) } func (s *S) TestBulkUpdateAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}}) 
bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) // Won't change. bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 6) if s.versionAtLeast(2, 6) { c.Assert(r.Modified, Equals, 5) } type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}}) } func (s *S) TestBulkMixedUnordered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // Abuse undefined behavior to ensure the desired implementation is in place. bulk := coll.Bulk() bulk.Unordered() bulk.Insert(M{"n": 1}) bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}}) bulk.Insert(M{"n": 2}) bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}}) bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}}) bulk.Insert(M{"n": 3}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 3) if s.versionAtLeast(2, 6) { c.Assert(r.Modified, Equals, 3) } type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}}) } func (s *S) TestBulkUpsert(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}}) bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}}) } func (s *S) TestBulkRemove(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer 
session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Remove(M{"n": 1}) bulk.Remove(M{"n": 2}, M{"n": 4}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 3) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{3}, {4}}) } func (s *S) TestBulkRemoveAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.RemoveAll(M{"n": 1}) bulk.RemoveAll(M{"n": 2}, M{"n": 4}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 4) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{3}}) } mgo-r2016.08.01/cluster.go000066400000000000000000000455611274772341400151130ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "errors" "fmt" "net" "strconv" "strings" "sync" "time" "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- // Mongo cluster encapsulation. // // A cluster enables the communication with one or more servers participating // in a mongo cluster. This works with individual servers, a replica set, // a replica pair, one or multiple mongos routers, etc. type mongoCluster struct { sync.RWMutex serverSynced sync.Cond userSeeds []string dynaSeeds []string servers mongoServers masters mongoServers references int syncing bool direct bool failFast bool syncCount uint setName string cachedIndex map[string]bool sync chan bool dial dialer } func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster { cluster := &mongoCluster{ userSeeds: userSeeds, references: 1, direct: direct, failFast: failFast, dial: dial, setName: setName, } cluster.serverSynced.L = cluster.RWMutex.RLocker() cluster.sync = make(chan bool, 1) stats.cluster(+1) go cluster.syncServersLoop() return cluster } // Acquire increases the reference count for the cluster. func (cluster *mongoCluster) Acquire() { cluster.Lock() cluster.references++ debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references) cluster.Unlock() } // Release decreases the reference count for the cluster. Once // it reaches zero, all servers will be closed. 
func (cluster *mongoCluster) Release() { cluster.Lock() if cluster.references == 0 { panic("cluster.Release() with references == 0") } cluster.references-- debugf("Cluster %p released (refs=%d)", cluster, cluster.references) if cluster.references == 0 { for _, server := range cluster.servers.Slice() { server.Close() } // Wake up the sync loop so it can die. cluster.syncServers() stats.cluster(-1) } cluster.Unlock() } func (cluster *mongoCluster) LiveServers() (servers []string) { cluster.RLock() for _, serv := range cluster.servers.Slice() { servers = append(servers, serv.Addr) } cluster.RUnlock() return servers } func (cluster *mongoCluster) removeServer(server *mongoServer) { cluster.Lock() cluster.masters.Remove(server) other := cluster.servers.Remove(server) cluster.Unlock() if other != nil { other.Close() log("Removed server ", server.Addr, " from cluster.") } server.Close() } type isMasterResult struct { IsMaster bool Secondary bool Primary string Hosts []string Passives []string Tags bson.D Msg string SetName string `bson:"setName"` MaxWireVersion int `bson:"maxWireVersion"` } func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { // Monotonic let's it talk to a slave and still hold the socket. session := newSession(Monotonic, cluster, 10*time.Second) session.setSocket(socket) err := session.Run("ismaster", result) session.Close() return err } type possibleTimeout interface { Timeout() bool } var syncSocketTimeout = 5 * time.Second func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) { var syncTimeout time.Duration if raceDetector { // This variable is only ever touched by tests. globalMutex.Lock() syncTimeout = syncSocketTimeout globalMutex.Unlock() } else { syncTimeout = syncSocketTimeout } addr := server.Addr log("SYNC Processing ", addr, "...") // Retry a few times to avoid knocking a server down for a hiccup. 
var result isMasterResult var tryerr error for retry := 0; ; retry++ { if retry == 3 || retry == 1 && cluster.failFast { return nil, nil, tryerr } if retry > 0 { // Don't abuse the server needlessly if there's something actually wrong. if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() { // Give a chance for waiters to timeout as well. cluster.serverSynced.Broadcast() } time.Sleep(syncShortDelay) } // It's not clear what would be a good timeout here. Is it // better to wait longer or to retry? socket, _, err := server.AcquireSocket(0, syncTimeout) if err != nil { tryerr = err logf("SYNC Failed to get socket to %s: %v", addr, err) continue } err = cluster.isMaster(socket, &result) socket.Release() if err != nil { tryerr = err logf("SYNC Command 'ismaster' to %s failed: %v", addr, err) continue } debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result) break } if cluster.setName != "" && result.SetName != cluster.setName { logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName) return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName) } if result.IsMaster { debugf("SYNC %s is a master.", addr) if !server.info.Master { // Made an incorrect assumption above, so fix stats. stats.conn(-1, false) stats.conn(+1, true) } } else if result.Secondary { debugf("SYNC %s is a slave.", addr) } else if cluster.direct { logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) } else { logf("SYNC %s is neither a master nor a slave.", addr) // Let stats track it as whatever was known before. return nil, nil, errors.New(addr + " is not a master nor slave") } info = &mongoServerInfo{ Master: result.IsMaster, Mongos: result.Msg == "isdbgrid", Tags: result.Tags, SetName: result.SetName, MaxWireVersion: result.MaxWireVersion, } hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) if result.Primary != "" { // First in the list to speed up master discovery. 
hosts = append(hosts, result.Primary) } hosts = append(hosts, result.Hosts...) hosts = append(hosts, result.Passives...) debugf("SYNC %s knows about the following peers: %#v", addr, hosts) return info, hosts, nil } type syncKind bool const ( completeSync syncKind = true partialSync syncKind = false ) func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) { cluster.Lock() current := cluster.servers.Search(server.ResolvedAddr) if current == nil { if syncKind == partialSync { cluster.Unlock() server.Close() log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") return } cluster.servers.Add(server) if info.Master { cluster.masters.Add(server) log("SYNC Adding ", server.Addr, " to cluster as a master.") } else { log("SYNC Adding ", server.Addr, " to cluster as a slave.") } } else { if server != current { panic("addServer attempting to add duplicated server") } if server.Info().Master != info.Master { if info.Master { log("SYNC Server ", server.Addr, " is now a master.") cluster.masters.Add(server) } else { log("SYNC Server ", server.Addr, " is now a slave.") cluster.masters.Remove(server) } } } server.SetInfo(info) debugf("SYNC Broadcasting availability of server %s", server.Addr) cluster.serverSynced.Broadcast() cluster.Unlock() } func (cluster *mongoCluster) getKnownAddrs() []string { cluster.RLock() max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len() seen := make(map[string]bool, max) known := make([]string, 0, max) add := func(addr string) { if _, found := seen[addr]; !found { seen[addr] = true known = append(known, addr) } } for _, addr := range cluster.userSeeds { add(addr) } for _, addr := range cluster.dynaSeeds { add(addr) } for _, serv := range cluster.servers.Slice() { add(serv.Addr) } cluster.RUnlock() return known } // syncServers injects a value into the cluster.sync channel to force // an iteration of the syncServersLoop function. 
func (cluster *mongoCluster) syncServers() { select { case cluster.sync <- true: default: } } // How long to wait for a checkup of the cluster topology if nothing // else kicks a synchronization before that. const syncServersDelay = 30 * time.Second const syncShortDelay = 500 * time.Millisecond // syncServersLoop loops while the cluster is alive to keep its idea of // the server topology up-to-date. It must be called just once from // newCluster. The loop iterates once syncServersDelay has passed, or // if somebody injects a value into the cluster.sync channel to force a // synchronization. A loop iteration will contact all servers in // parallel, ask them about known peers and their own role within the // cluster, and then attempt to do the same with all the peers // retrieved. func (cluster *mongoCluster) syncServersLoop() { for { debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster) cluster.Lock() if cluster.references == 0 { cluster.Unlock() break } cluster.references++ // Keep alive while syncing. direct := cluster.direct cluster.Unlock() cluster.syncServersIteration(direct) // We just synchronized, so consume any outstanding requests. select { case <-cluster.sync: default: } cluster.Release() // Hold off before allowing another sync. No point in // burning CPU looking for down servers. if !cluster.failFast { time.Sleep(syncShortDelay) } cluster.Lock() if cluster.references == 0 { cluster.Unlock() break } cluster.syncCount++ // Poke all waiters so they have a chance to timeout or // restart syncing if they wish to. cluster.serverSynced.Broadcast() // Check if we have to restart immediately either way. restart := !direct && cluster.masters.Empty() || cluster.servers.Empty() cluster.Unlock() if restart { log("SYNC No masters found. 
Will synchronize again.") time.Sleep(syncShortDelay) continue } debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster) // Hold off until somebody explicitly requests a synchronization // or it's time to check for a cluster topology change again. select { case <-cluster.sync: case <-time.After(syncServersDelay): } } debugf("SYNC Cluster %p is stopping its sync loop.", cluster) } func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer { cluster.RLock() server := cluster.servers.Search(tcpaddr.String()) cluster.RUnlock() if server != nil { return server } return newServer(addr, tcpaddr, cluster.sync, cluster.dial) } func resolveAddr(addr string) (*net.TCPAddr, error) { // Simple cases that do not need actual resolution. Works with IPv4 and v6. if host, port, err := net.SplitHostPort(addr); err == nil { if port, _ := strconv.Atoi(port); port > 0 { zone := "" if i := strings.LastIndex(host, "%"); i >= 0 { zone = host[i+1:] host = host[:i] } ip := net.ParseIP(host) if ip != nil { return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil } } } // Attempt to resolve IPv4 and v6 concurrently. addrChan := make(chan *net.TCPAddr, 2) for _, network := range []string{"udp4", "udp6"} { network := network go func() { // The unfortunate UDP dialing hack allows having a timeout on address resolution. conn, err := net.DialTimeout(network, addr, 10*time.Second) if err != nil { addrChan <- nil } else { addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) conn.Close() } }() } // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. tcpaddr := <-addrChan if tcpaddr == nil || len(tcpaddr.IP) != 4 { var timeout <-chan time.Time if tcpaddr != nil { // Don't wait too long if an IPv6 address is known. timeout = time.After(50 * time.Millisecond) } select { case <-timeout: case tcpaddr2 := <-addrChan: if tcpaddr == nil || tcpaddr2 != nil { // It's an IPv4 address or the only known address. Use it. 
tcpaddr = tcpaddr2 } } } if tcpaddr == nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) } if tcpaddr.String() != addr { debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) } return tcpaddr, nil } type pendingAdd struct { server *mongoServer info *mongoServerInfo } func (cluster *mongoCluster) syncServersIteration(direct bool) { log("SYNC Starting full topology synchronization...") var wg sync.WaitGroup var m sync.Mutex notYetAdded := make(map[string]pendingAdd) addIfFound := make(map[string]bool) seen := make(map[string]bool) syncKind := partialSync var spawnSync func(addr string, byMaster bool) spawnSync = func(addr string, byMaster bool) { wg.Add(1) go func() { defer wg.Done() tcpaddr, err := resolveAddr(addr) if err != nil { log("SYNC Failed to start sync of ", addr, ": ", err.Error()) return } resolvedAddr := tcpaddr.String() m.Lock() if byMaster { if pending, ok := notYetAdded[resolvedAddr]; ok { delete(notYetAdded, resolvedAddr) m.Unlock() cluster.addServer(pending.server, pending.info, completeSync) return } addIfFound[resolvedAddr] = true } if seen[resolvedAddr] { m.Unlock() return } seen[resolvedAddr] = true m.Unlock() server := cluster.server(addr, tcpaddr) info, hosts, err := cluster.syncServer(server) if err != nil { cluster.removeServer(server) return } m.Lock() add := direct || info.Master || addIfFound[resolvedAddr] if add { syncKind = completeSync } else { notYetAdded[resolvedAddr] = pendingAdd{server, info} } m.Unlock() if add { cluster.addServer(server, info, completeSync) } if !direct { for _, addr := range hosts { spawnSync(addr, info.Master) } } }() } knownAddrs := cluster.getKnownAddrs() for _, addr := range knownAddrs { spawnSync(addr, false) } wg.Wait() if syncKind == completeSync { logf("SYNC Synchronization was complete (got data from primary).") for _, pending := range notYetAdded { cluster.removeServer(pending.server) } } else { logf("SYNC 
Synchronization was partial (cannot talk to primary).") for _, pending := range notYetAdded { cluster.addServer(pending.server, pending.info, partialSync) } } cluster.Lock() mastersLen := cluster.masters.Len() logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) // Update dynamic seeds, but only if we have any good servers. Otherwise, // leave them alone for better chances of a successful sync in the future. if syncKind == completeSync { dynaSeeds := make([]string, cluster.servers.Len()) for i, server := range cluster.servers.Slice() { dynaSeeds[i] = server.Addr } cluster.dynaSeeds = dynaSeeds debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds) } cluster.Unlock() } // AcquireSocket returns a socket to a server in the cluster. If slaveOk is // true, it will attempt to return a socket to a slave server. If it is // false, the socket will necessarily be to a master server. func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { var started time.Time var syncCount uint warnedLimit := false for { cluster.RLock() for { mastersLen := cluster.masters.Len() slavesLen := cluster.servers.Len() - mastersLen debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk { break } if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() { break } if started.IsZero() { // Initialize after fast path above. started = time.Now() syncCount = cluster.syncCount } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount { cluster.RUnlock() return nil, errors.New("no reachable servers") } log("Waiting for servers to synchronize...") cluster.syncServers() // Remember: this will release and reacquire the lock. 
cluster.serverSynced.Wait() } var server *mongoServer if slaveOk { server = cluster.servers.BestFit(mode, serverTags) } else { server = cluster.masters.BestFit(mode, nil) } cluster.RUnlock() if server == nil { // Must have failed the requested tags. Sleep to avoid spinning. time.Sleep(1e8) continue } s, abended, err := server.AcquireSocket(poolLimit, socketTimeout) if err == errPoolLimit { if !warnedLimit { warnedLimit = true log("WARNING: Per-server connection limit reached.") } time.Sleep(100 * time.Millisecond) continue } if err != nil { cluster.removeServer(server) cluster.syncServers() continue } if abended && !slaveOk { var result isMasterResult err := cluster.isMaster(s, &result) if err != nil || !result.IsMaster { logf("Cannot confirm server %s as master (%v)", server.Addr, err) s.Release() cluster.syncServers() time.Sleep(100 * time.Millisecond) continue } } return s, nil } panic("unreached") } func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { cluster.Lock() if cluster.cachedIndex == nil { cluster.cachedIndex = make(map[string]bool) } if exists { cluster.cachedIndex[cacheKey] = true } else { delete(cluster.cachedIndex, cacheKey) } cluster.Unlock() } func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) { cluster.RLock() if cluster.cachedIndex != nil { result = cluster.cachedIndex[cacheKey] } cluster.RUnlock() return } func (cluster *mongoCluster) ResetIndexCache() { cluster.Lock() cluster.cachedIndex = make(map[string]bool) cluster.Unlock() } mgo-r2016.08.01/cluster_test.go000066400000000000000000001530241274772341400161440ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "fmt" "io" "net" "strings" "sync" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) func (s *S) TestNewSession(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Do a dummy operation to wait for connection. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Tweak safety and query settings to ensure other has copied those. session.SetSafe(nil) session.SetBatch(-1) other := session.New() defer other.Close() session.SetSafe(&mgo.Safe{}) // Clone was copied while session was unsafe, so no errors. otherColl := other.DB("mydb").C("mycoll") err = otherColl.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Original session was made safe again. 
err = coll.Insert(M{"_id": 1}) c.Assert(err, NotNil) // With New(), each session has its own socket now. stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 2) // Ensure query parameters were cloned. err = otherColl.Insert(M{"_id": 2}) c.Assert(err, IsNil) // Ping the database to ensure the nonce has been received already. c.Assert(other.Ping(), IsNil) mgo.ResetStats() iter := otherColl.Find(M{}).Iter() c.Assert(err, IsNil) m := M{} ok := iter.Next(m) c.Assert(ok, Equals, true) err = iter.Close() c.Assert(err, IsNil) // If Batch(-1) is in effect, a single document must have been received. stats = mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 1) } func (s *S) TestCloneSession(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Do a dummy operation to wait for connection. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Tweak safety and query settings to ensure clone is copying those. session.SetSafe(nil) session.SetBatch(-1) clone := session.Clone() defer clone.Close() session.SetSafe(&mgo.Safe{}) // Clone was copied while session was unsafe, so no errors. cloneColl := clone.DB("mydb").C("mycoll") err = cloneColl.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Original session was made safe again. err = coll.Insert(M{"_id": 1}) c.Assert(err, NotNil) // With Clone(), same socket is shared between sessions now. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) c.Assert(stats.SocketRefs, Equals, 2) // Refreshing one of them should let the original socket go, // while preserving the safety settings. clone.Refresh() err = cloneColl.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Must have used another connection now. stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 2) c.Assert(stats.SocketRefs, Equals, 2) // Ensure query parameters were cloned. 
err = cloneColl.Insert(M{"_id": 2}) c.Assert(err, IsNil) // Ping the database to ensure the nonce has been received already. c.Assert(clone.Ping(), IsNil) mgo.ResetStats() iter := cloneColl.Find(M{}).Iter() c.Assert(err, IsNil) m := M{} ok := iter.Next(m) c.Assert(ok, Equals, true) err = iter.Close() c.Assert(err, IsNil) // If Batch(-1) is in effect, a single document must have been received. stats = mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 1) } func (s *S) TestModeStrong(c *C) { session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, false) session.SetMode(mgo.Strong, false) c.Assert(session.Mode(), Equals, mgo.Strong) result := M{} cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 1) session.SetMode(mgo.Strong, true) stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeMonotonic(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. 
session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, false) c.Assert(session.Mode(), Equals, mgo.Monotonic) var result struct{ IsMaster bool } cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result.IsMaster, Equals, false) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result.IsMaster, Equals, true) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 2) session.SetMode(mgo.Monotonic, true) stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeMonotonicAfterStrong(c *C) { // Test that a strong session shifting to a monotonic // one preserves the socket untouched. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() // Insert something to force a connection to the master. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) session.SetMode(mgo.Monotonic, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // Master socket should still be reserved. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) // Confirm it's the master even though it's Monotonic by now. 
result := M{} cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) } func (s *S) TestModeStrongAfterMonotonic(c *C) { // Test that shifting from Monotonic to Strong while // using a slave socket will keep the socket reserved // until the master socket is necessary, so that no // switch over occurs unless it's actually necessary. // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, false) // Ensure we're talking to a slave, and reserve the socket. result := M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, false) // Switch to a Strong session. session.SetMode(mgo.Strong, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // Slave socket should still be reserved. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) // But any operation will switch it to the master. result = M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) } func (s *S) TestModeMonotonicWriteOnIteration(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, false) c.Assert(session.Mode(), Equals, mgo.Monotonic) coll1 := session.DB("mydb").C("mycoll1") coll2 := session.DB("mydb").C("mycoll2") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll1.Insert(M{"n": n}) c.Assert(err, IsNil) } // Release master so we can grab a slave again. session.Refresh() // Wait until synchronization is done. 
for { n, err := coll1.Count() c.Assert(err, IsNil) if n == len(ns) { break } } iter := coll1.Find(nil).Batch(2).Iter() i := 0 m := M{} for iter.Next(&m) { i++ if i > 3 { err := coll2.Insert(M{"n": 47 + i}) c.Assert(err, IsNil) } } c.Assert(i, Equals, len(ns)) } func (s *S) TestModeEventual(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Eventual, false) c.Assert(session.Mode(), Equals, mgo.Eventual) result := M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, false) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) result = M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeEventualAfterStrong(c *C) { // Test that a strong session shifting to an eventual // one preserves the socket untouched. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() // Insert something to force a connection to the master. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) session.SetMode(mgo.Eventual, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // Master socket should still be reserved. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) // Confirm it's the master even though it's Eventual by now. 
result := M{} cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) session.SetMode(mgo.Eventual, true) stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeStrongFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() // With strong consistency, this will open a socket to the master. result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) // Kill the master. host := result.Host s.Stop(host) // This must fail, since the connection was broken. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // With strong consistency, it fails again until reset. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) session.Refresh() // Now we should be able to talk to the new master. // Increase the timeout since this may take quite a while. session.SetSyncTimeout(3 * time.Minute) err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Not(Equals), host) // Insert some data to confirm it's indeed a master. err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) c.Assert(err, IsNil) } func (s *S) TestModePrimaryHiccup(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() // With strong consistency, this will open a socket to the master. result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) // Establish a few extra sessions to create spare sockets to // the master. This increases a bit the chances of getting an // incorrect cached socket. 
var sessions []*mgo.Session for i := 0; i < 20; i++ { sessions = append(sessions, session.Copy()) err = sessions[len(sessions)-1].Run("serverStatus", result) c.Assert(err, IsNil) } for i := range sessions { sessions[i].Close() } // Kill the master, but bring it back immediatelly. host := result.Host s.Stop(host) s.StartAll() // This must fail, since the connection was broken. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // With strong consistency, it fails again until reset. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) session.Refresh() // Now we should be able to talk to the new master. // Increase the timeout since this may take quite a while. session.SetSyncTimeout(3 * time.Minute) // Insert some data to confirm it's indeed a master. err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) c.Assert(err, IsNil) } func (s *S) TestModeMonotonicFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) // Insert something to force a switch to the master. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Wait a bit for this to be synchronized to slaves. time.Sleep(3 * time.Second) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) // Kill the master. host := result.Host s.Stop(host) // This must fail, since the connection was broken. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // With monotonic consistency, it fails again until reset. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) session.Refresh() // Now we should be able to talk to the new master. 
err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Not(Equals), host) } func (s *S) TestModeMonotonicWithSlaveFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() ssresult := &struct{ Host string }{} imresult := &struct{ IsMaster bool }{} // Figure the master while still using the strong session. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // Create new monotonic session with an explicit address to ensure // a slave is synchronized before the master, otherwise a connection // with the master may be used below for lack of other options. var addr string switch { case strings.HasSuffix(ssresult.Host, ":40021"): addr = "localhost:40022" case strings.HasSuffix(ssresult.Host, ":40022"): addr = "localhost:40021" case strings.HasSuffix(ssresult.Host, ":40023"): addr = "localhost:40021" default: c.Fatal("Unknown host: ", ssresult.Host) } session, err = mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) // Check the address of the socket associated with the monotonic session. c.Log("Running serverStatus and isMaster with monotonic session") err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) slave := ssresult.Host c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave)) c.Assert(master, Not(Equals), slave) // Kill the master. s.Stop(master) // Session must still be good, since we were talking to a slave. 
err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) c.Assert(ssresult.Host, Equals, slave, Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host)) // If we try to insert something, it'll have to hold until the new // master is available to move the connection, and work correctly. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Must now be talking to the new master. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // ... which is not the old one, since it's still dead. c.Assert(ssresult.Host, Not(Equals), master) } func (s *S) TestModeEventualFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) master := result.Host session.SetMode(mgo.Eventual, true) // Should connect to the master when needed. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Wait a bit for this to be synchronized to slaves. time.Sleep(3 * time.Second) // Kill the master. s.Stop(master) // Should still work, with the new master now. 
	coll = session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(result.Host, Not(Equals), master)
}

// TestModeSecondaryJustPrimary checks that a Secondary-mode session pointed
// at a deployment that only exposes a primary (localhost:40001) has no server
// to talk to: Ping must fail with "no reachable servers".
func (s *S) TestModeSecondaryJustPrimary(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Secondary, true)

	// No secondary exists, so any operation must report no reachable servers.
	err = session.Ping()
	c.Assert(err, ErrorMatches, "no reachable servers")
}

// TestModeSecondaryPreferredJustPrimary checks that SecondaryPreferred mode
// falls back to the primary when no secondary exists: serverStatus must
// succeed against the same primary-only deployment that Secondary mode
// cannot use.
func (s *S) TestModeSecondaryPreferredJustPrimary(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.SecondaryPreferred, true)

	// Unlike plain Secondary mode, this must succeed via the primary.
	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
}

// TestModeSecondaryPreferredFallover exercises SecondaryPreferred across a
// primary failure: reads keep using the already-selected secondary, writes
// fail with io.EOF until the session is refreshed, and after Refresh the
// session still prefers a secondary.
func (s *S) TestModeSecondaryPreferredFallover(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	// Ensure secondaries are available for being picked up.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	session.SetMode(mgo.SecondaryPreferred, true)

	// The selected server must not be the primary (supervisor name "rs1a").
	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(supvName(result.Host), Not(Equals), "rs1a")
	secondary := result.Host

	// Should connect to the primary when needed.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// Wait a bit for this to be synchronized to slaves.
	time.Sleep(3 * time.Second)

	// Kill the primary.
	s.Stop("localhost:40011")

	// It can still talk to the selected secondary.
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(result.Host, Equals, secondary)

	// But cannot speak to the primary until reset.
	coll = session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, Equals, io.EOF)

	session.Refresh()

	// Can still talk to a secondary.
err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Not(Equals), "rs1a") s.StartAll() // Should now be able to talk to the primary again. coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) } func (s *S) TestModePrimaryPreferredFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.PrimaryPreferred, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Equals, "rs1a") // Kill the primary. s.Stop("localhost:40011") // Should now fail as there was a primary socket in use already. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // Refresh so the reserved primary socket goes away. session.Refresh() // Should be able to talk to the secondary. err = session.Run("serverStatus", result) c.Assert(err, IsNil) s.StartAll() // Should wait for the new primary to become available. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // And should use the new primary in general, as it is preferred. err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Equals, "rs1a") } func (s *S) TestModePrimaryFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() session.SetSyncTimeout(3 * time.Second) session.SetMode(mgo.Primary, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Equals, "rs1a") // Kill the primary. 
	s.Stop("localhost:40011")

	session.Refresh()

	err = session.Ping()
	c.Assert(err, ErrorMatches, "no reachable servers")
}

// TestModeSecondary checks that a Secondary-mode session selects a
// non-primary member and keeps reading from that same secondary even after
// an insert forced a trip to the primary.
func (s *S) TestModeSecondary(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Secondary, true)

	// The server answering reads must not be the primary ("rs1a").
	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(supvName(result.Host), Not(Equals), "rs1a")
	secondary := result.Host

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// Reads must still go to the same secondary after the write.
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(result.Host, Equals, secondary)
}

// TestPreserveSocketCountOnSync checks that killing and restarting a replica
// set member does not leak or lose pooled sockets: once the cluster
// recovers, the number of live sockets must be back to three.
func (s *S) TestPreserveSocketCountOnSync(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	stats := mgo.GetStats()
	for stats.SocketsAlive != 3 {
		c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
		stats = mgo.GetStats()
		time.Sleep(5e8)
	}

	c.Assert(stats.SocketsAlive, Equals, 3)

	// Kill the master (with rs1, 'a' is always the master).
	s.Stop("localhost:40011")

	// Wait for the logic to run for a bit and bring it back.
	startedAll := make(chan bool)
	go func() {
		time.Sleep(5e9)
		s.StartAll()
		startedAll <- true
	}()

	// Do not allow the test to return before the goroutine above is done.
	defer func() {
		<-startedAll
	}()

	// Do an action to kick the resync logic in, and also to
	// wait until the cluster recognizes the server is back.
	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, IsNil)
	c.Assert(result.Ok, Equals, true)

	// Poll (up to ~10s) for the socket count to return to three.
	for i := 0; i != 20; i++ {
		stats = mgo.GetStats()
		if stats.SocketsAlive == 3 {
			break
		}
		c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive)
		time.Sleep(5e8)
	}

	// Ensure the number of sockets is preserved after syncing.
stats = mgo.GetStats() c.Assert(stats.SocketsAlive, Equals, 3) c.Assert(stats.SocketsInUse, Equals, 1) c.Assert(stats.SocketRefs, Equals, 1) } // Connect to the master of a deployment with a single server, // run an insert, and then ensure the insert worked and that a // single connection was established. func (s *S) TestTopologySyncWithSingleMaster(c *C) { // Use hostname here rather than IP, to make things trickier. session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1, "b": 2}) c.Assert(err, IsNil) // One connection used for discovery. Master socket recycled for // insert. Socket is reserved after insert. stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 0) c.Assert(stats.SocketsInUse, Equals, 1) // Refresh session and socket must be released. session.Refresh() stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestTopologySyncWithSlaveSeed(c *C) { // That's supposed to be a slave. Must run discovery // and find out master to insert successfully. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"a": 1, "b": 2}) result := struct{ Ok bool }{} err = session.Run("getLastError", &result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, true) // One connection to each during discovery. Master // socket recycled for insert. stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 2) // Only one socket reference alive, in the master socket owned // by the above session. c.Assert(stats.SocketsInUse, Equals, 1) // Refresh it, and it must be gone. 
	session.Refresh()
	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}

// TestSyncTimeout checks that SetSyncTimeout bounds how long an operation
// blocks waiting for a reachable server: with the only server stopped, the
// operation must fail after at least one full timeout period but before two
// have elapsed.
func (s *S) TestSyncTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	s.Stop("localhost:40001")

	timeout := 3 * time.Second
	session.SetSyncTimeout(timeout)
	started := time.Now()

	// Do something.
	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, ErrorMatches, "no reachable servers")
	// Elapsed time must be within [timeout, 2*timeout).
	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
	c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
}

// TestDialWithTimeout checks that DialWithTimeout gives up after roughly the
// requested duration when nothing is listening on the target port, returning
// a nil session and "no reachable servers".
func (s *S) TestDialWithTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	timeout := 2 * time.Second
	started := time.Now()

	// 40009 isn't used by the test servers.
	session, err := mgo.DialWithTimeout("localhost:40009", timeout)
	if session != nil {
		session.Close()
	}
	c.Assert(err, ErrorMatches, "no reachable servers")
	c.Assert(session, IsNil)
	// Elapsed time must be within [timeout, 2*timeout).
	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
	c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
}

// TestSocketTimeout checks that SetSocketTimeout makes an operation against
// a frozen (unresponsive) server fail with an i/o timeout instead of
// blocking indefinitely.
func (s *S) TestSocketTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	s.Freeze("localhost:40001")

	timeout := 3 * time.Second
	session.SetSocketTimeout(timeout)
	started := time.Now()

	// Do something.
result := struct{ Ok bool }{} err = session.Run("getLastError", &result) c.Assert(err, ErrorMatches, ".*: i/o timeout") c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) } func (s *S) TestSocketTimeoutOnDial(c *C) { if *fast { c.Skip("-fast") } timeout := 1 * time.Second defer mgo.HackSyncSocketTimeout(timeout)() s.Freeze("localhost:40001") started := time.Now() session, err := mgo.DialWithTimeout("localhost:40001", timeout) c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true) } func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() timeout := 2 * time.Second session.SetSocketTimeout(timeout) // Do something that relies on the timeout and works. c.Assert(session.Ping(), IsNil) // Freeze and wait for the timeout to go by. s.Freeze("localhost:40001") time.Sleep(timeout + 500*time.Millisecond) s.Thaw("localhost:40001") // Do something again. The timeout above should not have killed // the socket as there was nothing to be done. 
c.Assert(session.Ping(), IsNil) } func (s *S) TestDialWithReplicaSetName(c *C) { seedLists := [][]string{ // rs1 primary and rs2 primary []string{"localhost:40011", "localhost:40021"}, // rs1 primary and rs2 secondary []string{"localhost:40011", "localhost:40022"}, // rs1 secondary and rs2 primary []string{"localhost:40012", "localhost:40021"}, // rs1 secondary and rs2 secondary []string{"localhost:40012", "localhost:40022"}, } rs2Members := []string{":40021", ":40022", ":40023"} verifySyncedServers := func(session *mgo.Session, numServers int) { // wait for the server(s) to be synced for len(session.LiveServers()) != numServers { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // ensure none of the rs2 set members are communicated with for _, addr := range session.LiveServers() { for _, rs2Member := range rs2Members { c.Assert(strings.HasSuffix(addr, rs2Member), Equals, false) } } } // only communication with rs1 members is expected for _, seedList := range seedLists { info := mgo.DialInfo{ Addrs: seedList, Timeout: 5 * time.Second, ReplicaSetName: "rs1", } session, err := mgo.DialWithInfo(&info) c.Assert(err, IsNil) verifySyncedServers(session, 3) session.Close() info.Direct = true session, err = mgo.DialWithInfo(&info) c.Assert(err, IsNil) verifySyncedServers(session, 1) session.Close() connectionUrl := fmt.Sprintf("mongodb://%v/?replicaSet=rs1", strings.Join(seedList, ",")) session, err = mgo.Dial(connectionUrl) c.Assert(err, IsNil) verifySyncedServers(session, 3) session.Close() connectionUrl += "&connect=direct" session, err = mgo.Dial(connectionUrl) c.Assert(err, IsNil) verifySyncedServers(session, 1) session.Close() } } func (s *S) TestDirect(c *C) { session, err := mgo.Dial("localhost:40012?connect=direct") c.Assert(err, IsNil) defer session.Close() // We know that server is a slave. 
session.SetMode(mgo.Monotonic, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) stats := mgo.GetStats() c.Assert(stats.SocketsAlive, Equals, 1) c.Assert(stats.SocketsInUse, Equals, 1) c.Assert(stats.SocketRefs, Equals, 1) // We've got no master, so it'll timeout. session.SetSyncTimeout(5e8 * time.Nanosecond) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) c.Assert(err, ErrorMatches, "no reachable servers") // Writing to the local database is okay. coll = session.DB("local").C("mycoll") defer coll.RemoveAll(nil) id := bson.NewObjectId() err = coll.Insert(M{"_id": id}) c.Assert(err, IsNil) // Data was stored in the right server. n, err := coll.Find(M{"_id": id}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) // Server hasn't changed. result.Host = "" err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) } func (s *S) TestDirectToUnknownStateMember(c *C) { session, err := mgo.Dial("localhost:40041?connect=direct") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) // We've got no master, so it'll timeout. session.SetSyncTimeout(5e8 * time.Nanosecond) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) c.Assert(err, ErrorMatches, "no reachable servers") // Slave is still reachable. 
result.Host = "" err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) } func (s *S) TestFailFast(c *C) { info := mgo.DialInfo{ Addrs: []string{"localhost:99999"}, Timeout: 5 * time.Second, FailFast: true, } started := time.Now() _, err := mgo.DialWithInfo(&info) c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) } func (s *S) countQueries(c *C, server string) (n int) { defer func() { c.Logf("Queries for %q: %d", server, n) }() session, err := mgo.Dial(server + "?connect=direct") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) var result struct { OpCounters struct { Query int } Metrics struct { Commands struct{ Find struct{ Total int } } } } err = session.Run("serverStatus", &result) c.Assert(err, IsNil) if s.versionAtLeast(3, 2) { return result.Metrics.Commands.Find.Total } return result.OpCounters.Query } func (s *S) countCommands(c *C, server, commandName string) (n int) { defer func() { c.Logf("Queries for %q: %d", server, n) }() session, err := mgo.Dial(server + "?connect=direct") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) var result struct { Metrics struct { Commands map[string]struct{ Total int } } } err = session.Run("serverStatus", &result) c.Assert(err, IsNil) return result.Metrics.Commands[commandName].Total } func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() ssresult := &struct{ Host string }{} imresult := &struct{ IsMaster bool }{} // Figure the master while still using the strong session. 
err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // Ensure mongos is aware about the current topology. s.Stop(":40201") s.StartAll() mongos, err := mgo.Dial("localhost:40202") c.Assert(err, IsNil) defer mongos.Close() // Insert some data as otherwise 3.2+ doesn't seem to run the query at all. err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) c.Assert(err, IsNil) // Wait until all servers see the data. for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} { session, err := mgo.Dial(addr + "?connect=direct") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) for i := 300; i >= 0; i-- { n, err := session.DB("mydb").C("mycoll").Find(nil).Count() c.Assert(err, IsNil) if n == 1 { break } if i == 0 { c.Fatalf("Inserted data never reached " + addr) } time.Sleep(100 * time.Millisecond) } } // Collect op counters for everyone. q21a := s.countQueries(c, "localhost:40021") q22a := s.countQueries(c, "localhost:40022") q23a := s.countQueries(c, "localhost:40023") // Do a SlaveOk query through MongoS mongos.SetMode(mgo.Monotonic, true) coll := mongos.DB("mydb").C("mycoll") var result struct{ N int } for i := 0; i != 5; i++ { err = coll.Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } // Collect op counters for everyone again. 
q21b := s.countQueries(c, "localhost:40021") q22b := s.countQueries(c, "localhost:40022") q23b := s.countQueries(c, "localhost:40023") var masterDelta, slaveDelta int switch hostPort(master) { case "40021": masterDelta = q21b - q21a slaveDelta = (q22b - q22a) + (q23b - q23a) case "40022": masterDelta = q22b - q22a slaveDelta = (q21b - q21a) + (q23b - q23a) case "40023": masterDelta = q23b - q23a slaveDelta = (q21b - q21a) + (q22b - q22a) default: c.Fatal("Uh?") } c.Check(masterDelta, Equals, 0) // Just the counting itself. c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. } func (s *S) TestSecondaryModeWithMongos(c *C) { session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() ssresult := &struct{ Host string }{} imresult := &struct{ IsMaster bool }{} // Figure the master while still using the strong session. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // Ensure mongos is aware about the current topology. s.Stop(":40201") s.StartAll() mongos, err := mgo.Dial("localhost:40202") c.Assert(err, IsNil) defer mongos.Close() mongos.SetSyncTimeout(5 * time.Second) // Insert some data as otherwise 3.2+ doesn't seem to run the query at all. err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) c.Assert(err, IsNil) // Wait until all servers see the data. 
for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} { session, err := mgo.Dial(addr + "?connect=direct") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) for i := 300; i >= 0; i-- { n, err := session.DB("mydb").C("mycoll").Find(nil).Count() c.Assert(err, IsNil) if n == 1 { break } if i == 0 { c.Fatalf("Inserted data never reached " + addr) } time.Sleep(100 * time.Millisecond) } } // Collect op counters for everyone. q21a := s.countQueries(c, "localhost:40021") q22a := s.countQueries(c, "localhost:40022") q23a := s.countQueries(c, "localhost:40023") // Do a Secondary query through MongoS mongos.SetMode(mgo.Secondary, true) coll := mongos.DB("mydb").C("mycoll") var result struct{ N int } for i := 0; i != 5; i++ { err = coll.Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } // Collect op counters for everyone again. q21b := s.countQueries(c, "localhost:40021") q22b := s.countQueries(c, "localhost:40022") q23b := s.countQueries(c, "localhost:40023") var masterDelta, slaveDelta int switch hostPort(master) { case "40021": masterDelta = q21b - q21a slaveDelta = (q22b - q22a) + (q23b - q23a) case "40022": masterDelta = q22b - q22a slaveDelta = (q21b - q21a) + (q23b - q23a) case "40023": masterDelta = q23b - q23a slaveDelta = (q21b - q21a) + (q22b - q22a) default: c.Fatal("Uh?") } c.Check(masterDelta, Equals, 0) // Just the counting itself. c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. 
} func (s *S) TestSecondaryModeWithMongosInsert(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40202") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Secondary, true) session.SetSyncTimeout(4 * time.Second) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) var result struct{ A int } coll.Find(nil).One(&result) c.Assert(result.A, Equals, 1) } func (s *S) TestRemovalOfClusterMember(c *C) { if *fast { c.Skip("-fast") } master, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer master.Close() // Wait for cluster to fully sync up. for i := 0; i < 10; i++ { if len(master.LiveServers()) == 3 { break } time.Sleep(5e8) } if len(master.LiveServers()) != 3 { c.Fatalf("Test started with bad cluster state: %v", master.LiveServers()) } result := &struct { IsMaster bool Me string }{} slave := master.Copy() slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently. err = slave.Run("isMaster", result) c.Assert(err, IsNil) c.Assert(result.IsMaster, Equals, false) slaveAddr := result.Me defer func() { config := map[string]string{ "40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`, "40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: "b"}}`, "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`, } master.Refresh() master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) master.Close() slave.Close() // Ensure suite syncs up with the changes before next test. s.Stop(":40201") s.StartAll() time.Sleep(8 * time.Second) // TODO Find a better way to find out when mongos is fully aware that all // servers are up. Without that follow up tests that depend on mongos will // break due to their expectation of things being in a working state. 
}() c.Logf("========== Removing slave: %s ==========", slaveAddr) master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil) master.Refresh() // Give the cluster a moment to catch up by doing a roundtrip to the master. err = master.Ping() c.Assert(err, IsNil) time.Sleep(3e9) // This must fail since the slave has been taken off the cluster. err = slave.Ping() c.Assert(err, NotNil) for i := 0; i < 15; i++ { if len(master.LiveServers()) == 2 { break } time.Sleep(time.Second) } live := master.LiveServers() if len(live) != 2 { c.Errorf("Removed server still considered live: %#s", live) } c.Log("========== Test succeeded. ==========") } func (s *S) TestPoolLimitSimple(c *C) { for test := 0; test < 2; test++ { var session *mgo.Session var err error if test == 0 { session, err = mgo.Dial("localhost:40001") c.Assert(err, IsNil) session.SetPoolLimit(1) } else { session, err = mgo.Dial("localhost:40001?maxPoolSize=1") c.Assert(err, IsNil) } defer session.Close() // Put one socket in use. c.Assert(session.Ping(), IsNil) done := make(chan time.Duration) // Now block trying to get another one due to the pool limit. go func() { copy := session.Copy() defer copy.Close() started := time.Now() c.Check(copy.Ping(), IsNil) done <- time.Now().Sub(started) }() time.Sleep(300 * time.Millisecond) // Put the one socket back in the pool, freeing it for the copy. session.Refresh() delay := <-done c.Assert(delay > 300*time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) } } func (s *S) TestPoolLimitMany(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() stats := mgo.GetStats() for stats.SocketsAlive != 3 { c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive) stats = mgo.GetStats() time.Sleep(5e8) } const poolLimit = 64 session.SetPoolLimit(poolLimit) // Consume the whole limit for the master. 
var master []*mgo.Session for i := 0; i < poolLimit; i++ { s := session.Copy() defer s.Close() c.Assert(s.Ping(), IsNil) master = append(master, s) } before := time.Now() go func() { time.Sleep(3e9) master[0].Refresh() }() // Then, a single ping must block, since it would need another // connection to the master, over the limit. Once the goroutine // above releases its socket, it should move on. session.Ping() delay := time.Now().Sub(before) c.Assert(delay > 3e9, Equals, true) c.Assert(delay < 6e9, Equals, true) } func (s *S) TestSetModeEventualIterBug(c *C) { session1, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session1.Close() session1.SetMode(mgo.Eventual, false) coll1 := session1.DB("mydb").C("mycoll") const N = 100 for i := 0; i < N; i++ { err = coll1.Insert(M{"_id": i}) c.Assert(err, IsNil) } c.Logf("Waiting until secondary syncs") for { n, err := coll1.Count() c.Assert(err, IsNil) if n == N { c.Logf("Found all") break } } session2, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session2.Close() session2.SetMode(mgo.Eventual, false) coll2 := session2.DB("mydb").C("mycoll") i := 0 iter := coll2.Find(nil).Batch(10).Iter() var result struct{} for iter.Next(&result) { i++ } c.Assert(iter.Close(), Equals, nil) c.Assert(i, Equals, N) } func (s *S) TestCustomDialOld(c *C) { dials := make(chan bool, 16) dial := func(addr net.Addr) (net.Conn, error) { tcpaddr, ok := addr.(*net.TCPAddr) if !ok { return nil, fmt.Errorf("unexpected address type: %T", addr) } dials <- true return net.DialTCP("tcp", nil, tcpaddr) } info := mgo.DialInfo{ Addrs: []string{"localhost:40012"}, Dial: dial, } // Use hostname here rather than IP, to make things trickier. 
session, err := mgo.DialWithInfo(&info) c.Assert(err, IsNil) defer session.Close() const N = 3 for i := 0; i < N; i++ { select { case <-dials: case <-time.After(5 * time.Second): c.Fatalf("expected %d dials, got %d", N, i) } } select { case <-dials: c.Fatalf("got more dials than expected") case <-time.After(100 * time.Millisecond): } } func (s *S) TestCustomDialNew(c *C) { dials := make(chan bool, 16) dial := func(addr *mgo.ServerAddr) (net.Conn, error) { dials <- true if addr.TCPAddr().Port == 40012 { c.Check(addr.String(), Equals, "localhost:40012") } return net.DialTCP("tcp", nil, addr.TCPAddr()) } info := mgo.DialInfo{ Addrs: []string{"localhost:40012"}, DialServer: dial, } // Use hostname here rather than IP, to make things trickier. session, err := mgo.DialWithInfo(&info) c.Assert(err, IsNil) defer session.Close() const N = 3 for i := 0; i < N; i++ { select { case <-dials: case <-time.After(5 * time.Second): c.Fatalf("expected %d dials, got %d", N, i) } } select { case <-dials: c.Fatalf("got more dials than expected") case <-time.After(100 * time.Millisecond): } } func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { if *fast { c.Skip("-fast") } // Dial the shard. session, err := mgo.Dial("localhost:40203") c.Assert(err, IsNil) defer session.Close() // Login and insert something to make it more realistic. session.DB("admin").Login("root", "rapadura") coll := session.DB("mydb").C("mycoll") err = coll.Insert(bson.M{"n": 1}) c.Assert(err, IsNil) // Dial the replica set to figure the master out. rs, err := mgo.Dial("root:rapadura@localhost:40031") c.Assert(err, IsNil) defer rs.Close() // With strong consistency, this will open a socket to the master. result := &struct{ Host string }{} err = rs.Run("serverStatus", result) c.Assert(err, IsNil) // Kill the master. host := result.Host s.Stop(host) // This must fail, since the connection was broken. err = rs.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // This won't work because the master just died. 
err = coll.Insert(bson.M{"n": 2}) c.Assert(err, NotNil) // Refresh session and wait for re-election. session.Refresh() for i := 0; i < 60; i++ { err = coll.Insert(bson.M{"n": 3}) if err == nil { break } c.Logf("Waiting for replica set to elect a new master. Last error: %v", err) time.Sleep(500 * time.Millisecond) } c.Assert(err, IsNil) count, err := coll.Count() c.Assert(count > 1, Equals, true) } func (s *S) TestNearestSecondary(c *C) { defer mgo.HackPingDelay(300 * time.Millisecond)() rs1a := "127.0.0.1:40011" rs1b := "127.0.0.1:40012" rs1c := "127.0.0.1:40013" s.Freeze(rs1b) session, err := mgo.Dial(rs1a) c.Assert(err, IsNil) defer session.Close() // Wait for the sync up to run through the first couple of servers. for len(session.LiveServers()) != 2 { c.Log("Waiting for two servers to be alive...") time.Sleep(100 * time.Millisecond) } // Extra delay to ensure the third server gets penalized. time.Sleep(500 * time.Millisecond) // Release third server. s.Thaw(rs1b) // Wait for it to come up. for len(session.LiveServers()) != 3 { c.Log("Waiting for all servers to be alive...") time.Sleep(100 * time.Millisecond) } session.SetMode(mgo.Monotonic, true) var result struct{ Host string } // See which slave picks the line, several times to avoid chance. for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) } if *fast { // Don't hold back for several seconds. return } // Now hold the other server for long enough to penalize it. s.Freeze(rs1c) time.Sleep(5 * time.Second) s.Thaw(rs1c) // Wait for the ping to be processed. time.Sleep(500 * time.Millisecond) // Repeating the test should now pick the former server consistently. 
for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1b)) } } func (s *S) TestNearestServer(c *C) { defer mgo.HackPingDelay(300 * time.Millisecond)() rs1a := "127.0.0.1:40011" rs1b := "127.0.0.1:40012" rs1c := "127.0.0.1:40013" session, err := mgo.Dial(rs1a) c.Assert(err, IsNil) defer session.Close() s.Freeze(rs1a) s.Freeze(rs1b) // Extra delay to ensure the first two servers get penalized. time.Sleep(500 * time.Millisecond) // Release them. s.Thaw(rs1a) s.Thaw(rs1b) // Wait for everyone to come up. for len(session.LiveServers()) != 3 { c.Log("Waiting for all servers to be alive...") time.Sleep(100 * time.Millisecond) } session.SetMode(mgo.Nearest, true) var result struct{ Host string } // See which server picks the line, several times to avoid chance. for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) } if *fast { // Don't hold back for several seconds. return } // Now hold the two secondaries for long enough to penalize them. s.Freeze(rs1b) s.Freeze(rs1c) time.Sleep(5 * time.Second) s.Thaw(rs1b) s.Thaw(rs1c) // Wait for the ping to be processed. time.Sleep(500 * time.Millisecond) // Repeating the test should now pick the primary server consistently. 
for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1a)) } } func (s *S) TestConnectCloseConcurrency(c *C) { restore := mgo.HackPingDelay(500 * time.Millisecond) defer restore() var wg sync.WaitGroup const n = 500 wg.Add(n) for i := 0; i < n; i++ { go func() { defer wg.Done() session, err := mgo.Dial("localhost:40001") if err != nil { c.Fatal(err) } time.Sleep(1) session.Close() }() } wg.Wait() } func (s *S) TestSelectServers(c *C) { if !s.versionAtLeast(2, 2) { c.Skip("read preferences introduced in 2.2") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Eventual, true) var result struct{ Host string } session.Refresh() session.SelectServers(bson.D{{"rs1", "b"}}) err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, "40012") session.Refresh() session.SelectServers(bson.D{{"rs1", "c"}}) err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, "40013") } func (s *S) TestSelectServersWithMongos(c *C) { if !s.versionAtLeast(2, 2) { c.Skip("read preferences introduced in 2.2") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() ssresult := &struct{ Host string }{} imresult := &struct{ IsMaster bool }{} // Figure the master while still using the strong session. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) var slave1, slave2 string switch hostPort(master) { case "40021": slave1, slave2 = "b", "c" case "40022": slave1, slave2 = "a", "c" case "40023": slave1, slave2 = "a", "b" } // Collect op counters for everyone. 
q21a := s.countQueries(c, "localhost:40021") q22a := s.countQueries(c, "localhost:40022") q23a := s.countQueries(c, "localhost:40023") // Do a SlaveOk query through MongoS mongos, err := mgo.Dial("localhost:40202") c.Assert(err, IsNil) defer mongos.Close() mongos.SetMode(mgo.Monotonic, true) mongos.Refresh() mongos.SelectServers(bson.D{{"rs2", slave1}}) coll := mongos.DB("mydb").C("mycoll") result := &struct{}{} for i := 0; i != 5; i++ { err := coll.Find(nil).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } mongos.Refresh() mongos.SelectServers(bson.D{{"rs2", slave2}}) coll = mongos.DB("mydb").C("mycoll") for i := 0; i != 7; i++ { err := coll.Find(nil).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } // Collect op counters for everyone again. q21b := s.countQueries(c, "localhost:40021") q22b := s.countQueries(c, "localhost:40022") q23b := s.countQueries(c, "localhost:40023") switch hostPort(master) { case "40021": c.Check(q21b-q21a, Equals, 0) c.Check(q22b-q22a, Equals, 5) c.Check(q23b-q23a, Equals, 7) case "40022": c.Check(q21b-q21a, Equals, 5) c.Check(q22b-q22a, Equals, 0) c.Check(q23b-q23a, Equals, 7) case "40023": c.Check(q21b-q21a, Equals, 5) c.Check(q22b-q22a, Equals, 7) c.Check(q23b-q23a, Equals, 0) default: c.Fatal("Uh?") } } func (s *S) TestDoNotFallbackToMonotonic(c *C) { // There was a bug at some point that some functions were // falling back to Monotonic mode. This test ensures all listIndexes // commands go to the primary, as should happen since the session is // in Strong mode. 
if !s.versionAtLeast(3, 0) { c.Skip("command-counting logic depends on 3.0+") } session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() for i := 0; i < 15; i++ { q11a := s.countCommands(c, "localhost:40011", "listIndexes") q12a := s.countCommands(c, "localhost:40012", "listIndexes") q13a := s.countCommands(c, "localhost:40013", "listIndexes") _, err := session.DB("local").C("system.indexes").Indexes() c.Assert(err, IsNil) q11b := s.countCommands(c, "localhost:40011", "listIndexes") q12b := s.countCommands(c, "localhost:40012", "listIndexes") q13b := s.countCommands(c, "localhost:40013", "listIndexes") c.Assert(q11b, Equals, q11a+1) c.Assert(q12b, Equals, q12a) c.Assert(q13b, Equals, q13a) } } mgo-r2016.08.01/dbtest/000077500000000000000000000000001274772341400143555ustar00rootroot00000000000000mgo-r2016.08.01/dbtest/dbserver.go000066400000000000000000000117131274772341400165230ustar00rootroot00000000000000package dbtest import ( "bytes" "fmt" "net" "os" "os/exec" "strconv" "time" "gopkg.in/mgo.v2" "gopkg.in/tomb.v2" ) // DBServer controls a MongoDB server process to be used within test suites. // // The test server is started when Session is called the first time and should // remain running for the duration of all tests, with the Wipe method being // called between tests (before each of them) to clear stored data. After all tests // are done, the Stop method should be called to stop the test server. // // Before the DBServer is used the SetPath method must be called to define // the location for the database files to be stored. type DBServer struct { session *mgo.Session output bytes.Buffer server *exec.Cmd dbpath string host string tomb tomb.Tomb } // SetPath defines the path to the directory where the database files will be // stored if it is started. The directory path itself is not created or removed // by the test helper. 
func (dbs *DBServer) SetPath(dbpath string) { dbs.dbpath = dbpath } func (dbs *DBServer) start() { if dbs.server != nil { panic("DBServer already started") } if dbs.dbpath == "" { panic("DBServer.SetPath must be called before using the server") } mgo.SetStats(true) l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic("unable to listen on a local address: " + err.Error()) } addr := l.Addr().(*net.TCPAddr) l.Close() dbs.host = addr.String() args := []string{ "--dbpath", dbs.dbpath, "--bind_ip", "127.0.0.1", "--port", strconv.Itoa(addr.Port), "--nssize", "1", "--noprealloc", "--smallfiles", "--nojournal", } dbs.tomb = tomb.Tomb{} dbs.server = exec.Command("mongod", args...) dbs.server.Stdout = &dbs.output dbs.server.Stderr = &dbs.output err = dbs.server.Start() if err != nil { panic(err) } dbs.tomb.Go(dbs.monitor) dbs.Wipe() } func (dbs *DBServer) monitor() error { dbs.server.Process.Wait() if dbs.tomb.Alive() { // Present some debugging information. fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes()) fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") cmd.Stdout = os.Stderr cmd.Stderr = os.Stderr cmd.Run() fmt.Fprintf(os.Stderr, "----------------------------------------\n") panic("mongod process died unexpectedly") } return nil } // Stop stops the test server process, if it is running. // // It's okay to call Stop multiple times. After the test server is // stopped it cannot be restarted. // // All database sessions must be closed before or while the Stop method // is running. Otherwise Stop will panic after a timeout informing that // there is a session leak. 
func (dbs *DBServer) Stop() { if dbs.session != nil { dbs.checkSessions() if dbs.session != nil { dbs.session.Close() dbs.session = nil } } if dbs.server != nil { dbs.tomb.Kill(nil) dbs.server.Process.Signal(os.Interrupt) select { case <-dbs.tomb.Dead(): case <-time.After(5 * time.Second): panic("timeout waiting for mongod process to die") } dbs.server = nil } } // Session returns a new session to the server. The returned session // must be closed after the test is done with it. // // The first Session obtained from a DBServer will start it. func (dbs *DBServer) Session() *mgo.Session { if dbs.server == nil { dbs.start() } if dbs.session == nil { mgo.ResetStats() var err error dbs.session, err = mgo.Dial(dbs.host + "/test") if err != nil { panic(err) } } return dbs.session.Copy() } // checkSessions ensures all mgo sessions opened were properly closed. // For slightly faster tests, it may be disabled setting the // environmnet variable CHECK_SESSIONS to 0. func (dbs *DBServer) checkSessions() { if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil { return } dbs.session.Close() dbs.session = nil for i := 0; i < 100; i++ { stats := mgo.GetStats() if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { return } time.Sleep(100 * time.Millisecond) } panic("There are mgo sessions still alive.") } // Wipe drops all created databases and their data. // // The MongoDB server remains running if it was prevoiusly running, // or stopped if it was previously stopped. // // All database sessions must be closed before or while the Wipe method // is running. Otherwise Wipe will panic after a timeout informing that // there is a session leak. 
func (dbs *DBServer) Wipe() { if dbs.server == nil || dbs.session == nil { return } dbs.checkSessions() sessionUnset := dbs.session == nil session := dbs.Session() defer session.Close() if sessionUnset { dbs.session.Close() dbs.session = nil } names, err := session.DatabaseNames() if err != nil { panic(err) } for _, name := range names { switch name { case "admin", "local", "config": default: err = session.DB(name).DropDatabase() if err != nil { panic(err) } } } } mgo-r2016.08.01/dbtest/dbserver_test.go000066400000000000000000000041541274772341400175630ustar00rootroot00000000000000package dbtest_test import ( "os" "testing" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/dbtest" ) type M map[string]interface{} func TestAll(t *testing.T) { TestingT(t) } type S struct { oldCheckSessions string } var _ = Suite(&S{}) func (s *S) SetUpTest(c *C) { s.oldCheckSessions = os.Getenv("CHECK_SESSIONS") os.Setenv("CHECK_SESSIONS", "") } func (s *S) TearDownTest(c *C) { os.Setenv("CHECK_SESSIONS", s.oldCheckSessions) } func (s *S) TestWipeData(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() session := server.Session() err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) session.Close() c.Assert(err, IsNil) server.Wipe() session = server.Session() names, err := session.DatabaseNames() session.Close() c.Assert(err, IsNil) for _, name := range names { if name != "local" && name != "admin" { c.Fatalf("Wipe should have removed this database: %s", name) } } } func (s *S) TestStop(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() // Server should not be running. process := server.ProcessTest() c.Assert(process, IsNil) session := server.Session() addr := session.LiveServers()[0] session.Close() // Server should be running now. process = server.ProcessTest() p, err := os.FindProcess(process.Pid) c.Assert(err, IsNil) p.Release() server.Stop() // Server should not be running anymore. 
session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) if session != nil { session.Close() c.Fatalf("Stop did not stop the server") } } func (s *S) TestCheckSessions(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() session := server.Session() defer session.Close() c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.") } func (s *S) TestCheckSessionsDisabled(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() os.Setenv("CHECK_SESSIONS", "0") // Should not panic, although it looks to Wipe like this session will leak. session := server.Session() defer session.Close() server.Wipe() } mgo-r2016.08.01/dbtest/export_test.go000066400000000000000000000002301274772341400172570ustar00rootroot00000000000000package dbtest import ( "os" ) func (dbs *DBServer) ProcessTest() *os.Process { if dbs.server == nil { return nil } return dbs.server.Process } mgo-r2016.08.01/doc.go000066400000000000000000000023171274772341400141670ustar00rootroot00000000000000// Package mgo offers a rich MongoDB driver for Go. // // Details about the mgo project (pronounced as "mango") are found // in its web page: // // http://labix.org/mgo // // Usage of the driver revolves around the concept of sessions. To // get started, obtain a session using the Dial function: // // session, err := mgo.Dial(url) // // This will establish one or more connections with the cluster of // servers defined by the url parameter. From then on, the cluster // may be queried with multiple consistency rules (see SetMode) and // documents retrieved with statements such as: // // c := session.DB(database).C(collection) // err := c.Find(query).One(&result) // // New sessions are typically created by calling session.Copy on the // initial session obtained at dial time. These new sessions will share // the same cluster information and connection pool, and may be easily // handed into other methods and functions for organizing logic. 
// Every session created must have its Close method called at the end // of its life time, so its resources may be put back in the pool or // collected, depending on the case. // // For more details, see the documentation for the types and methods. // package mgo mgo-r2016.08.01/export_test.go000066400000000000000000000011121274772341400157720ustar00rootroot00000000000000package mgo import ( "time" ) func HackPingDelay(newDelay time.Duration) (restore func()) { globalMutex.Lock() defer globalMutex.Unlock() oldDelay := pingDelay restore = func() { globalMutex.Lock() pingDelay = oldDelay globalMutex.Unlock() } pingDelay = newDelay return } func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) { globalMutex.Lock() defer globalMutex.Unlock() oldTimeout := syncSocketTimeout restore = func() { globalMutex.Lock() syncSocketTimeout = oldTimeout globalMutex.Unlock() } syncSocketTimeout = newTimeout return } mgo-r2016.08.01/gridfs.go000066400000000000000000000513271274772341400147050ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "crypto/md5" "encoding/hex" "errors" "hash" "io" "os" "sync" "time" "gopkg.in/mgo.v2/bson" ) type GridFS struct { Files *Collection Chunks *Collection } type gfsFileMode int const ( gfsClosed gfsFileMode = 0 gfsReading gfsFileMode = 1 gfsWriting gfsFileMode = 2 ) type GridFile struct { m sync.Mutex c sync.Cond gfs *GridFS mode gfsFileMode err error chunk int offset int64 wpending int wbuf []byte wsum hash.Hash rbuf []byte rcache *gfsCachedChunk doc gfsFile } type gfsFile struct { Id interface{} "_id" ChunkSize int "chunkSize" UploadDate time.Time "uploadDate" Length int64 ",minsize" MD5 string Filename string ",omitempty" ContentType string "contentType,omitempty" Metadata *bson.Raw ",omitempty" } type gfsChunk struct { Id interface{} "_id" FilesId interface{} "files_id" N int Data []byte } type gfsCachedChunk struct { wait sync.Mutex n int data []byte err error } func newGridFS(db *Database, prefix string) *GridFS { return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")} } func (gfs *GridFS) newFile() *GridFile { file := &GridFile{gfs: gfs} file.c.L = &file.m //runtime.SetFinalizer(file, finalizeFile) return file } func finalizeFile(file *GridFile) { file.Close() } // Create creates a new file with the provided name in the GridFS. If the file // name already exists, a new version will be inserted with an up-to-date // uploadDate that will cause it to be atomically visible to the Open and // OpenId methods. 
If the file name is not important, an empty name may be // provided and the file Id used instead. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. // // A simple example inserting a new file: // // func check(err error) { // if err != nil { // panic(err.String()) // } // } // file, err := db.GridFS("fs").Create("myfile.txt") // check(err) // n, err := file.Write([]byte("Hello world!")) // check(err) // err = file.Close() // check(err) // fmt.Printf("%d bytes written\n", n) // // The io.Writer interface is implemented by *GridFile and may be used to // help on the file creation. For example: // // file, err := db.GridFS("fs").Create("myfile.txt") // check(err) // messages, err := os.Open("/var/log/messages") // check(err) // defer messages.Close() // err = io.Copy(file, messages) // check(err) // err = file.Close() // check(err) // func (gfs *GridFS) Create(name string) (file *GridFile, err error) { file = gfs.newFile() file.mode = gfsWriting file.wsum = md5.New() file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name} return } // OpenId returns the file with the provided id, for reading. // If the file isn't found, err will be set to mgo.ErrNotFound. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. // // The following example will print the first 8192 bytes from the file: // // func check(err error) { // if err != nil { // panic(err.String()) // } // } // file, err := db.GridFS("fs").OpenId(objid) // check(err) // b := make([]byte, 8192) // n, err := file.Read(b) // check(err) // fmt.Println(string(b)) // check(err) // err = file.Close() // check(err) // fmt.Printf("%d bytes read\n", n) // // The io.Reader interface is implemented by *GridFile and may be used to // deal with it. 
As an example, the following snippet will dump the whole // file into the standard output: // // file, err := db.GridFS("fs").OpenId(objid) // check(err) // err = io.Copy(os.Stdout, file) // check(err) // err = file.Close() // check(err) // func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) { var doc gfsFile err = gfs.Files.Find(bson.M{"_id": id}).One(&doc) if err != nil { return } file = gfs.newFile() file.mode = gfsReading file.doc = doc return } // Open returns the most recently uploaded file with the provided // name, for reading. If the file isn't found, err will be set // to mgo.ErrNotFound. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. // // The following example will print the first 8192 bytes from the file: // // file, err := db.GridFS("fs").Open("myfile.txt") // check(err) // b := make([]byte, 8192) // n, err := file.Read(b) // check(err) // fmt.Println(string(b)) // check(err) // err = file.Close() // check(err) // fmt.Printf("%d bytes read\n", n) // // The io.Reader interface is implemented by *GridFile and may be used to // deal with it. As an example, the following snippet will dump the whole // file into the standard output: // // file, err := db.GridFS("fs").Open("myfile.txt") // check(err) // err = io.Copy(os.Stdout, file) // check(err) // err = file.Close() // check(err) // func (gfs *GridFS) Open(name string) (file *GridFile, err error) { var doc gfsFile err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc) if err != nil { return } file = gfs.newFile() file.mode = gfsReading file.doc = doc return } // OpenNext opens the next file from iter for reading, sets *file to it, // and returns true on the success case. If no more documents are available // on iter or an error occurred, *file is set to nil and the result is false. // Errors will be available via iter.Err(). 
// // The iter parameter must be an iterator on the GridFS files collection. // Using the GridFS.Find method is an easy way to obtain such an iterator, // but any iterator on the collection will work. // // If the provided *file is non-nil, OpenNext will close it before attempting // to iterate to the next element. This means that in a loop one only // has to worry about closing files when breaking out of the loop early // (break, return, or panic). // // For example: // // gfs := db.GridFS("fs") // query := gfs.Find(nil).Sort("filename") // iter := query.Iter() // var f *mgo.GridFile // for gfs.OpenNext(iter, &f) { // fmt.Printf("Filename: %s\n", f.Name()) // } // if iter.Close() != nil { // panic(iter.Close()) // } // func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool { if *file != nil { // Ignoring the error here shouldn't be a big deal // as we're reading the file and the loop iteration // for this file is finished. _ = (*file).Close() } var doc gfsFile if !iter.Next(&doc) { *file = nil return false } f := gfs.newFile() f.mode = gfsReading f.doc = doc *file = f return true } // Find runs query on GridFS's files collection and returns // the resulting Query. // // This logic: // // gfs := db.GridFS("fs") // iter := gfs.Find(nil).Iter() // // Is equivalent to: // // files := db.C("fs" + ".files") // iter := files.Find(nil).Iter() // func (gfs *GridFS) Find(query interface{}) *Query { return gfs.Files.Find(query) } // RemoveId deletes the file with the provided id from the GridFS. func (gfs *GridFS) RemoveId(id interface{}) error { err := gfs.Files.Remove(bson.M{"_id": id}) if err != nil { return err } _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}}) return err } type gfsDocId struct { Id interface{} "_id" } // Remove deletes all files with the provided name from the GridFS. 
func (gfs *GridFS) Remove(name string) (err error) { iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter() var doc gfsDocId for iter.Next(&doc) { if e := gfs.RemoveId(doc.Id); e != nil { err = e } } if err == nil { err = iter.Close() } return err } func (file *GridFile) assertMode(mode gfsFileMode) { switch file.mode { case mode: return case gfsWriting: panic("GridFile is open for writing") case gfsReading: panic("GridFile is open for reading") case gfsClosed: panic("GridFile is closed") default: panic("internal error: missing GridFile mode") } } // SetChunkSize sets size of saved chunks. Once the file is written to, it // will be split in blocks of that size and each block saved into an // independent chunk document. The default chunk size is 255kb. // // It is a runtime error to call this function once the file has started // being written to. func (file *GridFile) SetChunkSize(bytes int) { file.assertMode(gfsWriting) debugf("GridFile %p: setting chunk size to %d", file, bytes) file.m.Lock() file.doc.ChunkSize = bytes file.m.Unlock() } // Id returns the current file Id. func (file *GridFile) Id() interface{} { return file.doc.Id } // SetId changes the current file Id. // // It is a runtime error to call this function once the file has started // being written to, or when the file is not open for writing. func (file *GridFile) SetId(id interface{}) { file.assertMode(gfsWriting) file.m.Lock() file.doc.Id = id file.m.Unlock() } // Name returns the optional file name. An empty string will be returned // in case it is unset. func (file *GridFile) Name() string { return file.doc.Filename } // SetName changes the optional file name. An empty string may be used to // unset it. // // It is a runtime error to call this function when the file is not open // for writing. 
func (file *GridFile) SetName(name string) { file.assertMode(gfsWriting) file.m.Lock() file.doc.Filename = name file.m.Unlock() } // ContentType returns the optional file content type. An empty string will be // returned in case it is unset. func (file *GridFile) ContentType() string { return file.doc.ContentType } // ContentType changes the optional file content type. An empty string may be // used to unset it. // // It is a runtime error to call this function when the file is not open // for writing. func (file *GridFile) SetContentType(ctype string) { file.assertMode(gfsWriting) file.m.Lock() file.doc.ContentType = ctype file.m.Unlock() } // GetMeta unmarshals the optional "metadata" field associated with the // file into the result parameter. The meaning of keys under that field // is user-defined. For example: // // result := struct{ INode int }{} // err = file.GetMeta(&result) // if err != nil { // panic(err.String()) // } // fmt.Printf("inode: %d\n", result.INode) // func (file *GridFile) GetMeta(result interface{}) (err error) { file.m.Lock() if file.doc.Metadata != nil { err = bson.Unmarshal(file.doc.Metadata.Data, result) } file.m.Unlock() return } // SetMeta changes the optional "metadata" field associated with the // file. The meaning of keys under that field is user-defined. // For example: // // file.SetMeta(bson.M{"inode": inode}) // // It is a runtime error to call this function when the file is not open // for writing. func (file *GridFile) SetMeta(metadata interface{}) { file.assertMode(gfsWriting) data, err := bson.Marshal(metadata) file.m.Lock() if err != nil && file.err == nil { file.err = err } else { file.doc.Metadata = &bson.Raw{Data: data} } file.m.Unlock() } // Size returns the file size in bytes. func (file *GridFile) Size() (bytes int64) { file.m.Lock() bytes = file.doc.Length file.m.Unlock() return } // MD5 returns the file MD5 as a hex-encoded string. 
func (file *GridFile) MD5() (md5 string) { return file.doc.MD5 } // UploadDate returns the file upload time. func (file *GridFile) UploadDate() time.Time { return file.doc.UploadDate } // SetUploadDate changes the file upload time. // // It is a runtime error to call this function when the file is not open // for writing. func (file *GridFile) SetUploadDate(t time.Time) { file.assertMode(gfsWriting) file.m.Lock() file.doc.UploadDate = t file.m.Unlock() } // Close flushes any pending changes in case the file is being written // to, waits for any background operations to finish, and closes the file. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. func (file *GridFile) Close() (err error) { file.m.Lock() defer file.m.Unlock() if file.mode == gfsWriting { if len(file.wbuf) > 0 && file.err == nil { file.insertChunk(file.wbuf) file.wbuf = file.wbuf[0:0] } file.completeWrite() } else if file.mode == gfsReading && file.rcache != nil { file.rcache.wait.Lock() file.rcache = nil } file.mode = gfsClosed debugf("GridFile %p: closed", file) return file.err } func (file *GridFile) completeWrite() { for file.wpending > 0 { debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending) file.c.Wait() } if file.err == nil { hexsum := hex.EncodeToString(file.wsum.Sum(nil)) if file.doc.UploadDate.IsZero() { file.doc.UploadDate = bson.Now() } file.doc.MD5 = hexsum file.err = file.gfs.Files.Insert(file.doc) } if file.err != nil { file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}}) } if file.err == nil { index := Index{ Key: []string{"files_id", "n"}, Unique: true, } file.err = file.gfs.Chunks.EnsureIndex(index) } } // Abort cancels an in-progress write, preventing the file from being // automically created and ensuring previously written chunks are // removed when the file is closed. 
// // It is a runtime error to call Abort when the file was not opened // for writing. func (file *GridFile) Abort() { if file.mode != gfsWriting { panic("file.Abort must be called on file opened for writing") } file.err = errors.New("write aborted") } // Write writes the provided data to the file and returns the // number of bytes written and an error in case something // wrong happened. // // The file will internally cache the data so that all but the last // chunk sent to the database have the size defined by SetChunkSize. // This also means that errors may be deferred until a future call // to Write or Close. // // The parameters and behavior of this function turn the file // into an io.Writer. func (file *GridFile) Write(data []byte) (n int, err error) { file.assertMode(gfsWriting) file.m.Lock() debugf("GridFile %p: writing %d bytes", file, len(data)) defer file.m.Unlock() if file.err != nil { return 0, file.err } n = len(data) file.doc.Length += int64(n) chunkSize := file.doc.ChunkSize if len(file.wbuf)+len(data) < chunkSize { file.wbuf = append(file.wbuf, data...) return } // First, flush file.wbuf complementing with data. if len(file.wbuf) > 0 { missing := chunkSize - len(file.wbuf) if missing > len(data) { missing = len(data) } file.wbuf = append(file.wbuf, data[:missing]...) data = data[missing:] file.insertChunk(file.wbuf) file.wbuf = file.wbuf[0:0] } // Then, flush all chunks from data without copying. for len(data) > chunkSize { size := chunkSize if size > len(data) { size = len(data) } file.insertChunk(data[:size]) data = data[size:] } // And append the rest for a future call. file.wbuf = append(file.wbuf, data...) return n, file.err } func (file *GridFile) insertChunk(data []byte) { n := file.chunk file.chunk++ debugf("GridFile %p: adding to checksum: %q", file, string(data)) file.wsum.Write(data) for file.doc.ChunkSize*file.wpending >= 1024*1024 { // Hold on.. we got a MB pending. 
file.c.Wait() if file.err != nil { return } } file.wpending++ debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data)) // We may not own the memory of data, so rather than // simply copying it, we'll marshal the document ahead of time. data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data}) if err != nil { file.err = err return } go func() { err := file.gfs.Chunks.Insert(bson.Raw{Data: data}) file.m.Lock() file.wpending-- if err != nil && file.err == nil { file.err = err } file.c.Broadcast() file.m.Unlock() }() } // Seek sets the offset for the next Read or Write on file to // offset, interpreted according to whence: 0 means relative to // the origin of the file, 1 means relative to the current offset, // and 2 means relative to the end. It returns the new offset and // an error, if any. func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { file.m.Lock() debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence) defer file.m.Unlock() switch whence { case os.SEEK_SET: case os.SEEK_CUR: offset += file.offset case os.SEEK_END: offset += file.doc.Length default: panic("unsupported whence value") } if offset > file.doc.Length { return file.offset, errors.New("seek past end of file") } if offset == file.doc.Length { // If we're seeking to the end of the file, // no need to read anything. This enables // a client to find the size of the file using only the // io.ReadSeeker interface with low overhead. 
file.offset = offset return file.offset, nil } chunk := int(offset / int64(file.doc.ChunkSize)) if chunk+1 == file.chunk && offset >= file.offset { file.rbuf = file.rbuf[int(offset-file.offset):] file.offset = offset return file.offset, nil } file.offset = offset file.chunk = chunk file.rbuf = nil file.rbuf, err = file.getChunk() if err == nil { file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):] } return file.offset, err } // Read reads into b the next available data from the file and // returns the number of bytes written and an error in case // something wrong happened. At the end of the file, n will // be zero and err will be set to io.EOF. // // The parameters and behavior of this function turn the file // into an io.Reader. func (file *GridFile) Read(b []byte) (n int, err error) { file.assertMode(gfsReading) file.m.Lock() debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b)) defer file.m.Unlock() if file.offset == file.doc.Length { return 0, io.EOF } for err == nil { i := copy(b, file.rbuf) n += i file.offset += int64(i) file.rbuf = file.rbuf[i:] if i == len(b) || file.offset == file.doc.Length { break } b = b[i:] file.rbuf, err = file.getChunk() } return n, err } func (file *GridFile) getChunk() (data []byte, err error) { cache := file.rcache file.rcache = nil if cache != nil && cache.n == file.chunk { debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk) cache.wait.Lock() data, err = cache.data, cache.err } else { debugf("GridFile %p: Fetching chunk %d", file, file.chunk) var doc gfsChunk err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc) data = doc.Data } file.chunk++ if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length { // Read the next one in background. 
cache = &gfsCachedChunk{n: file.chunk} cache.wait.Lock() debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk) // Clone the session to avoid having it closed in between. chunks := file.gfs.Chunks session := chunks.Database.Session.Clone() go func(id interface{}, n int) { defer session.Close() chunks = chunks.With(session) var doc gfsChunk cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc) cache.data = doc.Data cache.wait.Unlock() }(file.doc.Id, file.chunk) file.rcache = cache } debugf("Returning err: %#v", err) return } mgo-r2016.08.01/gridfs_test.go000066400000000000000000000375431274772341400157500ustar00rootroot00000000000000// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "io" "os" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) func (s *S) TestGridFSCreate(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") before := bson.Now() gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) n, err := file.Write([]byte("some data")) c.Assert(err, IsNil) c.Assert(n, Equals, 9) err = file.Close() c.Assert(err, IsNil) after := bson.Now() // Check the file information. result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) fileId, ok := result["_id"].(bson.ObjectId) c.Assert(ok, Equals, true) c.Assert(fileId.Valid(), Equals, true) result["_id"] = "" ud, ok := result["uploadDate"].(time.Time) c.Assert(ok, Equals, true) c.Assert(ud.After(before) && ud.Before(after), Equals, true) result["uploadDate"] = "" expected := M{ "_id": "", "length": 9, "chunkSize": 255 * 1024, "uploadDate": "", "md5": "1e50210a0202497fb79bc38b6ade6c34", } c.Assert(result, DeepEquals, expected) // Check the chunk. result = M{} err = db.C("fs.chunks").Find(nil).One(result) c.Assert(err, IsNil) chunkId, ok := result["_id"].(bson.ObjectId) c.Assert(ok, Equals, true) c.Assert(chunkId.Valid(), Equals, true) result["_id"] = "" expected = M{ "_id": "", "files_id": fileId, "n": 0, "data": []byte("some data"), } c.Assert(result, DeepEquals, expected) // Check that an index was created. 
indexes, err := db.C("fs.chunks").Indexes() c.Assert(err, IsNil) c.Assert(len(indexes), Equals, 2) c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"}) } func (s *S) TestGridFSFileDetails(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile1.txt") c.Assert(err, IsNil) n, err := file.Write([]byte("some")) c.Assert(err, IsNil) c.Assert(n, Equals, 4) c.Assert(file.Size(), Equals, int64(4)) n, err = file.Write([]byte(" data")) c.Assert(err, IsNil) c.Assert(n, Equals, 5) c.Assert(file.Size(), Equals, int64(9)) id, _ := file.Id().(bson.ObjectId) c.Assert(id.Valid(), Equals, true) c.Assert(file.Name(), Equals, "myfile1.txt") c.Assert(file.ContentType(), Equals, "") var info interface{} err = file.GetMeta(&info) c.Assert(err, IsNil) c.Assert(info, IsNil) file.SetId("myid") file.SetName("myfile2.txt") file.SetContentType("text/plain") file.SetMeta(M{"any": "thing"}) c.Assert(file.Id(), Equals, "myid") c.Assert(file.Name(), Equals, "myfile2.txt") c.Assert(file.ContentType(), Equals, "text/plain") err = file.GetMeta(&info) c.Assert(err, IsNil) c.Assert(info, DeepEquals, bson.M{"any": "thing"}) err = file.Close() c.Assert(err, IsNil) c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34") ud := file.UploadDate() now := time.Now() c.Assert(ud.Before(now), Equals, true) c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true) result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) result["uploadDate"] = "" expected := M{ "_id": "myid", "length": 9, "chunkSize": 255 * 1024, "uploadDate": "", "md5": "1e50210a0202497fb79bc38b6ade6c34", "filename": "myfile2.txt", "contentType": "text/plain", "metadata": M{"any": "thing"}, } c.Assert(result, DeepEquals, expected) } func (s *S) TestGridFSSetUploadDate(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") 
gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local) file.SetUploadDate(t) err = file.Close() c.Assert(err, IsNil) // Check the file information. result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) ud := result["uploadDate"].(time.Time) if !ud.Equal(t) { c.Fatalf("want upload date %s, got %s", t, ud) } } func (s *S) TestGridFSCreateWithChunking(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) file.SetChunkSize(5) // Smaller than the chunk size. n, err := file.Write([]byte("abc")) c.Assert(err, IsNil) c.Assert(n, Equals, 3) // Boundary in the middle. n, err = file.Write([]byte("defg")) c.Assert(err, IsNil) c.Assert(n, Equals, 4) // Boundary at the end. n, err = file.Write([]byte("hij")) c.Assert(err, IsNil) c.Assert(n, Equals, 3) // Larger than the chunk size, with 3 chunks. n, err = file.Write([]byte("klmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 12) err = file.Close() c.Assert(err, IsNil) // Check the file information. result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) fileId, _ := result["_id"].(bson.ObjectId) c.Assert(fileId.Valid(), Equals, true) result["_id"] = "" result["uploadDate"] = "" expected := M{ "_id": "", "length": 22, "chunkSize": 5, "uploadDate": "", "md5": "44a66044834cbe55040089cabfc102d5", } c.Assert(result, DeepEquals, expected) // Check the chunks. 
iter := db.C("fs.chunks").Find(nil).Sort("n").Iter() dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"} for i := 0; ; i++ { result = M{} if !iter.Next(result) { if i != 5 { c.Fatalf("Expected 5 chunks, got %d", i) } break } c.Assert(iter.Close(), IsNil) result["_id"] = "" expected = M{ "_id": "", "files_id": fileId, "n": i, "data": []byte(dataChunks[i]), } c.Assert(result, DeepEquals, expected) } } func (s *S) TestGridFSAbort(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) file.SetChunkSize(5) n, err := file.Write([]byte("some data")) c.Assert(err, IsNil) c.Assert(n, Equals, 9) var count int for i := 0; i < 10; i++ { count, err = db.C("fs.chunks").Count() if count > 0 || err != nil { break } } c.Assert(err, IsNil) c.Assert(count, Equals, 1) file.Abort() err = file.Close() c.Assert(err, ErrorMatches, "write aborted") count, err = db.C("fs.chunks").Count() c.Assert(err, IsNil) c.Assert(count, Equals, 0) } func (s *S) TestGridFSCloseConflict(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true}) // For a closing-time conflict err = db.C("fs.files").Insert(M{"filename": "foo.txt"}) c.Assert(err, IsNil) gfs := db.GridFS("fs") file, err := gfs.Create("foo.txt") c.Assert(err, IsNil) _, err = file.Write([]byte("some data")) c.Assert(err, IsNil) err = file.Close() c.Assert(mgo.IsDup(err), Equals, true) count, err := db.C("fs.chunks").Count() c.Assert(err, IsNil) c.Assert(count, Equals, 0) } func (s *S) TestGridFSOpenNotFound(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.OpenId("non-existent") c.Assert(err == mgo.ErrNotFound, Equals, true) c.Assert(file, IsNil) 
file, err = gfs.Open("non-existent") c.Assert(err == mgo.ErrNotFound, Equals, true) c.Assert(file, IsNil) } func (s *S) TestGridFSReadAll(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) id := file.Id() file.SetChunkSize(5) n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 22) err = file.Close() c.Assert(err, IsNil) file, err = gfs.OpenId(id) c.Assert(err, IsNil) b := make([]byte, 30) n, err = file.Read(b) c.Assert(n, Equals, 22) c.Assert(err, IsNil) n, err = file.Read(b) c.Assert(n, Equals, 0) c.Assert(err == io.EOF, Equals, true) err = file.Close() c.Assert(err, IsNil) } func (s *S) TestGridFSReadChunking(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) id := file.Id() file.SetChunkSize(5) n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 22) err = file.Close() c.Assert(err, IsNil) file, err = gfs.OpenId(id) c.Assert(err, IsNil) b := make([]byte, 30) // Smaller than the chunk size. n, err = file.Read(b[:3]) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(b[:3], DeepEquals, []byte("abc")) // Boundary in the middle. n, err = file.Read(b[:4]) c.Assert(err, IsNil) c.Assert(n, Equals, 4) c.Assert(b[:4], DeepEquals, []byte("defg")) // Boundary at the end. n, err = file.Read(b[:3]) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(b[:3], DeepEquals, []byte("hij")) // Larger than the chunk size, with 3 chunks. 
n, err = file.Read(b) c.Assert(err, IsNil) c.Assert(n, Equals, 12) c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv")) n, err = file.Read(b) c.Assert(n, Equals, 0) c.Assert(err == io.EOF, Equals, true) err = file.Close() c.Assert(err, IsNil) } func (s *S) TestGridFSOpen(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) file.Close() file, err = gfs.Open("myfile.txt") c.Assert(err, IsNil) defer file.Close() var b [1]byte _, err = file.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "2") } func (s *S) TestGridFSSeek(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) id := file.Id() file.SetChunkSize(5) n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 22) err = file.Close() c.Assert(err, IsNil) b := make([]byte, 5) file, err = gfs.OpenId(id) c.Assert(err, IsNil) o, err := file.Seek(3, os.SEEK_SET) c.Assert(err, IsNil) c.Assert(o, Equals, int64(3)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("defgh")) o, err = file.Seek(5, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(o, Equals, int64(13)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("nopqr")) o, err = file.Seek(0, os.SEEK_END) c.Assert(err, IsNil) c.Assert(o, Equals, int64(22)) n, err = file.Read(b) c.Assert(err, Equals, io.EOF) c.Assert(n, Equals, 0) o, err = file.Seek(-10, os.SEEK_END) c.Assert(err, IsNil) c.Assert(o, Equals, int64(12)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("mnopq")) o, err = file.Seek(8, os.SEEK_SET) c.Assert(err, IsNil) c.Assert(o, Equals, 
int64(8)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("ijklm")) // Trivial seek forward within same chunk. Already // got the data, shouldn't touch the database. sent := mgo.GetStats().SentOps o, err = file.Seek(1, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(o, Equals, int64(14)) c.Assert(mgo.GetStats().SentOps, Equals, sent) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("opqrs")) // Try seeking past end of file. file.Seek(3, os.SEEK_SET) o, err = file.Seek(23, os.SEEK_SET) c.Assert(err, ErrorMatches, "seek past end of file") c.Assert(o, Equals, int64(3)) } func (s *S) TestGridFSRemoveId(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) id := file.Id() file.Close() err = gfs.RemoveId(id) c.Assert(err, IsNil) file, err = gfs.Open("myfile.txt") c.Assert(err, IsNil) defer file.Close() var b [1]byte _, err = file.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "1") n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 0) } func (s *S) TestGridFSRemove(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) file.Close() err = gfs.Remove("myfile.txt") c.Assert(err, IsNil) _, err = gfs.Open("myfile.txt") c.Assert(err == mgo.ErrNotFound, Equals, true) n, err := db.C("fs.chunks").Find(nil).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 0) } func (s *S) TestGridFSOpenNext(c *C) { session, err := mgo.Dial("localhost:40011") 
c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile1.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile2.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) file.Close() var f *mgo.GridFile var b [1]byte iter := gfs.Find(nil).Sort("-filename").Iter() ok := gfs.OpenNext(iter, &f) c.Assert(ok, Equals, true) c.Check(f.Name(), Equals, "myfile2.txt") _, err = f.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "2") ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, true) c.Check(f.Name(), Equals, "myfile1.txt") _, err = f.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "1") ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) c.Assert(f, IsNil) // Do it again with a more restrictive query to make sure // it's actually taken into account. iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter() ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, true) c.Check(f.Name(), Equals, "myfile1.txt") ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) c.Assert(f, IsNil) } mgo-r2016.08.01/harness/000077500000000000000000000000001274772341400145335ustar00rootroot00000000000000mgo-r2016.08.01/harness/certs/000077500000000000000000000000001274772341400156535ustar00rootroot00000000000000mgo-r2016.08.01/harness/certs/client.crt000066400000000000000000000022141274772341400176420ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H 4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ 
616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd 7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI /nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r z3A= -----END CERTIFICATE----- mgo-r2016.08.01/harness/certs/client.key000066400000000000000000000032131274772341400176420ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ +HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB 
K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB 28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ 4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= -----END RSA PRIVATE KEY----- mgo-r2016.08.01/harness/certs/client.pem000066400000000000000000000062651274772341400176450ustar00rootroot00000000000000To regenerate the key: openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key cat server.key server.crt > server.pem openssl genrsa -out client.key 2048 openssl req -key client.key -new -out client.req openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt cat client.key client.crt > client.pem -----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F 
Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ +HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB 28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ 4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= -----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H 4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ 616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd 7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI /nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r z3A= -----END CERTIFICATE----- 
mgo-r2016.08.01/harness/certs/client.req000066400000000000000000000017351274772341400176500ustar00rootroot00000000000000-----BEGIN CERTIFICATE REQUEST----- MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/ h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4 GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M 3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8 8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg tb1s0MA= -----END CERTIFICATE REQUEST----- mgo-r2016.08.01/harness/certs/server.crt000066400000000000000000000024161274772341400176760ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm 6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP 
JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu 2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL yQ== -----END CERTIFICATE----- mgo-r2016.08.01/harness/certs/server.key000066400000000000000000000032501274772341400176730ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z 
GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do 4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt 9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk SruEA1+5bfBRMW0P+h7Qfe4= -----END PRIVATE KEY----- mgo-r2016.08.01/harness/certs/server.pem000066400000000000000000000056661274772341400177010ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do 4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 
ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt 9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk SruEA1+5bfBRMW0P+h7Qfe4= -----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm 6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu 2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL yQ== -----END CERTIFICATE----- mgo-r2016.08.01/harness/daemons/000077500000000000000000000000001274772341400161615ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/.env000066400000000000000000000022371274772341400167560ustar00rootroot00000000000000 set -e MONGOVERSION=$(mongod --version | sed -n 's/.*v\([0-9]\+\.[0-9]\+\)\..*/\1/p') MONGOMAJOR=$(echo $MONGOVERSION | sed 's/\([0-9]\+\)\..*/\1/') MONGOMINOR=$(echo $MONGOVERSION | sed 's/[0-9]\+\.\([0-9]\+\)/\1/') 
versionAtLeast() { TESTMAJOR="$1" TESTMINOR="$2" if [ "$MONGOMAJOR" -gt "$TESTMAJOR" ]; then return 0 fi if [ "$MONGOMAJOR" -lt "$TESTMAJOR" ]; then return 100 fi if [ "$MONGOMINOR" -ge "$TESTMINOR" ]; then return 0 fi return 100 } COMMONDOPTSNOIP=" --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath ./db " COMMONDOPTS=" $COMMONDOPTSNOIP --bind_ip=127.0.0.1 " COMMONCOPTS=" $COMMONDOPTS " COMMONSOPTS=" --chunkSize 1 --bind_ip=127.0.0.1 " if versionAtLeast 3 2; then # 3.2 doesn't like --nojournal on config servers. #COMMONCOPTS="$(echo "$COMMONCOPTS" | sed '/--nojournal/d')" # Using a hacked version of MongoDB 3.2 for now. # Go back to MMAPv1 so it's not super sluggish. :-( COMMONDOPTSNOIP="--storageEngine=mmapv1 $COMMONDOPTSNOIP" COMMONDOPTS="--storageEngine=mmapv1 $COMMONDOPTS" COMMONCOPTS="--storageEngine=mmapv1 $COMMONCOPTS" fi if [ "$TRAVIS" = true ]; then set -x fi mgo-r2016.08.01/harness/daemons/cfg1/000077500000000000000000000000001274772341400170015ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/db/000077500000000000000000000000001274772341400173665ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/db/.empty000066400000000000000000000000001274772341400205130ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/db/journal/000077500000000000000000000000001274772341400210405ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/db/journal/tempLatencyTest000066400000000000000000006200001274772341400241060ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/db/mongod.lock000077500000000000000000000000001274772341400215140ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/log/000077500000000000000000000000001274772341400175625ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg1/log/run000077500000000000000000000000401274772341400203060ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt 
mgo-r2016.08.01/harness/daemons/cfg1/run000077500000000000000000000001171274772341400175320ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONCOPTS \ --port 40101 \ --configsvr mgo-r2016.08.01/harness/daemons/cfg2/000077500000000000000000000000001274772341400170025ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg2/db/000077500000000000000000000000001274772341400173675ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg2/db/.empty000066400000000000000000000000001274772341400205140ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg2/log/000077500000000000000000000000001274772341400175635ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg2/log/run000077500000000000000000000000401274772341400203070ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/cfg2/run000077500000000000000000000001171274772341400175330ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONCOPTS \ --port 40102 \ --configsvr mgo-r2016.08.01/harness/daemons/cfg3/000077500000000000000000000000001274772341400170035ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg3/db/000077500000000000000000000000001274772341400173705ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg3/db/.empty000066400000000000000000000000001274772341400205150ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg3/log/000077500000000000000000000000001274772341400175645ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/cfg3/log/run000077500000000000000000000000401274772341400203100ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/cfg3/run000077500000000000000000000001711274772341400175340ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONCOPTS \ --port 40103 \ --configsvr \ --auth \ --keyFile=../../certs/keyfile mgo-r2016.08.01/harness/daemons/db1/000077500000000000000000000000001274772341400166275ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db1/db/000077500000000000000000000000001274772341400172145ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db1/db/.empty000066400000000000000000000000001274772341400203410ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db1/log/000077500000000000000000000000001274772341400174105ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db1/log/run000077500000000000000000000000401274772341400201340ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/db1/run000077500000000000000000000003001274772341400173520ustar00rootroot00000000000000#!/bin/sh . ../.env if [ x$NOIPV6 = x1 ]; then BINDIP="127.0.0.1" else BINDIP="127.0.0.1,::1" fi exec mongod $COMMONDOPTSNOIP \ --shardsvr \ --bind_ip=$BINDIP \ --port 40001 \ --ipv6 mgo-r2016.08.01/harness/daemons/db2/000077500000000000000000000000001274772341400166305ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db2/db/000077500000000000000000000000001274772341400172155ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db2/db/.empty000066400000000000000000000000001274772341400203420ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db2/log/000077500000000000000000000000001274772341400174115ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db2/log/run000077500000000000000000000000401274772341400201350ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/db2/run000077500000000000000000000001271274772341400173620ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONDOPTS \ --shardsvr \ --port 40002 \ --auth mgo-r2016.08.01/harness/daemons/db3/000077500000000000000000000000001274772341400166315ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db3/db/000077500000000000000000000000001274772341400172165ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db3/db/.empty000066400000000000000000000000001274772341400203430ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db3/log/000077500000000000000000000000001274772341400174125ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/db3/log/run000077500000000000000000000000401274772341400201360ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/db3/run000077500000000000000000000002771274772341400173710ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONDOPTS \ --shardsvr \ --port 40003 \ --auth \ --sslMode preferSSL \ --sslCAFile ../../certs/server.pem \ --sslPEMKeyFile ../../certs/server.pem mgo-r2016.08.01/harness/daemons/rs1a/000077500000000000000000000000001274772341400170275ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1a/db/000077500000000000000000000000001274772341400174145ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1a/db/.empty000066400000000000000000000000001274772341400205410ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1a/log/000077500000000000000000000000001274772341400176105ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1a/log/run000077500000000000000000000000401274772341400203340ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs1a/run000077500000000000000000000001361274772341400175610ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs1 \ --port 40011 mgo-r2016.08.01/harness/daemons/rs1b/000077500000000000000000000000001274772341400170305ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1b/db/000077500000000000000000000000001274772341400174155ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1b/db/.empty000066400000000000000000000000001274772341400205420ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1b/log/000077500000000000000000000000001274772341400176115ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1b/log/run000077500000000000000000000000401274772341400203350ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs1b/run000077500000000000000000000001361274772341400175620ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs1 \ --port 40012 mgo-r2016.08.01/harness/daemons/rs1c/000077500000000000000000000000001274772341400170315ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1c/db/000077500000000000000000000000001274772341400174165ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1c/db/.empty000066400000000000000000000000001274772341400205430ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1c/log/000077500000000000000000000000001274772341400176125ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs1c/log/run000077500000000000000000000000401274772341400203360ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs1c/run000077500000000000000000000001361274772341400175630ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs1 \ --port 40013 mgo-r2016.08.01/harness/daemons/rs2a/000077500000000000000000000000001274772341400170305ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2a/db/000077500000000000000000000000001274772341400174155ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2a/db/.empty000066400000000000000000000000001274772341400205420ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2a/log/000077500000000000000000000000001274772341400176115ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2a/log/run000077500000000000000000000000401274772341400203350ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs2a/run000077500000000000000000000001361274772341400175620ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs2 \ --port 40021 mgo-r2016.08.01/harness/daemons/rs2b/000077500000000000000000000000001274772341400170315ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2b/db/000077500000000000000000000000001274772341400174165ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2b/db/.empty000066400000000000000000000000001274772341400205430ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2b/log/000077500000000000000000000000001274772341400176125ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2b/log/run000077500000000000000000000000401274772341400203360ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs2b/run000077500000000000000000000001361274772341400175630ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs2 \ --port 40022 mgo-r2016.08.01/harness/daemons/rs2c/000077500000000000000000000000001274772341400170325ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2c/db/000077500000000000000000000000001274772341400174175ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2c/db/.empty000066400000000000000000000000001274772341400205440ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2c/log/000077500000000000000000000000001274772341400176135ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs2c/log/run000077500000000000000000000000401274772341400203370ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs2c/run000077500000000000000000000001361274772341400175640ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs2 \ --port 40023 mgo-r2016.08.01/harness/daemons/rs3a/000077500000000000000000000000001274772341400170315ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3a/db/000077500000000000000000000000001274772341400174165ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3a/db/.empty000066400000000000000000000000001274772341400205430ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3a/log/000077500000000000000000000000001274772341400176125ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3a/log/run000077500000000000000000000000401274772341400203360ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs3a/run000077500000000000000000000001771274772341400175700ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs3 \ --port 40031 \ --keyFile=../../certs/keyfile mgo-r2016.08.01/harness/daemons/rs3b/000077500000000000000000000000001274772341400170325ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3b/db/000077500000000000000000000000001274772341400174175ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3b/db/.empty000066400000000000000000000000001274772341400205440ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3b/log/000077500000000000000000000000001274772341400176135ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3b/log/run000077500000000000000000000000401274772341400203370ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs3b/run000077500000000000000000000001771274772341400175710ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs3 \ --port 40032 \ --keyFile=../../certs/keyfile mgo-r2016.08.01/harness/daemons/rs3c/000077500000000000000000000000001274772341400170335ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3c/db/000077500000000000000000000000001274772341400174205ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3c/db/.empty000066400000000000000000000000001274772341400205450ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3c/log/000077500000000000000000000000001274772341400176145ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs3c/log/run000077500000000000000000000000401274772341400203400ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs3c/run000077500000000000000000000001771274772341400175720ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs3 \ --port 40033 \ --keyFile=../../certs/keyfile mgo-r2016.08.01/harness/daemons/rs4a/000077500000000000000000000000001274772341400170325ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs4a/db/000077500000000000000000000000001274772341400174175ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs4a/db/.empty000066400000000000000000000000001274772341400205440ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs4a/log/000077500000000000000000000000001274772341400176135ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/rs4a/log/run000077500000000000000000000000401274772341400203370ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/rs4a/run000077500000000000000000000001361274772341400175640ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs4 \ --port 40041 mgo-r2016.08.01/harness/daemons/s1/000077500000000000000000000000001274772341400165045ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/s1/log/000077500000000000000000000000001274772341400172655ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/s1/log/run000077500000000000000000000000401274772341400200110ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/s1/run000077500000000000000000000001351274772341400172350ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongos $COMMONSOPTS \ --port 40201 \ --configdb 127.0.0.1:40101 mgo-r2016.08.01/harness/daemons/s2/000077500000000000000000000000001274772341400165055ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/s2/log/000077500000000000000000000000001274772341400172665ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/s2/log/run000077500000000000000000000000401274772341400200120ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/s2/run000077500000000000000000000001351274772341400172360ustar00rootroot00000000000000#!/bin/sh . ../.env exec mongos $COMMONSOPTS \ --port 40202 \ --configdb 127.0.0.1:40102 mgo-r2016.08.01/harness/daemons/s3/000077500000000000000000000000001274772341400165065ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/s3/log/000077500000000000000000000000001274772341400172675ustar00rootroot00000000000000mgo-r2016.08.01/harness/daemons/s3/log/run000077500000000000000000000000401274772341400200130ustar00rootroot00000000000000#!/bin/sh exec cat - > log.txt mgo-r2016.08.01/harness/daemons/s3/run000077500000000000000000000001761274772341400172440ustar00rootroot00000000000000#!/bin/sh . 
../.env exec mongos $COMMONSOPTS \ --port 40203 \ --configdb 127.0.0.1:40103 \ --keyFile=../../certs/keyfile mgo-r2016.08.01/harness/mongojs/000077500000000000000000000000001274772341400162075ustar00rootroot00000000000000mgo-r2016.08.01/harness/mongojs/dropall.js000066400000000000000000000036561274772341400202140ustar00rootroot00000000000000 var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203] var auth = [40002, 40103, 40203, 40031] var db1 = new Mongo("localhost:40001") if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) { ports.push(40003) auth.push(40003) } for (var i in ports) { var port = ports[i] var server = "localhost:" + port var mongo = new Mongo("localhost:" + port) var admin = mongo.getDB("admin") for (var j in auth) { if (auth[j] == port) { admin.auth("root", "rapadura") admin.system.users.find().forEach(function(u) { if (u.user == "root" || u.user == "reader") { return; } if (typeof admin.dropUser == "function") { mongo.getDB(u.db).dropUser(u.user); } else { admin.removeUser(u.user); } }) break } } var result = admin.runCommand({"listDatabases": 1}) for (var j = 0; j != 100; j++) { if (typeof result.databases != "undefined" || notMaster(result)) { break } result = admin.runCommand({"listDatabases": 1}) } if (notMaster(result)) { continue } if (typeof result.databases == "undefined") { print("Could not list databases. 
Command result:") print(JSON.stringify(result)) quit(12) } var dbs = result.databases for (var j = 0; j != dbs.length; j++) { var db = dbs[j] switch (db.name) { case "admin": case "local": case "config": break default: mongo.getDB(db.name).dropDatabase() } } } function notMaster(result) { return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found")) } // vim:ts=4:sw=4:et mgo-r2016.08.01/harness/mongojs/init.js000066400000000000000000000106711274772341400175150ustar00rootroot00000000000000//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5} var settings = {}; // We know the master of the first set (pri=1), but not of the second. var rs1cfg = {_id: "rs1", members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}}, {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}}, {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}], settings: settings} var rs2cfg = {_id: "rs2", members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}, {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}}, {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}], settings: settings} var rs3cfg = {_id: "rs3", members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}}, {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}}, {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}], settings: settings} for (var i = 0; i != 60; i++) { try { db1 = new Mongo("127.0.0.1:40001").getDB("admin") db2 = new Mongo("127.0.0.1:40002").getDB("admin") rs1a = new Mongo("127.0.0.1:40011").getDB("admin") rs2a = new Mongo("127.0.0.1:40021").getDB("admin") rs3a = new Mongo("127.0.0.1:40031").getDB("admin") break } catch(err) { print("Can't connect yet...") } sleep(1000) } function hasSSL() { return Boolean(db1.serverBuildInfo().OpenSSLVersion) } rs1a.runCommand({replSetInitiate: rs1cfg}) rs2a.runCommand({replSetInitiate: 
rs2cfg}) rs3a.runCommand({replSetInitiate: rs3cfg}) function configShards() { cfg1 = new Mongo("127.0.0.1:40201").getDB("admin") cfg1.runCommand({addshard: "127.0.0.1:40001"}) cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"}) cfg2 = new Mongo("127.0.0.1:40202").getDB("admin") cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"}) cfg3 = new Mongo("127.0.0.1:40203").getDB("admin") cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"}) } function configAuth() { var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"] if (hasSSL()) { addrs.push("127.0.0.1:40003") } for (var i in addrs) { print("Configuring auth for", addrs[i]) var db = new Mongo(addrs[i]).getDB("admin") var v = db.serverBuildInfo().versionArray var timedOut = false if (v < [2, 5]) { db.addUser("root", "rapadura") } else { try { db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) } catch (err) { // 3.2 consistently fails replication of creds on 40031 (config server) print("createUser command returned an error: " + err) if (String(err).indexOf("timed out") >= 0) { timedOut = true; } } } for (var i = 0; i < 60; i++) { var ok = db.auth("root", "rapadura") if (ok || !timedOut) { break } sleep(1000); } if (v >= [2, 6]) { db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) } else if (v >= [2, 4]) { db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) } else { db.addUser("reader", "rapadura", true) } } } function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 if (m.state == 1) { primary = 1 } } } } if (primary == 0) { count = 0 } return count } var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length for (var i = 0; i != 60; i++) { var count = countHealthy(rs1a) + 
countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { configShards() configAuth() quit(0) } sleep(1000) } print("Replica sets didn't sync up properly.") quit(12) // vim:ts=4:sw=4:et mgo-r2016.08.01/harness/mongojs/wait.js000066400000000000000000000042441274772341400175150ustar00rootroot00000000000000// We know the master of the first set (pri=1), but not of the second. var settings = {} var rs1cfg = {_id: "rs1", members: [{_id: 1, host: "127.0.0.1:40011", priority: 1}, {_id: 2, host: "127.0.0.1:40012", priority: 0}, {_id: 3, host: "127.0.0.1:40013", priority: 0}]} var rs2cfg = {_id: "rs2", members: [{_id: 1, host: "127.0.0.1:40021", priority: 1}, {_id: 2, host: "127.0.0.1:40022", priority: 1}, {_id: 3, host: "127.0.0.1:40023", priority: 0}]} var rs3cfg = {_id: "rs3", members: [{_id: 1, host: "127.0.0.1:40031", priority: 1}, {_id: 2, host: "127.0.0.1:40032", priority: 1}, {_id: 3, host: "127.0.0.1:40033", priority: 1}], settings: settings} for (var i = 0; i != 60; i++) { try { rs1a = new Mongo("127.0.0.1:40011").getDB("admin") rs2a = new Mongo("127.0.0.1:40021").getDB("admin") rs3a = new Mongo("127.0.0.1:40031").getDB("admin") rs3a.auth("root", "rapadura") db1 = new Mongo("127.0.0.1:40001").getDB("admin") db2 = new Mongo("127.0.0.1:40002").getDB("admin") break } catch(err) { print("Can't connect yet...") } sleep(1000) } function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 if (m.state == 1) { primary = 1 } } } } if (primary == 0) { count = 0 } return count } var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length for (var i = 0; i != 90; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) 
print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { quit(0) } sleep(1000) } print("Replica sets didn't sync up properly.") quit(12) // vim:ts=4:sw=4:et mgo-r2016.08.01/harness/setup.sh000077500000000000000000000044551274772341400162420ustar00rootroot00000000000000#!/bin/sh -e LINE="---------------" start() { if [ -d _harness ]; then echo "Daemon setup already in place, stop it first." exit 1 fi mkdir -p _harness cd _harness cp -a ../harness/daemons . cp -a ../harness/certs . echo keyfile > certs/keyfile chmod 600 certs/keyfile if ! mongod --help | grep -q -- --ssl; then rm -rf daemons/db3 fi COUNT=$(ls daemons | wc -l) echo "Running daemons..." svscan daemons & SVSCANPID=$! echo $SVSCANPID > svscan.pid if ! kill -0 $SVSCANPID; then echo "Cannot execute svscan." exit 1 fi echo "Starting $COUNT processes..." for i in $(seq 30); do UP=$(svstat daemons/* | grep ' up ' | grep -v ' [0-3] seconds' | wc -l) echo "$UP processes up..." if [ x$COUNT = x$UP ]; then echo "Running setup.js with mongo..." mongo --nodb ../harness/mongojs/init.js exit 0 fi sleep 1 done echo "Failed to start processes. svstat _harness/daemons/* output:" echo $LINE svstat daemons/* echo $LINE for DAEMON in daemons/*; do if $(svstat $DAEMON | grep ' up ' | grep ' [0-3] seconds' > /dev/null); then echo "Logs for _harness/$DAEMON:" echo $LINE cat $DAEMON/log/log.txt echo $LINE fi done exit 1 } stop() { if [ -d _harness ]; then cd _harness if [ -f svscan.pid ]; then kill -9 $(cat svscan.pid) 2> /dev/null || true svc -dx daemons/* daemons/*/log > /dev/null 2>&1 || true COUNT=$(ls daemons | wc -l) echo "Shutting down $COUNT processes..." while true; do DOWN=$(svstat daemons/* | grep 'supervise not running' | wc -l) echo "$DOWN processes down..." if [ x$DOWN = x$COUNT ]; then break fi sleep 1 done rm svscan.pid echo "Done." fi cd .. rm -rf _harness fi } if [ ! -f suite_test.go ]; then echo "This script must be run from within the source directory." 
exit 1 fi case "$1" in start) start $2 ;; stop) stop $2 ;; esac # vim:ts=4:sw=4:et mgo-r2016.08.01/internal/000077500000000000000000000000001274772341400147045ustar00rootroot00000000000000mgo-r2016.08.01/internal/json/000077500000000000000000000000001274772341400156555ustar00rootroot00000000000000mgo-r2016.08.01/internal/json/LICENSE000066400000000000000000000027071274772341400166700ustar00rootroot00000000000000Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
mgo-r2016.08.01/internal/json/bench_test.go000066400000000000000000000110231274772341400203170ustar00rootroot00000000000000// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Large data benchmark. // The JSON data is a summary of agl's changes in the // go, webkit, and chromium open source projects. // We benchmark converting between the JSON form // and in-memory data structures. package json import ( "bytes" "compress/gzip" "io/ioutil" "os" "strings" "testing" ) type codeResponse struct { Tree *codeNode `json:"tree"` Username string `json:"username"` } type codeNode struct { Name string `json:"name"` Kids []*codeNode `json:"kids"` CLWeight float64 `json:"cl_weight"` Touches int `json:"touches"` MinT int64 `json:"min_t"` MaxT int64 `json:"max_t"` MeanT int64 `json:"mean_t"` } var codeJSON []byte var codeStruct codeResponse func codeInit() { f, err := os.Open("testdata/code.json.gz") if err != nil { panic(err) } defer f.Close() gz, err := gzip.NewReader(f) if err != nil { panic(err) } data, err := ioutil.ReadAll(gz) if err != nil { panic(err) } codeJSON = data if err := Unmarshal(codeJSON, &codeStruct); err != nil { panic("unmarshal code.json: " + err.Error()) } if data, err = Marshal(&codeStruct); err != nil { panic("marshal code.json: " + err.Error()) } if !bytes.Equal(data, codeJSON) { println("different lengths", len(data), len(codeJSON)) for i := 0; i < len(data) && i < len(codeJSON); i++ { if data[i] != codeJSON[i] { println("re-marshal: changed at byte", i) println("orig: ", string(codeJSON[i-10:i+10])) println("new: ", string(data[i-10:i+10])) break } } panic("re-marshal code.json: different result") } } func BenchmarkCodeEncoder(b *testing.B) { if codeJSON == nil { b.StopTimer() codeInit() b.StartTimer() } enc := NewEncoder(ioutil.Discard) for i := 0; i < b.N; i++ { if err := enc.Encode(&codeStruct); err != nil { b.Fatal("Encode:", err) } } 
b.SetBytes(int64(len(codeJSON))) } func BenchmarkCodeMarshal(b *testing.B) { if codeJSON == nil { b.StopTimer() codeInit() b.StartTimer() } for i := 0; i < b.N; i++ { if _, err := Marshal(&codeStruct); err != nil { b.Fatal("Marshal:", err) } } b.SetBytes(int64(len(codeJSON))) } func BenchmarkCodeDecoder(b *testing.B) { if codeJSON == nil { b.StopTimer() codeInit() b.StartTimer() } var buf bytes.Buffer dec := NewDecoder(&buf) var r codeResponse for i := 0; i < b.N; i++ { buf.Write(codeJSON) // hide EOF buf.WriteByte('\n') buf.WriteByte('\n') buf.WriteByte('\n') if err := dec.Decode(&r); err != nil { b.Fatal("Decode:", err) } } b.SetBytes(int64(len(codeJSON))) } func BenchmarkDecoderStream(b *testing.B) { b.StopTimer() var buf bytes.Buffer dec := NewDecoder(&buf) buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") var x interface{} if err := dec.Decode(&x); err != nil { b.Fatal("Decode:", err) } ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" b.StartTimer() for i := 0; i < b.N; i++ { if i%300000 == 0 { buf.WriteString(ones) } x = nil if err := dec.Decode(&x); err != nil || x != 1.0 { b.Fatalf("Decode: %v after %d", err, i) } } } func BenchmarkCodeUnmarshal(b *testing.B) { if codeJSON == nil { b.StopTimer() codeInit() b.StartTimer() } for i := 0; i < b.N; i++ { var r codeResponse if err := Unmarshal(codeJSON, &r); err != nil { b.Fatal("Unmarshal:", err) } } b.SetBytes(int64(len(codeJSON))) } func BenchmarkCodeUnmarshalReuse(b *testing.B) { if codeJSON == nil { b.StopTimer() codeInit() b.StartTimer() } var r codeResponse for i := 0; i < b.N; i++ { if err := Unmarshal(codeJSON, &r); err != nil { b.Fatal("Unmarshal:", err) } } } func BenchmarkUnmarshalString(b *testing.B) { data := []byte(`"hello, world"`) var s string for i := 0; i < b.N; i++ { if err := Unmarshal(data, &s); err != nil { b.Fatal("Unmarshal:", err) } } } func BenchmarkUnmarshalFloat64(b *testing.B) { var f float64 data := []byte(`3.14`) for i := 0; i < b.N; i++ { if err := 
Unmarshal(data, &f); err != nil { b.Fatal("Unmarshal:", err) } } } func BenchmarkUnmarshalInt64(b *testing.B) { var x int64 data := []byte(`3`) for i := 0; i < b.N; i++ { if err := Unmarshal(data, &x); err != nil { b.Fatal("Unmarshal:", err) } } } func BenchmarkIssue10335(b *testing.B) { b.ReportAllocs() var s struct{} j := []byte(`{"a":{ }}`) for n := 0; n < b.N; n++ { if err := Unmarshal(j, &s); err != nil { b.Fatal(err) } } } mgo-r2016.08.01/internal/json/decode.go000066400000000000000000001173001274772341400174310ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Represents JSON data structure using native Go types: booleans, floats, // strings, arrays, and maps. package json import ( "bytes" "encoding" "encoding/base64" "errors" "fmt" "reflect" "runtime" "strconv" "unicode" "unicode/utf16" "unicode/utf8" ) // Unmarshal parses the JSON-encoded data and stores the result // in the value pointed to by v. // // Unmarshal uses the inverse of the encodings that // Marshal uses, allocating maps, slices, and pointers as necessary, // with the following additional rules: // // To unmarshal JSON into a pointer, Unmarshal first handles the case of // the JSON being the JSON literal null. In that case, Unmarshal sets // the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into // the value pointed at by the pointer. If the pointer is nil, Unmarshal // allocates a new value for it to point to. // // To unmarshal JSON into a struct, Unmarshal matches incoming object // keys to the keys used by Marshal (either the struct field name or its tag), // preferring an exact match but also accepting a case-insensitive match. // Unmarshal will only set exported fields of the struct. 
// // To unmarshal JSON into an interface value, // Unmarshal stores one of these in the interface value: // // bool, for JSON booleans // float64, for JSON numbers // string, for JSON strings // []interface{}, for JSON arrays // map[string]interface{}, for JSON objects // nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length // to zero and then appends each element to the slice. // As a special case, to unmarshal an empty JSON array into a slice, // Unmarshal replaces the slice with a new empty slice. // // To unmarshal a JSON array into a Go array, Unmarshal decodes // JSON array elements into corresponding Go array elements. // If the Go array is smaller than the JSON array, // the additional JSON array elements are discarded. // If the JSON array is smaller than the Go array, // the additional Go array elements are set to zero values. // // To unmarshal a JSON object into a map, Unmarshal first establishes a map to // use, If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal // reuses the existing map, keeping existing entries. Unmarshal then stores key- // value pairs from the JSON object into the map. The map's key type must // either be a string or implement encoding.TextUnmarshaler. // // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. // If no more serious errors are encountered, Unmarshal returns // an UnmarshalTypeError describing the earliest such error. // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean // ``not present,'' unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. 
// Instead, they are replaced by the Unicode replacement // character U+FFFD. // func Unmarshal(data []byte, v interface{}) error { // Check for well-formedness. // Avoids filling out half a data structure // before discovering a JSON syntax error. var d decodeState err := checkValid(data, &d.scan) if err != nil { return err } d.init(data) return d.unmarshal(v) } // Unmarshaler is the interface implemented by types // that can unmarshal a JSON description of themselves. // The input can be assumed to be a valid encoding of // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. type Unmarshaler interface { UnmarshalJSON([]byte) error } // An UnmarshalTypeError describes a JSON value that was // not appropriate for a value of a specific Go type. type UnmarshalTypeError struct { Value string // description of JSON value - "bool", "array", "number -5" Type reflect.Type // type of Go value it could not be assigned to Offset int64 // error occurred after reading Offset bytes } func (e *UnmarshalTypeError) Error() string { return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() } // An UnmarshalFieldError describes a JSON object key that // led to an unexported (and therefore unwritable) struct field. // (No longer used; kept for compatibility.) type UnmarshalFieldError struct { Key string Type reflect.Type Field reflect.StructField } func (e *UnmarshalFieldError) Error() string { return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() } // An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. // (The argument to Unmarshal must be a non-nil pointer.) 
type InvalidUnmarshalError struct { Type reflect.Type } func (e *InvalidUnmarshalError) Error() string { if e.Type == nil { return "json: Unmarshal(nil)" } if e.Type.Kind() != reflect.Ptr { return "json: Unmarshal(non-pointer " + e.Type.String() + ")" } return "json: Unmarshal(nil " + e.Type.String() + ")" } func (d *decodeState) unmarshal(v interface{}) (err error) { defer func() { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } err = r.(error) } }() rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr || rv.IsNil() { return &InvalidUnmarshalError{reflect.TypeOf(v)} } d.scan.reset() // We decode rv not rv.Elem because the Unmarshaler interface // test must be applied at the top level of the value. d.value(rv) return d.savedError } // A Number represents a JSON number literal. type Number string // String returns the literal text of the number. func (n Number) String() string { return string(n) } // Float64 returns the number as a float64. func (n Number) Float64() (float64, error) { return strconv.ParseFloat(string(n), 64) } // Int64 returns the number as an int64. func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } // isValidNumber reports whether s is a valid JSON number literal. func isValidNumber(s string) bool { // This function implements the JSON numbers grammar. // See https://tools.ietf.org/html/rfc7159#section-6 // and http://json.org/number.gif if s == "" { return false } // Optional - if s[0] == '-' { s = s[1:] if s == "" { return false } } // Digits switch { default: return false case s[0] == '0': s = s[1:] case '1' <= s[0] && s[0] <= '9': s = s[1:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { s = s[1:] } } // . followed by 1 or more digits. if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { s = s[2:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { s = s[1:] } } // e or E followed by an optional - or + and // 1 or more digits. 
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { s = s[1:] if s[0] == '+' || s[0] == '-' { s = s[1:] if s == "" { return false } } for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { s = s[1:] } } // Make sure we are at the end. return s == "" } // decodeState represents the state while decoding a JSON value. type decodeState struct { data []byte off int // read offset in data scan scanner nextscan scanner // for calls to nextValue savedError error useNumber bool ext Extension } // errPhase is used for errors that should not happen unless // there is a bug in the JSON decoder or something is editing // the data slice while the decoder executes. var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil return d } // error aborts the decoding by panicking with err. func (d *decodeState) error(err error) { panic(err) } // saveError saves the first err it is called with, // for reporting at the end of the unmarshal. func (d *decodeState) saveError(err error) { if d.savedError == nil { d.savedError = err } } // next cuts off and returns the next full JSON value in d.data[d.off:]. // The next value is known to be an object or array, not a literal. func (d *decodeState) next() []byte { c := d.data[d.off] item, rest, err := nextValue(d.data[d.off:], &d.nextscan) if err != nil { d.error(err) } d.off = len(d.data) - len(rest) // Our scanner has seen the opening brace/bracket // and thinks we're still in the middle of the object. // invent a closing brace/bracket to get it out. if c == '{' { d.scan.step(&d.scan, '}') } else if c == '[' { d.scan.step(&d.scan, ']') } else { // Was inside a function name. Get out of it. d.scan.step(&d.scan, '(') d.scan.step(&d.scan, ')') } return item } // scanWhile processes bytes in d.data[d.off:] until it // receives a scan code not equal to op. // It updates d.off and returns the new scan code. 
func (d *decodeState) scanWhile(op int) int { var newOp int for { if d.off >= len(d.data) { newOp = d.scan.eof() d.off = len(d.data) + 1 // mark processed EOF with len+1 } else { c := d.data[d.off] d.off++ newOp = d.scan.step(&d.scan, c) } if newOp != op { break } } return newOp } // value decodes a JSON value from d.data[d.off:] into the value. // it updates d.off to point past the decoded value. func (d *decodeState) value(v reflect.Value) { if !v.IsValid() { _, rest, err := nextValue(d.data[d.off:], &d.nextscan) if err != nil { d.error(err) } d.off = len(d.data) - len(rest) // d.scan thinks we're still at the beginning of the item. // Feed in an empty string - the shortest, simplest value - // so that it knows we got to the end of the value. if d.scan.redo { // rewind. d.scan.redo = false d.scan.step = stateBeginValue } d.scan.step(&d.scan, '"') d.scan.step(&d.scan, '"') n := len(d.scan.parseState) if n > 0 && d.scan.parseState[n-1] == parseObjectKey { // d.scan thinks we just read an object key; finish the object d.scan.step(&d.scan, ':') d.scan.step(&d.scan, '"') d.scan.step(&d.scan, '"') d.scan.step(&d.scan, '}') } return } switch op := d.scanWhile(scanSkipSpace); op { default: d.error(errPhase) case scanBeginArray: d.array(v) case scanBeginObject: d.object(v) case scanBeginLiteral: d.literal(v) case scanBeginName: d.name(v) } } type unquotedValue struct{} // valueQuoted is like value but decodes a // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. 
func (d *decodeState) valueQuoted() interface{} { switch op := d.scanWhile(scanSkipSpace); op { default: d.error(errPhase) case scanBeginArray: d.array(reflect.Value{}) case scanBeginObject: d.object(reflect.Value{}) case scanBeginName: switch v := d.nameInterface().(type) { case nil, string: return v } case scanBeginLiteral: switch v := d.literalInterface().(type) { case nil, string: return v } } return unquotedValue{} } // indirect walks down v allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. // if decodingNull is true, indirect stops at the last pointer so it can be set to nil. func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { v = v.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { v = e continue } } if v.Kind() != reflect.Ptr { break } if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { break } if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(Unmarshaler); ok { return u, nil, v } if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { return nil, u, v } } v = v.Elem() } return nil, nil, v } // array consumes an array from d.data[d.off-1:], decoding into the value v. // the first byte of the array ('[') has been read already. func (d *decodeState) array(v reflect.Value) { // Check for unmarshaler. 
u, ut, pv := d.indirect(v, false) if u != nil { d.off-- err := u.UnmarshalJSON(d.next()) if err != nil { d.error(err) } return } if ut != nil { d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) d.off-- d.next() return } v = pv // Check type of target. switch v.Kind() { case reflect.Interface: if v.NumMethod() == 0 { // Decoding into nil interface? Switch to non-reflect code. v.Set(reflect.ValueOf(d.arrayInterface())) return } // Otherwise it's invalid. fallthrough default: d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) d.off-- d.next() return case reflect.Array: case reflect.Slice: break } i := 0 for { // Look ahead for ] - can only happen on first iteration. op := d.scanWhile(scanSkipSpace) if op == scanEndArray { break } // Back up so d.value can have the byte we just read. d.off-- d.scan.undo(op) // Get element of array, growing if necessary. if v.Kind() == reflect.Slice { // Grow slice if necessary if i >= v.Cap() { newcap := v.Cap() + v.Cap()/2 if newcap < 4 { newcap = 4 } newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) reflect.Copy(newv, v) v.Set(newv) } if i >= v.Len() { v.SetLen(i + 1) } } if i < v.Len() { // Decode into element. d.value(v.Index(i)) } else { // Ran out of fixed array: skip. d.value(reflect.Value{}) } i++ // Next token must be , or ]. op = d.scanWhile(scanSkipSpace) if op == scanEndArray { break } if op != scanArrayValue { d.error(errPhase) } } if i < v.Len() { if v.Kind() == reflect.Array { // Array. Zero the rest. z := reflect.Zero(v.Type().Elem()) for ; i < v.Len(); i++ { v.Index(i).Set(z) } } else { v.SetLen(i) } } if i == 0 && v.Kind() == reflect.Slice { v.Set(reflect.MakeSlice(v.Type(), 0, 0)) } } var nullLiteral = []byte("null") var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() // object consumes an object from d.data[d.off-1:], decoding into the value v. // the first byte ('{') of the object has been read already. 
func (d *decodeState) object(v reflect.Value) { // Check for unmarshaler. u, ut, pv := d.indirect(v, false) if d.storeKeyed(pv) { return } if u != nil { d.off-- err := u.UnmarshalJSON(d.next()) if err != nil { d.error(err) } return } if ut != nil { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } v = pv // Decoding into nil interface? Switch to non-reflect code. if v.Kind() == reflect.Interface && v.NumMethod() == 0 { v.Set(reflect.ValueOf(d.objectInterface())) return } // Check type of target: // struct or // map[string]T or map[encoding.TextUnmarshaler]T switch v.Kind() { case reflect.Map: // Map key must either have string kind or be an encoding.TextUnmarshaler. t := v.Type() if t.Key().Kind() != reflect.String && !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } if v.IsNil() { v.Set(reflect.MakeMap(t)) } case reflect.Struct: default: d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } var mapElem reflect.Value empty := true for { // Read opening " of string key or closing }. op := d.scanWhile(scanSkipSpace) if op == scanEndObject { if !empty && !d.ext.trailingCommas { d.syntaxError("beginning of object key string") } break } empty = false if op == scanBeginName { if !d.ext.unquotedKeys { d.syntaxError("beginning of object key string") } } else if op != scanBeginLiteral { d.error(errPhase) } unquotedKey := op == scanBeginName // Read key. start := d.off - 1 op = d.scanWhile(scanContinue) item := d.data[start : d.off-1] var key []byte if unquotedKey { key = item // TODO Fix code below to quote item when necessary. } else { var ok bool key, ok = unquoteBytes(item) if !ok { d.error(errPhase) } } // Figure out field corresponding to key. 
var subv reflect.Value destring := false // whether the value is wrapped in a string to be decoded first if v.Kind() == reflect.Map { elemType := v.Type().Elem() if !mapElem.IsValid() { mapElem = reflect.New(elemType).Elem() } else { mapElem.Set(reflect.Zero(elemType)) } subv = mapElem } else { var f *field fields := cachedTypeFields(v.Type()) for i := range fields { ff := &fields[i] if bytes.Equal(ff.nameBytes, key) { f = ff break } if f == nil && ff.equalFold(ff.nameBytes, key) { f = ff } } if f != nil { subv = v destring = f.quoted for _, i := range f.index { if subv.Kind() == reflect.Ptr { if subv.IsNil() { subv.Set(reflect.New(subv.Type().Elem())) } subv = subv.Elem() } subv = subv.Field(i) } } } // Read : before value. if op == scanSkipSpace { op = d.scanWhile(scanSkipSpace) } if op != scanObjectKey { d.error(errPhase) } // Read value. if destring { switch qv := d.valueQuoted().(type) { case nil: d.literalStore(nullLiteral, subv, false) case string: d.literalStore([]byte(qv), subv, true) default: d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) } } else { d.value(subv) } // Write value back to map; // if using struct, subv points into struct already. if v.Kind() == reflect.Map { kt := v.Type().Key() var kv reflect.Value switch { case kt.Kind() == reflect.String: kv = reflect.ValueOf(key).Convert(v.Type().Key()) case reflect.PtrTo(kt).Implements(textUnmarshalerType): kv = reflect.New(v.Type().Key()) d.literalStore(item, kv, true) kv = kv.Elem() default: panic("json: Unexpected key type") // should never occur } v.SetMapIndex(kv, subv) } // Next token must be , or }. op = d.scanWhile(scanSkipSpace) if op == scanEndObject { break } if op != scanObjectValue { d.error(errPhase) } } } // isNull returns whether there's a null literal at the provided offset. 
func (d *decodeState) isNull(off int) bool { if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' { return false } d.nextscan.reset() for i, c := range d.data[off:] { if i > 4 { return false } switch d.nextscan.step(&d.nextscan, c) { case scanContinue, scanBeginName: continue } break } return true } // name consumes a const or function from d.data[d.off-1:], decoding into the value v. // the first byte of the function name has been read already. func (d *decodeState) name(v reflect.Value) { if d.isNull(d.off-1) { d.literal(v) return } // Check for unmarshaler. u, ut, pv := d.indirect(v, false) if d.storeKeyed(pv) { return } if u != nil { d.off-- err := u.UnmarshalJSON(d.next()) if err != nil { d.error(err) } return } if ut != nil { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over function in input return } v = pv // Decoding into nil interface? Switch to non-reflect code. if v.Kind() == reflect.Interface && v.NumMethod() == 0 { out := d.nameInterface() if out == nil { v.Set(reflect.Zero(v.Type())) } else { v.Set(reflect.ValueOf(out)) } return } nameStart := d.off - 1 op := d.scanWhile(scanContinue) name := d.data[nameStart : d.off-1] if op != scanParam { // Back up so the byte just read is consumed next. d.off-- d.scan.undo(op) if l, ok := d.convertLiteral(name); ok { d.storeValue(v, l) return } d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) } funcName := string(name) funcData := d.ext.funcs[funcName] if funcData.key == "" { d.error(fmt.Errorf("json: unknown function %q", funcName)) } // Check type of target: // struct or // map[string]T or map[encoding.TextUnmarshaler]T switch v.Kind() { case reflect.Map: // Map key must either have string kind or be an encoding.TextUnmarshaler. 
t := v.Type() if t.Key().Kind() != reflect.String && !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } if v.IsNil() { v.Set(reflect.MakeMap(t)) } case reflect.Struct: default: d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } // TODO Fix case of func field as map. //topv := v // Figure out field corresponding to function. key := []byte(funcData.key) if v.Kind() == reflect.Map { elemType := v.Type().Elem() v = reflect.New(elemType).Elem() } else { var f *field fields := cachedTypeFields(v.Type()) for i := range fields { ff := &fields[i] if bytes.Equal(ff.nameBytes, key) { f = ff break } if f == nil && ff.equalFold(ff.nameBytes, key) { f = ff } } if f != nil { for _, i := range f.index { if v.Kind() == reflect.Ptr { if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } v = v.Elem() } v = v.Field(i) } if v.Kind() == reflect.Ptr { if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } v = v.Elem() } } } // Check for unmarshaler on func field itself. u, ut, pv = d.indirect(v, false) if u != nil { d.off = nameStart err := u.UnmarshalJSON(d.next()) if err != nil { d.error(err) } return } var mapElem reflect.Value // Parse function arguments. for i := 0; ; i++ { // closing ) - can only happen on first iteration. op := d.scanWhile(scanSkipSpace) if op == scanEndParams { break } // Back up so d.value can have the byte we just read. d.off-- d.scan.undo(op) if i >= len(funcData.args) { d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) } key := []byte(funcData.args[i]) // Figure out field corresponding to key. 
var subv reflect.Value destring := false // whether the value is wrapped in a string to be decoded first if v.Kind() == reflect.Map { elemType := v.Type().Elem() if !mapElem.IsValid() { mapElem = reflect.New(elemType).Elem() } else { mapElem.Set(reflect.Zero(elemType)) } subv = mapElem } else { var f *field fields := cachedTypeFields(v.Type()) for i := range fields { ff := &fields[i] if bytes.Equal(ff.nameBytes, key) { f = ff break } if f == nil && ff.equalFold(ff.nameBytes, key) { f = ff } } if f != nil { subv = v destring = f.quoted for _, i := range f.index { if subv.Kind() == reflect.Ptr { if subv.IsNil() { subv.Set(reflect.New(subv.Type().Elem())) } subv = subv.Elem() } subv = subv.Field(i) } } } // Read value. if destring { switch qv := d.valueQuoted().(type) { case nil: d.literalStore(nullLiteral, subv, false) case string: d.literalStore([]byte(qv), subv, true) default: d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) } } else { d.value(subv) } // Write value back to map; // if using struct, subv points into struct already. if v.Kind() == reflect.Map { kt := v.Type().Key() var kv reflect.Value switch { case kt.Kind() == reflect.String: kv = reflect.ValueOf(key).Convert(v.Type().Key()) case reflect.PtrTo(kt).Implements(textUnmarshalerType): kv = reflect.New(v.Type().Key()) d.literalStore(key, kv, true) kv = kv.Elem() default: panic("json: Unexpected key type") // should never occur } v.SetMapIndex(kv, subv) } // Next token must be , or ). op = d.scanWhile(scanSkipSpace) if op == scanEndParams { break } if op != scanParam { d.error(errPhase) } } } // keyed attempts to decode an object or function using a keyed doc extension, // and returns the value and true on success, or nil and false otherwise. func (d *decodeState) keyed() (interface{}, bool) { if len(d.ext.keyed) == 0 { return nil, false } unquote := false // Look-ahead first key to check for a keyed document extension. 
d.nextscan.reset() var start, end int for i, c := range d.data[d.off-1:] { switch op := d.nextscan.step(&d.nextscan, c); op { case scanSkipSpace, scanContinue, scanBeginObject: continue case scanBeginLiteral, scanBeginName: unquote = op == scanBeginLiteral start = i continue } end = i break } name := d.data[d.off-1+start : d.off-1+end] var key []byte var ok bool if unquote { key, ok = unquoteBytes(name) if !ok { d.error(errPhase) } } else { funcData, ok := d.ext.funcs[string(name)] if !ok { return nil, false } key = []byte(funcData.key) } decode, ok := d.ext.keyed[string(key)] if !ok { return nil, false } d.off-- out, err := decode(d.next()) if err != nil { d.error(err) } return out, true } func (d *decodeState) storeKeyed(v reflect.Value) bool { keyed, ok := d.keyed() if !ok { return false } d.storeValue(v, keyed) return true } var ( trueBytes = []byte("true") falseBytes = []byte("false") nullBytes = []byte("null") ) func (d *decodeState) storeValue(v reflect.Value, from interface{}) { switch from { case nil: d.literalStore(nullBytes, v, false) return case true: d.literalStore(trueBytes, v, false) return case false: d.literalStore(falseBytes, v, false) return } fromv := reflect.ValueOf(from) for fromv.Kind() == reflect.Ptr && !fromv.IsNil() { fromv = fromv.Elem() } fromt := fromv.Type() for v.Kind() == reflect.Ptr && !v.IsNil() { v = v.Elem() } vt := v.Type() if fromt.AssignableTo(vt) { v.Set(fromv) } else if fromt.ConvertibleTo(vt) { v.Set(fromv.Convert(vt)) } else { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) } } func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) { if len(name) == 0 { return nil, false } switch name[0] { case 't': if bytes.Equal(name, trueBytes) { return true, true } case 'f': if bytes.Equal(name, falseBytes) { return false, true } case 'n': if bytes.Equal(name, nullBytes) { return nil, true } } if l, ok := d.ext.consts[string(name)]; ok { return l, true } return nil, false } // literal consumes a 
literal from d.data[d.off-1:], decoding into the value v. // The first byte of the literal has been read already // (that's how the caller knows it's a literal). func (d *decodeState) literal(v reflect.Value) { // All bytes inside literal return scanContinue op code. start := d.off - 1 op := d.scanWhile(scanContinue) // Scan read one byte too far; back up. d.off-- d.scan.undo(op) d.literalStore(d.data[start:d.off], v, false) } // convertNumber converts the number literal s to a float64 or a Number // depending on the setting of d.useNumber. func (d *decodeState) convertNumber(s string) (interface{}, error) { if d.useNumber { return Number(s), nil } f, err := strconv.ParseFloat(s, 64) if err != nil { return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} } return f, nil } var numberType = reflect.TypeOf(Number("")) // literalStore decodes a literal stored in item into v. // // fromQuoted indicates whether this literal came from unwrapping a // string from the ",string" struct tag option. this is used only to // produce more helpful error messages. func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { // Check for unmarshaler. 
if len(item) == 0 { //Empty string given d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) return } wantptr := item[0] == 'n' // null u, ut, pv := d.indirect(v, wantptr) if u != nil { err := u.UnmarshalJSON(item) if err != nil { d.error(err) } return } if ut != nil { if item[0] != '"' { if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) } return } s, ok := unquoteBytes(item) if !ok { if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(errPhase) } } err := ut.UnmarshalText(s) if err != nil { d.error(err) } return } v = pv switch c := item[0]; c { case 'n': // null switch v.Kind() { case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: v.Set(reflect.Zero(v.Type())) // otherwise, ignore null for primitives/string } case 't', 'f': // true, false value := c == 't' switch v.Kind() { default: if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) } case reflect.Bool: v.SetBool(value) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(value)) } else { d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) } } case '"': // string s, ok := unquoteBytes(item) if !ok { if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(errPhase) } } switch v.Kind() { default: d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) case reflect.Slice: if v.Type().Elem().Kind() != reflect.Uint8 { d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) break } b := make([]byte, 
base64.StdEncoding.DecodedLen(len(s))) n, err := base64.StdEncoding.Decode(b, s) if err != nil { d.saveError(err) break } v.SetBytes(b[:n]) case reflect.String: v.SetString(string(s)) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(string(s))) } else { d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) } } default: // number if c != '-' && (c < '0' || c > '9') { if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(errPhase) } } s := string(item) switch v.Kind() { default: if v.Kind() == reflect.String && v.Type() == numberType { v.SetString(s) if !isValidNumber(s) { d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) } break } if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) } case reflect.Interface: n, err := d.convertNumber(s) if err != nil { d.saveError(err) break } if v.NumMethod() != 0 { d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) break } v.Set(reflect.ValueOf(n)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: n, err := strconv.ParseInt(s, 10, 64) if err != nil || v.OverflowInt(n) { d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) break } v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: n, err := strconv.ParseUint(s, 10, 64) if err != nil || v.OverflowUint(n) { d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) break } v.SetUint(n) case reflect.Float32, reflect.Float64: n, err := strconv.ParseFloat(s, v.Type().Bits()) if err != nil || v.OverflowFloat(n) { d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) break } v.SetFloat(n) } } } // The xxxInterface routines build 
up a value to be stored // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. // valueInterface is like value but returns interface{} func (d *decodeState) valueInterface() interface{} { switch d.scanWhile(scanSkipSpace) { default: d.error(errPhase) panic("unreachable") case scanBeginArray: return d.arrayInterface() case scanBeginObject: return d.objectInterface() case scanBeginLiteral: return d.literalInterface() case scanBeginName: return d.nameInterface() } } func (d *decodeState) syntaxError(expected string) { msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected) d.error(&SyntaxError{msg, int64(d.off)}) } // arrayInterface is like array but returns []interface{}. func (d *decodeState) arrayInterface() []interface{} { var v = make([]interface{}, 0) for { // Look ahead for ] - can only happen on first iteration. op := d.scanWhile(scanSkipSpace) if op == scanEndArray { if len(v) > 0 && !d.ext.trailingCommas { d.syntaxError("beginning of value") } break } // Back up so d.value can have the byte we just read. d.off-- d.scan.undo(op) v = append(v, d.valueInterface()) // Next token must be , or ]. op = d.scanWhile(scanSkipSpace) if op == scanEndArray { break } if op != scanArrayValue { d.error(errPhase) } } return v } // objectInterface is like object but returns map[string]interface{}. func (d *decodeState) objectInterface() interface{} { v, ok := d.keyed() if ok { return v } m := make(map[string]interface{}) for { // Read opening " of string key or closing }. op := d.scanWhile(scanSkipSpace) if op == scanEndObject { if len(m) > 0 && !d.ext.trailingCommas { d.syntaxError("beginning of object key string") } break } if op == scanBeginName { if !d.ext.unquotedKeys { d.syntaxError("beginning of object key string") } } else if op != scanBeginLiteral { d.error(errPhase) } unquotedKey := op == scanBeginName // Read string key. 
start := d.off - 1 op = d.scanWhile(scanContinue) item := d.data[start : d.off-1] var key string if unquotedKey { key = string(item) } else { var ok bool key, ok = unquote(item) if !ok { d.error(errPhase) } } // Read : before value. if op == scanSkipSpace { op = d.scanWhile(scanSkipSpace) } if op != scanObjectKey { d.error(errPhase) } // Read value. m[key] = d.valueInterface() // Next token must be , or }. op = d.scanWhile(scanSkipSpace) if op == scanEndObject { break } if op != scanObjectValue { d.error(errPhase) } } return m } // literalInterface is like literal but returns an interface value. func (d *decodeState) literalInterface() interface{} { // All bytes inside literal return scanContinue op code. start := d.off - 1 op := d.scanWhile(scanContinue) // Scan read one byte too far; back up. d.off-- d.scan.undo(op) item := d.data[start:d.off] switch c := item[0]; c { case 'n': // null return nil case 't', 'f': // true, false return c == 't' case '"': // string s, ok := unquote(item) if !ok { d.error(errPhase) } return s default: // number if c != '-' && (c < '0' || c > '9') { d.error(errPhase) } n, err := d.convertNumber(string(item)) if err != nil { d.saveError(err) } return n } } // nameInterface is like function but returns map[string]interface{}. func (d *decodeState) nameInterface() interface{} { v, ok := d.keyed() if ok { return v } nameStart := d.off - 1 op := d.scanWhile(scanContinue) name := d.data[nameStart : d.off-1] if op != scanParam { // Back up so the byte just read is consumed next. d.off-- d.scan.undo(op) if l, ok := d.convertLiteral(name); ok { return l } d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) } funcName := string(name) funcData := d.ext.funcs[funcName] if funcData.key == "" { d.error(fmt.Errorf("json: unknown function %q", funcName)) } m := make(map[string]interface{}) for i := 0; ; i++ { // Look ahead for ) - can only happen on first iteration. 
op := d.scanWhile(scanSkipSpace) if op == scanEndParams { break } // Back up so d.value can have the byte we just read. d.off-- d.scan.undo(op) if i >= len(funcData.args) { d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) } m[funcData.args[i]] = d.valueInterface() // Next token must be , or ). op = d.scanWhile(scanSkipSpace) if op == scanEndParams { break } if op != scanParam { d.error(errPhase) } } return map[string]interface{}{funcData.key: m} } // getu4 decodes \uXXXX from the beginning of s, returning the hex value, // or it returns -1. func getu4(s []byte) rune { if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { return -1 } r, err := strconv.ParseUint(string(s[2:6]), 16, 64) if err != nil { return -1 } return rune(r) } // unquote converts a quoted JSON string literal s into an actual string t. // The rules are different than for Go, so cannot use strconv.Unquote. func unquote(s []byte) (t string, ok bool) { s, ok = unquoteBytes(s) t = string(s) return } func unquoteBytes(s []byte) (t []byte, ok bool) { if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { return } s = s[1 : len(s)-1] // Check for unusual characters. If there are none, // then no unquoting is needed, so return a slice of the // original bytes. r := 0 for r < len(s) { c := s[r] if c == '\\' || c == '"' || c < ' ' { break } if c < utf8.RuneSelf { r++ continue } rr, size := utf8.DecodeRune(s[r:]) if rr == utf8.RuneError && size == 1 { break } r += size } if r == len(s) { return s, true } b := make([]byte, len(s)+2*utf8.UTFMax) w := copy(b, s[0:r]) for r < len(s) { // Out of room? Can only happen if s is full of // malformed UTF-8 and we're replacing each // byte with RuneError. 
if w >= len(b)-2*utf8.UTFMax { nb := make([]byte, (len(b)+utf8.UTFMax)*2) copy(nb, b[0:w]) b = nb } switch c := s[r]; { case c == '\\': r++ if r >= len(s) { return } switch s[r] { default: return case '"', '\\', '/', '\'': b[w] = s[r] r++ w++ case 'b': b[w] = '\b' r++ w++ case 'f': b[w] = '\f' r++ w++ case 'n': b[w] = '\n' r++ w++ case 'r': b[w] = '\r' r++ w++ case 't': b[w] = '\t' r++ w++ case 'u': r-- rr := getu4(s[r:]) if rr < 0 { return } r += 6 if utf16.IsSurrogate(rr) { rr1 := getu4(s[r:]) if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { // A valid pair; consume. r += 6 w += utf8.EncodeRune(b[w:], dec) break } // Invalid surrogate; fall back to replacement rune. rr = unicode.ReplacementChar } w += utf8.EncodeRune(b[w:], rr) } // Quote, control characters are invalid. case c == '"', c < ' ': return // ASCII case c < utf8.RuneSelf: b[w] = c r++ w++ // Coerce to well-formed UTF-8. default: rr, size := utf8.DecodeRune(s[r:]) r += size w += utf8.EncodeRune(b[w:], rr) } } return b[0:w], true } mgo-r2016.08.01/internal/json/decode_test.go000066400000000000000000001041041274772341400204660ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package json import ( "bytes" "encoding" "errors" "fmt" "image" "net" "reflect" "strings" "testing" "time" ) type T struct { X string Y int Z int `json:"-"` } type U struct { Alphabet string `json:"alpha"` } type V struct { F1 interface{} F2 int32 F3 Number } // ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and // without UseNumber var ifaceNumAsFloat64 = map[string]interface{}{ "k1": float64(1), "k2": "s", "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, } var ifaceNumAsNumber = map[string]interface{}{ "k1": Number("1"), "k2": "s", "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, } type tx struct { x int } // A type that can unmarshal itself. type unmarshaler struct { T bool } func (u *unmarshaler) UnmarshalJSON(b []byte) error { *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. return nil } type ustruct struct { M unmarshaler } type unmarshalerText struct { A, B string } // needed for re-marshaling tests func (u unmarshalerText) MarshalText() ([]byte, error) { return []byte(u.A + ":" + u.B), nil } func (u *unmarshalerText) UnmarshalText(b []byte) error { pos := bytes.Index(b, []byte(":")) if pos == -1 { return errors.New("missing separator") } u.A, u.B = string(b[:pos]), string(b[pos+1:]) return nil } var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) type ustructText struct { M unmarshalerText } var ( um0, um1 unmarshaler // target2 of unmarshaling ump = &um1 umtrue = unmarshaler{true} umslice = []unmarshaler{{true}} umslicep = new([]unmarshaler) umstruct = ustruct{unmarshaler{true}} um0T, um1T unmarshalerText // target2 of unmarshaling umpType = &um1T umtrueXY = unmarshalerText{"x", "y"} umsliceXY = []unmarshalerText{{"x", "y"}} umslicepType = new([]unmarshalerText) umstructType = new(ustructText) umstructXY = ustructText{unmarshalerText{"x", "y"}} ummapType 
= map[unmarshalerText]bool{} ummapXY = map[unmarshalerText]bool{unmarshalerText{"x", "y"}: true} ) // Test data structures for anonymous fields. type Point struct { Z int } type Top struct { Level0 int Embed0 *Embed0a *Embed0b `json:"e,omitempty"` // treated as named Embed0c `json:"-"` // ignored Loop Embed0p // has Point with X, Y, used Embed0q // has Point with Z, used embed // contains exported field } type Embed0 struct { Level1a int // overridden by Embed0a's Level1a with json tag Level1b int // used because Embed0a's Level1b is renamed Level1c int // used because Embed0a's Level1c is ignored Level1d int // annihilated by Embed0a's Level1d Level1e int `json:"x"` // annihilated by Embed0a.Level1e } type Embed0a struct { Level1a int `json:"Level1a,omitempty"` Level1b int `json:"LEVEL1B,omitempty"` Level1c int `json:"-"` Level1d int // annihilated by Embed0's Level1d Level1f int `json:"x"` // annihilated by Embed0's Level1e } type Embed0b Embed0 type Embed0c Embed0 type Embed0p struct { image.Point } type Embed0q struct { Point } type embed struct { Q int } type Loop struct { Loop1 int `json:",omitempty"` Loop2 int `json:",omitempty"` *Loop } // From reflect test: // The X in S6 and S7 annihilate, but they also block the X in S8.S9. type S5 struct { S6 S7 S8 } type S6 struct { X int } type S7 S6 type S8 struct { S9 } type S9 struct { X int Y int } // From reflect test: // The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. type S10 struct { S11 S12 S13 } type S11 struct { S6 } type S12 struct { S6 } type S13 struct { S8 } type unmarshalTest struct { in string ptr interface{} out interface{} err error useNumber bool } type Ambig struct { // Given "hello", the first match should win. 
First int `json:"HELLO"` Second int `json:"Hello"` } type XYZ struct { X interface{} Y interface{} Z interface{} } func sliceAddr(x []int) *[]int { return &x } func mapAddr(x map[string]int) *map[string]int { return &x } var unmarshalTests = []unmarshalTest{ // basic types {in: `true`, ptr: new(bool), out: true}, {in: `1`, ptr: new(int), out: 1}, {in: `1.2`, ptr: new(float64), out: 1.2}, {in: `-5`, ptr: new(int16), out: int16(-5)}, {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, {in: `2`, ptr: new(Number), out: Number("2")}, {in: `2`, ptr: new(interface{}), out: float64(2.0)}, {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, {in: "null", ptr: new(interface{}), out: nil}, {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, // raw values with whitespace {in: "\n true ", ptr: new(bool), out: true}, {in: "\t 1 ", ptr: new(int), out: 1}, {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, // Z has a "-" tag. 
{in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, // syntax errors {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, // raw value errors {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, // array tests {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, // empty array to interface test {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, // composite tests {in: allValueIndent, ptr: new(All), out: allValue}, {in: 
allValueCompact, ptr: new(All), out: allValue}, {in: allValueIndent, ptr: new(*All), out: &allValue}, {in: allValueCompact, ptr: new(*All), out: &allValue}, {in: pallValueIndent, ptr: new(All), out: pallValue}, {in: pallValueCompact, ptr: new(All), out: pallValue}, {in: pallValueIndent, ptr: new(*All), out: &pallValue}, {in: pallValueCompact, ptr: new(*All), out: &pallValue}, // unmarshal interface test {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called {in: `{"T":false}`, ptr: &ump, out: &umtrue}, {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, {in: `{"M":{"T":"x:y"}}`, ptr: &umstruct, out: umstruct}, // UnmarshalText interface test {in: `"x:y"`, ptr: &um0T, out: umtrueXY}, {in: `"x:y"`, ptr: &umpType, out: &umtrueXY}, {in: `["x:y"]`, ptr: &umsliceXY, out: umsliceXY}, {in: `["x:y"]`, ptr: &umslicepType, out: &umsliceXY}, {in: `{"M":"x:y"}`, ptr: umstructType, out: umstructXY}, // Map keys can be encoding.TextUnmarshalers {in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY}, // If multiple values for the same key exists, only the most recent value is used. {in: `{"x:y":false,"x:y":true}`, ptr: &ummapType, out: ummapXY}, // Overwriting of data. // This is different from package xml, but it's what we've always done. // Now documented and tested. 
{in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, { in: `{ "Level0": 1, "Level1b": 2, "Level1c": 3, "x": 4, "Level1a": 5, "LEVEL1B": 6, "e": { "Level1a": 8, "Level1b": 9, "Level1c": 10, "Level1d": 11, "x": 12 }, "Loop1": 13, "Loop2": 14, "X": 15, "Y": 16, "Z": 17, "Q": 18 }`, ptr: new(Top), out: Top{ Level0: 1, Embed0: Embed0{ Level1b: 2, Level1c: 3, }, Embed0a: &Embed0a{ Level1a: 5, Level1b: 6, }, Embed0b: &Embed0b{ Level1a: 8, Level1b: 9, Level1c: 10, Level1d: 11, Level1e: 12, }, Loop: Loop{ Loop1: 13, Loop2: 14, }, Embed0p: Embed0p{ Point: image.Point{X: 15, Y: 16}, }, Embed0q: Embed0q{ Point: Point{Z: 17}, }, embed: embed{ Q: 18, }, }, }, { in: `{"hello": 1}`, ptr: new(Ambig), out: Ambig{First: 1}, }, { in: `{"X": 1,"Y":2}`, ptr: new(S5), out: S5{S8: S8{S9: S9{Y: 2}}}, }, { in: `{"X": 1,"Y":2}`, ptr: new(S10), out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, }, // invalid UTF-8 is coerced to valid UTF-8. { in: "\"hello\xffworld\"", ptr: new(string), out: "hello\ufffdworld", }, { in: "\"hello\xc2\xc2world\"", ptr: new(string), out: "hello\ufffd\ufffdworld", }, { in: "\"hello\xc2\xffworld\"", ptr: new(string), out: "hello\ufffd\ufffdworld", }, { in: "\"hello\\ud800world\"", ptr: new(string), out: "hello\ufffdworld", }, { in: "\"hello\\ud800\\ud800world\"", ptr: new(string), out: "hello\ufffd\ufffdworld", }, { in: "\"hello\\ud800\\ud800world\"", ptr: new(string), out: "hello\ufffd\ufffdworld", }, { in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", ptr: new(string), out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", }, // Used to be issue 8305, but time.Time implements encoding.TextUnmarshaler so this works now. 
{ in: `{"2009-11-10T23:00:00Z": "hello world"}`, ptr: &map[time.Time]string{}, out: map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"}, }, // issue 8305 { in: `{"2009-11-10T23:00:00Z": "hello world"}`, ptr: &map[Point]string{}, err: &UnmarshalTypeError{"object", reflect.TypeOf(map[Point]string{}), 1}, }, { in: `{"asdf": "hello world"}`, ptr: &map[unmarshaler]string{}, err: &UnmarshalTypeError{"object", reflect.TypeOf(map[unmarshaler]string{}), 1}, }, } func TestMarshal(t *testing.T) { b, err := Marshal(allValue) if err != nil { t.Fatalf("Marshal allValue: %v", err) } if string(b) != allValueCompact { t.Errorf("Marshal allValueCompact") diff(t, b, []byte(allValueCompact)) return } b, err = Marshal(pallValue) if err != nil { t.Fatalf("Marshal pallValue: %v", err) } if string(b) != pallValueCompact { t.Errorf("Marshal pallValueCompact") diff(t, b, []byte(pallValueCompact)) return } } var badUTF8 = []struct { in, out string }{ {"hello\xffworld", `"hello\ufffdworld"`}, {"", `""`}, {"\xff", `"\ufffd"`}, {"\xff\xff", `"\ufffd\ufffd"`}, {"a\xffb", `"a\ufffdb"`}, {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, } func TestMarshalBadUTF8(t *testing.T) { for _, tt := range badUTF8 { b, err := Marshal(tt.in) if string(b) != tt.out || err != nil { t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) } } } func TestMarshalNumberZeroVal(t *testing.T) { var n Number out, err := Marshal(n) if err != nil { t.Fatal(err) } outStr := string(out) if outStr != "0" { t.Fatalf("Invalid zero val for Number: %q", outStr) } } func TestMarshalEmbeds(t *testing.T) { top := &Top{ Level0: 1, Embed0: Embed0{ Level1b: 2, Level1c: 3, }, Embed0a: &Embed0a{ Level1a: 5, Level1b: 6, }, Embed0b: &Embed0b{ Level1a: 8, Level1b: 9, Level1c: 10, Level1d: 11, Level1e: 12, }, Loop: Loop{ Loop1: 13, Loop2: 14, }, Embed0p: Embed0p{ Point: image.Point{X: 15, Y: 16}, }, Embed0q: Embed0q{ Point: Point{Z: 17}, }, embed: embed{ Q: 18, }, } b, err 
:= Marshal(top) if err != nil { t.Fatal(err) } want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" if string(b) != want { t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) } } func TestUnmarshal(t *testing.T) { for i, tt := range unmarshalTests { var scan scanner in := []byte(tt.in) if err := checkValid(in, &scan); err != nil { if !reflect.DeepEqual(err, tt.err) { t.Errorf("#%d: checkValid: %#v", i, err) continue } } if tt.ptr == nil { continue } // v = new(right-type) v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) dec := NewDecoder(bytes.NewReader(in)) if tt.useNumber { dec.UseNumber() } if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { t.Errorf("#%d: %v, want %v", i, err, tt.err) continue } else if err != nil { continue } if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) data, _ := Marshal(v.Elem().Interface()) println(string(data)) data, _ = Marshal(tt.out) println(string(data)) continue } // Check round trip. 
if tt.err == nil { enc, err := Marshal(v.Interface()) if err != nil { t.Errorf("#%d: error re-marshaling: %v", i, err) continue } vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) dec = NewDecoder(bytes.NewReader(enc)) if tt.useNumber { dec.UseNumber() } if err := dec.Decode(vv.Interface()); err != nil { t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) continue } if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) t.Errorf(" In: %q", strings.Map(noSpace, string(in))) t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) continue } } } } func TestUnmarshalMarshal(t *testing.T) { initBig() var v interface{} if err := Unmarshal(jsonBig, &v); err != nil { t.Fatalf("Unmarshal: %v", err) } b, err := Marshal(v) if err != nil { t.Fatalf("Marshal: %v", err) } if !bytes.Equal(jsonBig, b) { t.Errorf("Marshal jsonBig") diff(t, b, jsonBig) return } } var numberTests = []struct { in string i int64 intErr string f float64 floatErr string }{ {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, {in: "-12", i: -12, f: -12.0}, {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, } // Independent of Decode, basic coverage of the accessors in Number func TestNumberAccessors(t *testing.T) { for _, tt := range numberTests { n := Number(tt.in) if s := n.String(); s != tt.in { t.Errorf("Number(%q).String() is %q", tt.in, s) } if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { t.Errorf("Number(%q).Int64() is %d", tt.in, i) } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err) } if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { t.Errorf("Number(%q).Float64() is %g", tt.in, 
f) } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) } } } func TestLargeByteSlice(t *testing.T) { s0 := make([]byte, 2000) for i := range s0 { s0[i] = byte(i) } b, err := Marshal(s0) if err != nil { t.Fatalf("Marshal: %v", err) } var s1 []byte if err := Unmarshal(b, &s1); err != nil { t.Fatalf("Unmarshal: %v", err) } if !bytes.Equal(s0, s1) { t.Errorf("Marshal large byte slice") diff(t, s0, s1) } } type Xint struct { X int } func TestUnmarshalInterface(t *testing.T) { var xint Xint var i interface{} = &xint if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { t.Fatalf("Unmarshal: %v", err) } if xint.X != 1 { t.Fatalf("Did not write to xint") } } func TestUnmarshalPtrPtr(t *testing.T) { var xint Xint pxint := &xint if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { t.Fatalf("Unmarshal: %v", err) } if xint.X != 1 { t.Fatalf("Did not write to xint") } } func TestEscape(t *testing.T) { const input = `"foobar"` + " [\u2028 \u2029]" const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` b, err := Marshal(input) if err != nil { t.Fatalf("Marshal error: %v", err) } if s := string(b); s != expected { t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) } } // WrongString is a struct that's misusing the ,string modifier. 
type WrongString struct { Message string `json:"result,string"` } type wrongStringTest struct { in, err string } var wrongStringTests = []wrongStringTest{ {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, } // If people misuse the ,string modifier, the error message should be // helpful, telling the user that they're doing it wrong. func TestErrorMessageFromMisusedString(t *testing.T) { for n, tt := range wrongStringTests { r := strings.NewReader(tt.in) var s WrongString err := NewDecoder(r).Decode(&s) got := fmt.Sprintf("%v", err) if got != tt.err { t.Errorf("%d. got err = %q, want %q", n, got, tt.err) } } } func noSpace(c rune) rune { if isSpace(byte(c)) { //only used for ascii return -1 } return c } type All struct { Bool bool Int int Int8 int8 Int16 int16 Int32 int32 Int64 int64 Uint uint Uint8 uint8 Uint16 uint16 Uint32 uint32 Uint64 uint64 Uintptr uintptr Float32 float32 Float64 float64 Foo string `json:"bar"` Foo2 string `json:"bar2,dummyopt"` IntStr int64 `json:",string"` PBool *bool PInt *int PInt8 *int8 PInt16 *int16 PInt32 *int32 PInt64 *int64 PUint *uint PUint8 *uint8 PUint16 *uint16 PUint32 *uint32 PUint64 *uint64 PUintptr *uintptr PFloat32 *float32 PFloat64 *float64 String string PString *string Map map[string]Small MapP map[string]*Small PMap *map[string]Small PMapP *map[string]*Small EmptyMap map[string]Small NilMap map[string]Small Slice []Small SliceP []*Small PSlice *[]Small PSliceP *[]*Small EmptySlice []Small NilSlice []Small StringSlice []string ByteSlice []byte Small Small PSmall *Small PPSmall **Small Interface interface{} PInterface *interface{} unexported int } type Small 
struct { Tag string } var allValue = All{ Bool: true, Int: 2, Int8: 3, Int16: 4, Int32: 5, Int64: 6, Uint: 7, Uint8: 8, Uint16: 9, Uint32: 10, Uint64: 11, Uintptr: 12, Float32: 14.1, Float64: 15.1, Foo: "foo", Foo2: "foo2", IntStr: 42, String: "16", Map: map[string]Small{ "17": {Tag: "tag17"}, "18": {Tag: "tag18"}, }, MapP: map[string]*Small{ "19": {Tag: "tag19"}, "20": nil, }, EmptyMap: map[string]Small{}, Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, EmptySlice: []Small{}, StringSlice: []string{"str24", "str25", "str26"}, ByteSlice: []byte{27, 28, 29}, Small: Small{Tag: "tag30"}, PSmall: &Small{Tag: "tag31"}, Interface: 5.2, } var pallValue = All{ PBool: &allValue.Bool, PInt: &allValue.Int, PInt8: &allValue.Int8, PInt16: &allValue.Int16, PInt32: &allValue.Int32, PInt64: &allValue.Int64, PUint: &allValue.Uint, PUint8: &allValue.Uint8, PUint16: &allValue.Uint16, PUint32: &allValue.Uint32, PUint64: &allValue.Uint64, PUintptr: &allValue.Uintptr, PFloat32: &allValue.Float32, PFloat64: &allValue.Float64, PString: &allValue.String, PMap: &allValue.Map, PMapP: &allValue.MapP, PSlice: &allValue.Slice, PSliceP: &allValue.SliceP, PPSmall: &allValue.PSmall, PInterface: &allValue.Interface, } var allValueIndent = `{ "Bool": true, "Int": 2, "Int8": 3, "Int16": 4, "Int32": 5, "Int64": 6, "Uint": 7, "Uint8": 8, "Uint16": 9, "Uint32": 10, "Uint64": 11, "Uintptr": 12, "Float32": 14.1, "Float64": 15.1, "bar": "foo", "bar2": "foo2", "IntStr": "42", "PBool": null, "PInt": null, "PInt8": null, "PInt16": null, "PInt32": null, "PInt64": null, "PUint": null, "PUint8": null, "PUint16": null, "PUint32": null, "PUint64": null, "PUintptr": null, "PFloat32": null, "PFloat64": null, "String": "16", "PString": null, "Map": { "17": { "Tag": "tag17" }, "18": { "Tag": "tag18" } }, "MapP": { "19": { "Tag": "tag19" }, "20": null }, "PMap": null, "PMapP": null, "EmptyMap": {}, "NilMap": null, "Slice": [ { "Tag": "tag20" }, { "Tag": "tag21" } ], 
"SliceP": [ { "Tag": "tag22" }, null, { "Tag": "tag23" } ], "PSlice": null, "PSliceP": null, "EmptySlice": [], "NilSlice": null, "StringSlice": [ "str24", "str25", "str26" ], "ByteSlice": "Gxwd", "Small": { "Tag": "tag30" }, "PSmall": { "Tag": "tag31" }, "PPSmall": null, "Interface": 5.2, "PInterface": null }` var allValueCompact = strings.Map(noSpace, allValueIndent) var pallValueIndent = `{ "Bool": false, "Int": 0, "Int8": 0, "Int16": 0, "Int32": 0, "Int64": 0, "Uint": 0, "Uint8": 0, "Uint16": 0, "Uint32": 0, "Uint64": 0, "Uintptr": 0, "Float32": 0, "Float64": 0, "bar": "", "bar2": "", "IntStr": "0", "PBool": true, "PInt": 2, "PInt8": 3, "PInt16": 4, "PInt32": 5, "PInt64": 6, "PUint": 7, "PUint8": 8, "PUint16": 9, "PUint32": 10, "PUint64": 11, "PUintptr": 12, "PFloat32": 14.1, "PFloat64": 15.1, "String": "", "PString": "16", "Map": null, "MapP": null, "PMap": { "17": { "Tag": "tag17" }, "18": { "Tag": "tag18" } }, "PMapP": { "19": { "Tag": "tag19" }, "20": null }, "EmptyMap": null, "NilMap": null, "Slice": null, "SliceP": null, "PSlice": [ { "Tag": "tag20" }, { "Tag": "tag21" } ], "PSliceP": [ { "Tag": "tag22" }, null, { "Tag": "tag23" } ], "EmptySlice": null, "NilSlice": null, "StringSlice": null, "ByteSlice": null, "Small": { "Tag": "" }, "PSmall": null, "PPSmall": { "Tag": "tag31" }, "Interface": null, "PInterface": 5.2 }` var pallValueCompact = strings.Map(noSpace, pallValueIndent) func TestRefUnmarshal(t *testing.T) { type S struct { // Ref is defined in encode_test.go. 
R0 Ref R1 *Ref R2 RefText R3 *RefText } want := S{ R0: 12, R1: new(Ref), R2: 13, R3: new(RefText), } *want.R1 = 12 *want.R3 = 13 var got S if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { t.Fatalf("Unmarshal: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } } // Test that the empty string doesn't panic decoding when ,string is specified // Issue 3450 func TestEmptyString(t *testing.T) { type T2 struct { Number1 int `json:",string"` Number2 int `json:",string"` } data := `{"Number1":"1", "Number2":""}` dec := NewDecoder(strings.NewReader(data)) var t2 T2 err := dec.Decode(&t2) if err == nil { t.Fatal("Decode: did not return error") } if t2.Number1 != 1 { t.Fatal("Decode: did not set Number1") } } // Test that a null for ,string is not replaced with the previous quoted string (issue 7046). // It should also not be an error (issue 2540, issue 8587). func TestNullString(t *testing.T) { type T struct { A int `json:",string"` B int `json:",string"` C *int `json:",string"` } data := []byte(`{"A": "1", "B": null, "C": null}`) var s T s.B = 1 s.C = new(int) *s.C = 2 err := Unmarshal(data, &s) if err != nil { t.Fatalf("Unmarshal: %v", err) } if s.B != 1 || s.C != nil { t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) } } func intp(x int) *int { p := new(int) *p = x return p } func intpp(x *int) **int { pp := new(*int) *pp = x return pp } var interfaceSetTests = []struct { pre interface{} json string post interface{} }{ {"foo", `"bar"`, "bar"}, {"foo", `2`, 2.0}, {"foo", `true`, true}, {"foo", `null`, nil}, {nil, `null`, nil}, {new(int), `null`, nil}, {(*int)(nil), `null`, nil}, {new(*int), `null`, new(*int)}, {(**int)(nil), `null`, nil}, {intp(1), `null`, nil}, {intpp(nil), `null`, intpp(nil)}, {intpp(intp(1)), `null`, intpp(nil)}, } func TestInterfaceSet(t *testing.T) { for _, tt := range interfaceSetTests { b := struct{ X interface{} }{tt.pre} blob := `{"X":` + 
tt.json + `}` if err := Unmarshal([]byte(blob), &b); err != nil { t.Errorf("Unmarshal %#q: %v", blob, err) continue } if !reflect.DeepEqual(b.X, tt.post) { t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) } } } // JSON null values should be ignored for primitives and string values instead of resulting in an error. // Issue 2540 func TestUnmarshalNulls(t *testing.T) { jsonData := []byte(`{ "Bool" : null, "Int" : null, "Int8" : null, "Int16" : null, "Int32" : null, "Int64" : null, "Uint" : null, "Uint8" : null, "Uint16" : null, "Uint32" : null, "Uint64" : null, "Float32" : null, "Float64" : null, "String" : null}`) nulls := All{ Bool: true, Int: 2, Int8: 3, Int16: 4, Int32: 5, Int64: 6, Uint: 7, Uint8: 8, Uint16: 9, Uint32: 10, Uint64: 11, Float32: 12.1, Float64: 13.1, String: "14"} err := Unmarshal(jsonData, &nulls) if err != nil { t.Errorf("Unmarshal of null values failed: %v", err) } if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { t.Errorf("Unmarshal of null values affected primitives") } } func TestStringKind(t *testing.T) { type stringKind string var m1, m2 map[stringKind]int m1 = map[stringKind]int{ "foo": 42, } data, err := Marshal(m1) if err != nil { t.Errorf("Unexpected error marshaling: %v", err) } err = Unmarshal(data, &m2) if err != nil { t.Errorf("Unexpected error unmarshaling: %v", err) } if !reflect.DeepEqual(m1, m2) { t.Error("Items should be equal after encoding and then decoding") } } // Custom types with []byte as underlying type could not be marshalled // and then unmarshalled. // Issue 8962. 
func TestByteKind(t *testing.T) { type byteKind []byte a := byteKind("hello") data, err := Marshal(a) if err != nil { t.Error(err) } var b byteKind err = Unmarshal(data, &b) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(a, b) { t.Errorf("expected %v == %v", a, b) } } // The fix for issue 8962 introduced a regression. // Issue 12921. func TestSliceOfCustomByte(t *testing.T) { type Uint8 uint8 a := []Uint8("hello") data, err := Marshal(a) if err != nil { t.Fatal(err) } var b []Uint8 err = Unmarshal(data, &b) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(a, b) { t.Fatalf("expected %v == %v", a, b) } } var decodeTypeErrorTests = []struct { dest interface{} src string }{ {new(string), `{"user": "name"}`}, // issue 4628. {new(error), `{}`}, // issue 4222 {new(error), `[]`}, {new(error), `""`}, {new(error), `123`}, {new(error), `true`}, } func TestUnmarshalTypeError(t *testing.T) { for _, item := range decodeTypeErrorTests { err := Unmarshal([]byte(item.src), item.dest) if _, ok := err.(*UnmarshalTypeError); !ok { t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", item.src, item.dest, err) } } } var unmarshalSyntaxTests = []string{ "tru", "fals", "nul", "123e", `"hello`, `[1,2,3`, `{"key":1`, `{"key":1,`, } func TestUnmarshalSyntax(t *testing.T) { var x interface{} for _, src := range unmarshalSyntaxTests { err := Unmarshal([]byte(src), &x) if _, ok := err.(*SyntaxError); !ok { t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) } } } // Test handling of unexported fields that should be ignored. 
// Issue 4660 type unexportedFields struct { Name string m map[string]interface{} `json:"-"` m2 map[string]interface{} `json:"abcd"` } func TestUnmarshalUnexported(t *testing.T) { input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` want := &unexportedFields{Name: "Bob"} out := &unexportedFields{} err := Unmarshal([]byte(input), out) if err != nil { t.Errorf("got error %v, expected nil", err) } if !reflect.DeepEqual(out, want) { t.Errorf("got %q, want %q", out, want) } } // Time3339 is a time.Time which encodes to and from JSON // as an RFC 3339 time in UTC. type Time3339 time.Time func (t *Time3339) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) } tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) if err != nil { return err } *t = Time3339(tm) return nil } func TestUnmarshalJSONLiteralError(t *testing.T) { var t3 Time3339 err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) if err == nil { t.Fatalf("expected error; got time %v", time.Time(t3)) } if !strings.Contains(err.Error(), "range") { t.Errorf("got err = %v; want out of range error", err) } } // Test that extra object elements in an array do not result in a // "data changing underfoot" error. // Issue 3717 func TestSkipArrayObjects(t *testing.T) { json := `[{}]` var dest [0]interface{} err := Unmarshal([]byte(json), &dest) if err != nil { t.Errorf("got error %q, want nil", err) } } // Test semantics of pre-filled struct fields and pre-filled map fields. // Issue 4900. func TestPrefilled(t *testing.T) { ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } // Values here change, cannot reuse table across runs. 
var prefillTests = []struct { in string ptr interface{} out interface{} }{ { in: `{"X": 1, "Y": 2}`, ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, }, { in: `{"X": 1, "Y": 2}`, ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), }, } for _, tt := range prefillTests { ptrstr := fmt.Sprintf("%v", tt.ptr) err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here if err != nil { t.Errorf("Unmarshal: %v", err) } if !reflect.DeepEqual(tt.ptr, tt.out) { t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) } } } var invalidUnmarshalTests = []struct { v interface{} want string }{ {nil, "json: Unmarshal(nil)"}, {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, {(*int)(nil), "json: Unmarshal(nil *int)"}, } func TestInvalidUnmarshal(t *testing.T) { buf := []byte(`{"a":"1"}`) for _, tt := range invalidUnmarshalTests { err := Unmarshal(buf, tt.v) if err == nil { t.Errorf("Unmarshal expecting error, got nil") continue } if got := err.Error(); got != tt.want { t.Errorf("Unmarshal = %q; want %q", got, tt.want) } } } var invalidUnmarshalTextTests = []struct { v interface{} want string }{ {nil, "json: Unmarshal(nil)"}, {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, {(*int)(nil), "json: Unmarshal(nil *int)"}, {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, } func TestInvalidUnmarshalText(t *testing.T) { buf := []byte(`123`) for _, tt := range invalidUnmarshalTextTests { err := Unmarshal(buf, tt.v) if err == nil { t.Errorf("Unmarshal expecting error, got nil") continue } if got := err.Error(); got != tt.want { t.Errorf("Unmarshal = %q; want %q", got, tt.want) } } } // Test that string option is ignored for invalid types. // Issue 9812. 
func TestInvalidStringOption(t *testing.T) { num := 0 item := struct { T time.Time `json:",string"` M map[string]string `json:",string"` S []string `json:",string"` A [1]string `json:",string"` I interface{} `json:",string"` P *int `json:",string"` }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} data, err := Marshal(item) if err != nil { t.Fatalf("Marshal: %v", err) } err = Unmarshal(data, &item) if err != nil { t.Fatalf("Unmarshal: %v", err) } } mgo-r2016.08.01/internal/json/encode.go000066400000000000000000001010111274772341400174330ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package json implements encoding and decoding of JSON as defined in // RFC 4627. The mapping between JSON and Go values is described // in the documentation for the Marshal and Unmarshal functions. // // See "JSON and Go" for an introduction to this package: // https://golang.org/doc/articles/json_and_go.html package json import ( "bytes" "encoding" "encoding/base64" "fmt" "math" "reflect" "runtime" "sort" "strconv" "strings" "sync" "unicode" "unicode/utf8" ) // Marshal returns the JSON encoding of v. // // Marshal traverses the value v recursively. // If an encountered value implements the Marshaler interface // and is not a nil pointer, Marshal calls its MarshalJSON method // to produce JSON. If no MarshalJSON method is present but the // value implements encoding.TextMarshaler instead, Marshal calls // its MarshalText method. // The nil pointer exception is not strictly necessary // but mimics a similar, necessary exception in the behavior of // UnmarshalJSON. // // Otherwise, Marshal uses the following type-dependent default encodings: // // Boolean values encode as JSON booleans. // // Floating point, integer, and Number values encode as JSON numbers. 
// // String values encode as JSON strings coerced to valid UTF-8, // replacing invalid bytes with the Unicode replacement rune. // The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" // to keep some browsers from misinterpreting JSON output as HTML. // Ampersand "&" is also escaped to "\u0026" for the same reason. // This escaping can be disabled using an Encoder with DisableHTMLEscaping. // // Array and slice values encode as JSON arrays, except that // []byte encodes as a base64-encoded string, and a nil slice // encodes as the null JSON value. // // Struct values encode as JSON objects. Each exported struct field // becomes a member of the object unless // - the field's tag is "-", or // - the field is empty and its tag specifies the "omitempty" option. // The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or string of // length zero. The object's default key string is the struct field name // but can be specified in the struct field's tag value. The "json" key in // the struct field's tag value is the key name, followed by an optional comma // and options. Examples: // // // Field is ignored by this package. // Field int `json:"-"` // // // Field appears in JSON as key "myName". // Field int `json:"myName"` // // // Field appears in JSON as key "myName" and // // the field is omitted from the object if its value is empty, // // as defined above. // Field int `json:"myName,omitempty"` // // // Field appears in JSON as key "Field" (the default), but // // the field is skipped if empty. // // Note the leading comma. // Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. 
This extra level of encoding is sometimes used // when communicating with JavaScript programs: // // Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, // underscores and slashes. // // Anonymous struct fields are usually marshaled as if their inner exported fields // were fields in the outer struct, subject to the usual Go visibility rules amended // as described in the next paragraph. // An anonymous struct field with a name given in its JSON tag is treated as // having that name, rather than being anonymous. // An anonymous struct field of interface type is treated the same as having // that type as its name, rather than being anonymous. // // The Go visibility rules for struct fields are amended for JSON when // deciding which field to marshal or unmarshal. If there are // multiple fields at the same level, and that level is the least // nested (and would therefore be the nesting level selected by the // usual Go rules), the following extra rules apply: // // 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, // even if there are multiple untagged fields that would otherwise conflict. // 2) If there is exactly one field (tagged or not according to the first rule), that is selected. // 3) Otherwise there are multiple fields, and all are ignored; no error occurs. // // Handling of anonymous struct fields is new in Go 1.1. // Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of // an anonymous struct field in both current and earlier versions, give the field // a JSON tag of "-". // // Map values encode as JSON objects. The map's key type must either be a string // or implement encoding.TextMarshaler. The map keys are used as JSON object // keys, subject to the UTF-8 coercion described for string values above. // // Pointer values encode as the value pointed to. 
// A nil pointer encodes as the null JSON value. // // Interface values encode as the value contained in the interface. // A nil interface value encodes as the null JSON value. // // Channel, complex, and function values cannot be encoded in JSON. // Attempting to encode such a value causes Marshal to return // an UnsupportedTypeError. // // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. // func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v, encOpts{escapeHTML: true}) if err != nil { return nil, err } return e.Bytes(), nil } // MarshalIndent is like Marshal but applies Indent to format the output. func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { b, err := Marshal(v) if err != nil { return nil, err } var buf bytes.Buffer err = Indent(&buf, b, prefix, indent) if err != nil { return nil, err } return buf.Bytes(), nil } // HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 // characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 // so that the JSON will be safe to embed inside HTML