package/package.json000644 0000003507 3560116604 011553 0ustar00000000 000000 { "name": "mongodb-core", "version": "3.2.7", "description": "Core MongoDB driver functionality, no bells and whistles and meant for integration not end applications", "main": "index.js", "files": [ "index.js", "lib" ], "scripts": { "test": "npm run lint && mongodb-test-runner -t 60000 test/tests", "coverage": "node_modules/.bin/nyc node test/runner.js -t functional -l && node_modules/.bin/nyc report --reporter=text-lcov | node_modules/.bin/coveralls", "lint": "eslint index.js lib test", "format": "prettier --print-width 100 --tab-width 2 --single-quote --write index.js test/**/*.js lib/**/*.js", "changelog": "conventional-changelog -p angular -i HISTORY.md -s", "atlas": "node ./test/atlas.js", "release": "standard-version -i HISTORY.md" }, "repository": { "type": "git", "url": "git://github.com/mongodb-js/mongodb-core.git" }, "keywords": [ "mongodb", "core" ], "dependencies": { "bson": "^1.1.1", "require_optional": "^1.0.1", "safe-buffer": "^5.1.2" }, "devDependencies": { "chai": "^4.2.0", "chai-subset": "^1.6.0", "co": "^4.6.0", "eslint": "^4.6.1", "eslint-plugin-prettier": "^2.2.0", "jsdoc": "3.5.4", "mongodb-extjson": "^2.1.2", "mongodb-mock-server": "^1.0.1", "mongodb-test-runner": "^1.3.4", "prettier": "~1.12.0", "sinon": "^6.0.0", "snappy": "^6.1.1", "standard-version": "^4.4.0" }, "peerOptionalDependencies": { "kerberos": "^1.0.0", "mongodb-extjson": "^2.1.2", "snappy": "^6.1.1", "bson-ext": "^2.0.0" }, "author": "Christian Kvalheim", "license": "Apache-2.0", "bugs": { "url": "https://github.com/mongodb-js/mongodb-core/issues" }, "homepage": "https://github.com/mongodb-js/mongodb-core", "optionalDependencies": { "saslprep": "^1.0.0" } } package/HISTORY.md000644 0000177763 3560116604 010770 0ustar00000000 000000 # Change Log All notable changes to this project will be documented in this file. 
See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. ## [3.2.7](https://github.com/mongodb-js/mongodb-core/compare/v3.2.6...v3.2.7) (2019-06-04) ### Bug Fixes * **pool:** clean up connections if pool is destroyed mid-handshake ([4bd7f1c](https://github.com/mongodb-js/mongodb-core/commit/4bd7f1c)) * **server:** emit "first connect" error if initial connect fails due to ECONNREFUSED ([853bcfe](https://github.com/mongodb-js/mongodb-core/commit/853bcfe)) ## [3.2.6](https://github.com/mongodb-js/mongodb-core/compare/v3.2.4...v3.2.6) (2019-05-23) ### Bug Fixes * **connect:** fixed syntax issue in connect error handler ([83e224b](https://github.com/mongodb-js/mongodb-core/commit/83e224b)) * **uri-parser:** always treat appname as a string ([238aca8](https://github.com/mongodb-js/mongodb-core/commit/238aca8)) ## [3.2.5](https://github.com/mongodb-js/mongodb-core/compare/v3.2.3...v3.2.5) (2019-05-17) ### Bug Fixes * **connect:** fixed syntax issue in connect error handler ([83e224b](https://github.com/mongodb-js/mongodb-core/commit/83e224b)) ## [3.2.4](https://github.com/mongodb-js/mongodb-core/compare/v3.2.3...v3.2.4) (2019-05-08) ### Bug Fixes * **authentication:** fix auth issue with BSON promoteBuffers: true ([bcb87ca](https://github.com/mongodb-js/mongodb-core/commit/bcb87ca)) * **command_result:** return entire result in toJSON ([b958513](https://github.com/mongodb-js/mongodb-core/commit/b958513)) * **connect:** ensure connection errors are MongoNetworkErrors ([380a386](https://github.com/mongodb-js/mongodb-core/commit/380a386)) * **retryable-writes:** don't add `txnNumber` for standalone servers ([8489265](https://github.com/mongodb-js/mongodb-core/commit/8489265)) * **transactions:** send recovery token on abortTransaction ([1fa5763](https://github.com/mongodb-js/mongodb-core/commit/1fa5763)) ## [3.2.3](https://github.com/mongodb-js/mongodb-core/compare/v3.2.2...v3.2.3) (2019-04-05) ### Bug Fixes * **uri_parser:** 
restore original compression parsing ([70a7d94](https://github.com/mongodb-js/mongodb-core/commit/70a7d94)) * **uri_parser:** support URI Options spec tests ([c067dbc](https://github.com/mongodb-js/mongodb-core/commit/c067dbc)) ## [3.2.2](https://github.com/mongodb-js/mongodb-core/compare/v3.2.1...v3.2.2) (2019-03-22) ### Bug Fixes * do not attempt to auth against an arbiter ([72bb011](https://github.com/mongodb-js/mongodb-core/commit/72bb011)) * **connection:** do not leak a connection if initial handshak fails ([6cba222](https://github.com/mongodb-js/mongodb-core/commit/6cba222)) ## [3.2.1](https://github.com/mongodb-js/mongodb-core/compare/v3.2.0...v3.2.1) (2019-03-21) # [3.2.0](https://github.com/mongodb-js/mongodb-core/compare/v3.1.11...v3.2.0) (2019-03-21) ### Bug Fixes * **command:** invert boolean expression for applying `$query` ([8513ad5](https://github.com/mongodb-js/mongodb-core/commit/8513ad5)) * **command:** only add `$query` to command in OP_QUERY ([3e57690](https://github.com/mongodb-js/mongodb-core/commit/3e57690)) * **cursor:** ensure that cursor id defaults to 0 ([e7e1775](https://github.com/mongodb-js/mongodb-core/commit/e7e1775)) * **get-more:** documents are already returned as raw in this case ([c81f609](https://github.com/mongodb-js/mongodb-core/commit/c81f609)) * **msg:** support raw cursor results using OP_MSG ([f91304b](https://github.com/mongodb-js/mongodb-core/commit/f91304b)) * **op-msg:** only include `$readPreference` if not primary ([0d10317](https://github.com/mongodb-js/mongodb-core/commit/0d10317)) * **scram:** allow errors to be passed through callbacks ([dccc2ba](https://github.com/mongodb-js/mongodb-core/commit/dccc2ba)) * **sessions:** enable sessions in OP_MSG ([d8bf209](https://github.com/mongodb-js/mongodb-core/commit/d8bf209)) * **topology:** correctly pick up command options for cursors ([259231e](https://github.com/mongodb-js/mongodb-core/commit/259231e)) * **topology:** ensure read preferences are translated on 
selection ([ebefb7b](https://github.com/mongodb-js/mongodb-core/commit/ebefb7b)) * **transactions:** only send recovery token on commitTransaction ([923a089](https://github.com/mongodb-js/mongodb-core/commit/923a089)) * **transactions:** special case non-deterministic wc errors in txns ([5a2ae77](https://github.com/mongodb-js/mongodb-core/commit/5a2ae77)) * **transactions:** write concern is always majority on txn retry ([7b240ea](https://github.com/mongodb-js/mongodb-core/commit/7b240ea)) * **with-transaction:** throw a useful error on invalid return type ([ae64bb4](https://github.com/mongodb-js/mongodb-core/commit/ae64bb4)) * make mongos write commands work the same as replset write commands ([31b984f](https://github.com/mongodb-js/mongodb-core/commit/31b984f)) ### Features * **auth:** add authentication to handshake process ([aacac68](https://github.com/mongodb-js/mongodb-core/commit/aacac68)) * **error:** all `hasErrorLabel` method to MongoError ([32a5e74](https://github.com/mongodb-js/mongodb-core/commit/32a5e74)) * **OP_MSG:** add in parsing of OP_MSG ([c310a83](https://github.com/mongodb-js/mongodb-core/commit/c310a83)) * **OP_MSG:** adding class for translating OP_MSG from binary ([11e4132](https://github.com/mongodb-js/mongodb-core/commit/11e4132)) * **OP_MSG:** adding OP_MSG implementation ([c5adfa3](https://github.com/mongodb-js/mongodb-core/commit/c5adfa3)) * **op-msg:** add support for `OP_MSG` to command monitoring ([9124b67](https://github.com/mongodb-js/mongodb-core/commit/9124b67)) * **sdam:** backport unified SDAM changes from `next` to `master` ([83d744c](https://github.com/mongodb-js/mongodb-core/commit/83d744c)) * **topology-description:** always calculate commonWireVersion ([5c630ab](https://github.com/mongodb-js/mongodb-core/commit/5c630ab)) * **transactions:** tack recovery token for sharded transactions ([e12ae70](https://github.com/mongodb-js/mongodb-core/commit/e12ae70)) * add ability to pin server to transaction state machine 
([da13e55](https://github.com/mongodb-js/mongodb-core/commit/da13e55)) * update proxy selection to consider pinned server on session ([189e428](https://github.com/mongodb-js/mongodb-core/commit/189e428)) * **txns:** support mongos pinning in unified topology ([78dab5d](https://github.com/mongodb-js/mongodb-core/commit/78dab5d)) * **with-transaction:** provide helper for convenient txn api ([478d1e7](https://github.com/mongodb-js/mongodb-core/commit/478d1e7)) * **withTransaction:** retry transaction commit on wtimeout ([2bc705c](https://github.com/mongodb-js/mongodb-core/commit/2bc705c)) ## [3.1.11](https://github.com/mongodb-js/mongodb-core/compare/v3.1.10...v3.1.11) (2019-01-16) ### Bug Fixes * **wire-protocol:** don't allow override of `slaveOk` ([8fcef69](https://github.com/mongodb-js/mongodb-core/commit/8fcef69)) ## [3.1.10](https://github.com/mongodb-js/mongodb-core/compare/v3.1.9...v3.1.10) (2019-01-15) ### Bug Fixes * **mongos-replset:** pass connect options to child server instances ([7ffb4bb](https://github.com/mongodb-js/mongodb-core/commit/7ffb4bb)) * **prettier:** fix prettier file paths for Windows ([00c631e](https://github.com/mongodb-js/mongodb-core/commit/00c631e)) ## [3.1.9](https://github.com/mongodb-js/mongodb-core/compare/v3.1.8...v3.1.9) (2018-11-16) ### Bug Fixes * **mongos:** ensure servers are properly cleaned up when destroyed ([68f4fd3](https://github.com/mongodb-js/mongodb-core/commit/68f4fd3)) * **uri_parser:** uri-encoded auth handling ([81b5b45](https://github.com/mongodb-js/mongodb-core/commit/81b5b45)) * **url-parser:** support passing in `auth` to parsing options ([29455ca](https://github.com/mongodb-js/mongodb-core/commit/29455ca)) ## [3.1.8](https://github.com/mongodb-js/mongodb-core/compare/v3.1.7...v3.1.8) (2018-11-05) ### Bug Fixes * **sspi:** correct auth process for SSPI ([808ab21](https://github.com/mongodb-js/mongodb-core/commit/808ab21)) * **uri_parser:** add `replSet` to exemption list for number coercion 
([d00b1ab](https://github.com/mongodb-js/mongodb-core/commit/d00b1ab)) ## [3.1.7](https://github.com/mongodb-js/mongodb-core/compare/v3.1.6...v3.1.7) (2018-10-10) ### Bug Fixes * **uri-parser:** persist default database when authSource present ([aa601d3](https://github.com/mongodb-js/mongodb-core/commit/aa601d3)) ## [3.1.6](https://github.com/mongodb-js/mongodb-core/compare/v3.1.4...v3.1.6) (2018-10-09) ### Bug Fixes * **srv-parsing:** ensure parse options are propogated to txt lookup ([923ceb0](https://github.com/mongodb-js/mongodb-core/commit/923ceb0)) * **uri-parser:** add exemption list for number coercion in options ([82896ea](https://github.com/mongodb-js/mongodb-core/commit/82896ea)) ## [3.1.5](https://github.com/mongodb-js/mongodb-core/compare/v3.1.4...v3.1.5) (2018-09-15) ### Bug Fixes * **connection:** Revert fast fallback due to Atlas connect issues ([3133fc3](https://github.com/mongodb-js/mongodb-core/commit/3133fc3)) ## [3.1.4](https://github.com/mongodb-js/mongodb-core/compare/v3.1.3...v3.1.4) (2018-09-14) ### Bug Fixes * **apm:** fix upconversion for OP_QUERY in apm ([f969bee](https://github.com/mongodb-js/mongodb-core/commit/f969bee)) * **gssapi:** check lowercase and camelCase gssapiServiceName option ([bf0315d](https://github.com/mongodb-js/mongodb-core/commit/bf0315d)) * **test:** do not check deep equality when test.auth.db is null ([7d3c057](https://github.com/mongodb-js/mongodb-core/commit/7d3c057)) * **uri_parser:** use admin as default auth.db ([345e6af](https://github.com/mongodb-js/mongodb-core/commit/345e6af)) ### Features * **connection:** Implement fast fallback ([622394a](https://github.com/mongodb-js/mongodb-core/commit/622394a)) ## [3.1.3](https://github.com/mongodb-js/mongodb-core/compare/v3.1.2...v3.1.3) (2018-08-25) ### Bug Fixes * **buffer:** use safe-buffer polyfill to maintain compatibility ([728d897](https://github.com/mongodb-js/mongodb-core/commit/728d897)) * **EJSON:** export the result of optionally requiring EJSON 
([645d73d](https://github.com/mongodb-js/mongodb-core/commit/645d73d)) ### Features * **kerberos:** bump kerberos dependency to `^1.0.0` ([1155ebe](https://github.com/mongodb-js/mongodb-core/commit/1155ebe)) * **standard-version:** automate part of the release process ([4c768c4](https://github.com/mongodb-js/mongodb-core/commit/4c768c4)) ## [3.1.2](https://github.com/mongodb-js/mongodb-core/compare/v3.1.1...v3.1.2) (2018-08-13) ### Bug Fixes * **mongos:** fix connection leak when mongos reconnects ([2453746](https://github.com/mongodb-js/mongodb-core/commit/2453746)) ### Features * **bson:** update to bson ^1.1.x ([952a2f0](https://github.com/mongodb-js/mongodb-core/commit/952a2f0)) ## [3.1.1](https://github.com/mongodb-js/mongodb-core/compare/v3.0.6...v3.1.1) (2018-08-13) ### Bug Fixes * **auth:** prevent stalling on authentication when connected ([6b4ac89](https://github.com/mongodb-js/mongodb-core/commit/6b4ac89)) * **buffer:** replace deprecated Buffer constructor ([7c71e19](https://github.com/mongodb-js/mongodb-core/commit/7c71e19)) * **commands:** check doc.cursor errors ([4f2b263](https://github.com/mongodb-js/mongodb-core/commit/4f2b263)) * **cursor:** check for session presence independently ([7c76c62](https://github.com/mongodb-js/mongodb-core/commit/7c76c62)) * **cursor:** check for sessions independently in core cursor ([cb5df28](https://github.com/mongodb-js/mongodb-core/commit/cb5df28)) * **cursor:** typo in _find() ([95f7fd2](https://github.com/mongodb-js/mongodb-core/commit/95f7fd2)) * **error:** attach command response to MongoWriteConcernError ([#322](https://github.com/mongodb-js/mongodb-core/issues/322)) ([24c5d06](https://github.com/mongodb-js/mongodb-core/commit/24c5d06)) * **getmore-killcursor:** slaveOk shall not be included on these ([40fb2f4](https://github.com/mongodb-js/mongodb-core/commit/40fb2f4)) * **kerberos:** loosen restrictions on kerberos versions ([c4add26](https://github.com/mongodb-js/mongodb-core/commit/c4add26)) * 
**mongos:** use `incrementTransactionNumber` directly on session ([e230d54](https://github.com/mongodb-js/mongodb-core/commit/e230d54)) * **pool:** ensure that lsid is sent in get requests to mongos ([ae820f6](https://github.com/mongodb-js/mongodb-core/commit/ae820f6)) * **read-preference:** correct server sort for `nearest` selection ([dd4eb9a](https://github.com/mongodb-js/mongodb-core/commit/dd4eb9a)) * **sdam:** we can't use Array.includes yet ([9c3b5ab](https://github.com/mongodb-js/mongodb-core/commit/9c3b5ab)) * **server:** correct typo using `this` instead of `server` ([c54f040](https://github.com/mongodb-js/mongodb-core/commit/c54f040)) * **sessions:** add `toBSON` method to `ClientSession` ([d95a4d1](https://github.com/mongodb-js/mongodb-core/commit/d95a4d1)) * **sessions:** never send `endSessions` from a `ClientSession` ([05ffe82](https://github.com/mongodb-js/mongodb-core/commit/05ffe82)) * **topology-description:** we can't use Object.values yet ([91df350](https://github.com/mongodb-js/mongodb-core/commit/91df350)) * **transactions:** do not send txnNumber for non-write commands ([#308](https://github.com/mongodb-js/mongodb-core/issues/308)) ([eb67b1a](https://github.com/mongodb-js/mongodb-core/commit/eb67b1a)) * **uri_parser:** ensure default port is 27017 ([426a95e](https://github.com/mongodb-js/mongodb-core/commit/426a95e)) * **uri-parser:** Incorrect parsing of arrays ([fcff104](https://github.com/mongodb-js/mongodb-core/commit/fcff104)) * **uri-parser:** Parse comma separated option values ([2dd1de0](https://github.com/mongodb-js/mongodb-core/commit/2dd1de0)) * **wireprotocol:** only send bypassDocumentValidation if true ([a81678b](https://github.com/mongodb-js/mongodb-core/commit/a81678b)) ### Features * **auth:** adds saslprep and SCRAM-SHA-256 ([506c087](https://github.com/mongodb-js/mongodb-core/commit/506c087)) * **cursor:** implement cursor for new sdam implementation ([f289226](https://github.com/mongodb-js/mongodb-core/commit/f289226)) * 
**cursor:** store operation time from initial query ([55e761e](https://github.com/mongodb-js/mongodb-core/commit/55e761e)) * **error:** add more specific error type for write concern errors ([347c5d7](https://github.com/mongodb-js/mongodb-core/commit/347c5d7)) * **Error:** adding error metadata field ([33be560](https://github.com/mongodb-js/mongodb-core/commit/33be560)) * **kerberos:** expose warning for kerberos mismatch versions ([efc0e43](https://github.com/mongodb-js/mongodb-core/commit/efc0e43)) * **max-staleness:** properly support a max staleness reducer ([d9c5c16](https://github.com/mongodb-js/mongodb-core/commit/d9c5c16)) * **MongoTimeoutError:** add common class for timeout events ([c5b4752](https://github.com/mongodb-js/mongodb-core/commit/c5b4752)) * **monitoring:** add support for server monitoring to `Server` ([30a394d](https://github.com/mongodb-js/mongodb-core/commit/30a394d)) * **op-compressed:** add support for OP_COMPRESSED to new sdam impl ([8deec9b](https://github.com/mongodb-js/mongodb-core/commit/8deec9b)) * **retryableWrites:** adding more support for retries ([d4c1597](https://github.com/mongodb-js/mongodb-core/commit/d4c1597)) * **sdam-monitoring:** add basic monitoring for new Topology type ([bb0c522](https://github.com/mongodb-js/mongodb-core/commit/bb0c522)) * **server:** add `command` support to new server class ([d9a8c05](https://github.com/mongodb-js/mongodb-core/commit/d9a8c05)) * **server-selection:** add basic support for server selection ([ccc5e1d](https://github.com/mongodb-js/mongodb-core/commit/ccc5e1d)) * **topology:** introduce a single Topology type, and test runner ([f35d773](https://github.com/mongodb-js/mongodb-core/commit/f35d773)) * **topology-description:** add helper method for server ownership ([2c64c75](https://github.com/mongodb-js/mongodb-core/commit/2c64c75)) * **txns:** add initial transaction interface for sessions ([ed76be0](https://github.com/mongodb-js/mongodb-core/commit/ed76be0)) # 
[3.1.0](https://github.com/mongodb-js/mongodb-core/compare/v3.0.6...v3.1.0) (2018-06-27) ### Bug Fixes * **auth:** prevent stalling on authentication when connected ([6b4ac89](https://github.com/mongodb-js/mongodb-core/commit/6b4ac89)) * **cursor:** check for session presence independently ([7c76c62](https://github.com/mongodb-js/mongodb-core/commit/7c76c62)) * **cursor:** check for sessions independently in core cursor ([cb5df28](https://github.com/mongodb-js/mongodb-core/commit/cb5df28)) * **error:** attach command response to MongoWriteConcernError ([#322](https://github.com/mongodb-js/mongodb-core/issues/322)) ([24c5d06](https://github.com/mongodb-js/mongodb-core/commit/24c5d06)) * **getmore-killcursor:** slaveOk shall not be included on these ([40fb2f4](https://github.com/mongodb-js/mongodb-core/commit/40fb2f4)) * **kerberos:** loosen restrictions on kerberos versions ([c4add26](https://github.com/mongodb-js/mongodb-core/commit/c4add26)) * **mongos:** use `incrementTransactionNumber` directly on session ([e230d54](https://github.com/mongodb-js/mongodb-core/commit/e230d54)) * **pool:** ensure that lsid is sent in get requests to mongos ([ae820f6](https://github.com/mongodb-js/mongodb-core/commit/ae820f6)) * **sdam:** we can't use Array.includes yet ([9c3b5ab](https://github.com/mongodb-js/mongodb-core/commit/9c3b5ab)) * **sessions:** add `toBSON` method to `ClientSession` ([d95a4d1](https://github.com/mongodb-js/mongodb-core/commit/d95a4d1)) * **sessions:** never send `endSessions` from a `ClientSession` ([05ffe82](https://github.com/mongodb-js/mongodb-core/commit/05ffe82)) * **topology-description:** we can't use Object.values yet ([91df350](https://github.com/mongodb-js/mongodb-core/commit/91df350)) * **transactions:** do not send txnNumber for non-write commands ([#308](https://github.com/mongodb-js/mongodb-core/issues/308)) ([eb67b1a](https://github.com/mongodb-js/mongodb-core/commit/eb67b1a)) * **wireprotocol:** only send bypassDocumentValidation if true 
([a81678b](https://github.com/mongodb-js/mongodb-core/commit/a81678b)) ### Features * **auth:** adds saslprep and SCRAM-SHA-256 ([506c087](https://github.com/mongodb-js/mongodb-core/commit/506c087)) * **cursor:** implement cursor for new sdam implementation ([f289226](https://github.com/mongodb-js/mongodb-core/commit/f289226)) * **cursor:** store operation time from initial query ([55e761e](https://github.com/mongodb-js/mongodb-core/commit/55e761e)) * **error:** add more specific error type for write concern errors ([347c5d7](https://github.com/mongodb-js/mongodb-core/commit/347c5d7)) * **Error:** adding error metadata field ([33be560](https://github.com/mongodb-js/mongodb-core/commit/33be560)) * **kerberos:** expose warning for kerberos mismatch versions ([efc0e43](https://github.com/mongodb-js/mongodb-core/commit/efc0e43)) * **max-staleness:** properly support a max staleness reducer ([d9c5c16](https://github.com/mongodb-js/mongodb-core/commit/d9c5c16)) * **MongoTimeoutError:** add common class for timeout events ([c5b4752](https://github.com/mongodb-js/mongodb-core/commit/c5b4752)) * **op-compressed:** add support for OP_COMPRESSED to new sdam impl ([8deec9b](https://github.com/mongodb-js/mongodb-core/commit/8deec9b)) * **retryableWrites:** adding more support for retries ([d4c1597](https://github.com/mongodb-js/mongodb-core/commit/d4c1597)) * **sdam-monitoring:** add basic monitoring for new Topology type ([bb0c522](https://github.com/mongodb-js/mongodb-core/commit/bb0c522)) * **server:** add `command` support to new server class ([d9a8c05](https://github.com/mongodb-js/mongodb-core/commit/d9a8c05)) * **server-selection:** add basic support for server selection ([ccc5e1d](https://github.com/mongodb-js/mongodb-core/commit/ccc5e1d)) * **topology:** introduce a single Topology type, and test runner ([f35d773](https://github.com/mongodb-js/mongodb-core/commit/f35d773)) * **topology-description:** add helper method for server ownership 
([2c64c75](https://github.com/mongodb-js/mongodb-core/commit/2c64c75)) * **txns:** add initial transaction interface for sessions ([ed76be0](https://github.com/mongodb-js/mongodb-core/commit/ed76be0)) ## [3.0.6](https://github.com/mongodb-js/mongodb-core/compare/v3.0.5...v3.0.6) (2018-04-09) ### Bug Fixes * **2.6-protocol:** kill cursor callback is called by pool now ([65f2bf7](https://github.com/mongodb-js/mongodb-core/commit/65f2bf7)) * **evergreen:** change name to id ([9303e12](https://github.com/mongodb-js/mongodb-core/commit/9303e12)) * **evergreen:** change nvm path to local ([e42ea5b](https://github.com/mongodb-js/mongodb-core/commit/e42ea5b)) * **evergreen:** pass in flag through npm scripts ([85708dd](https://github.com/mongodb-js/mongodb-core/commit/85708dd)) * **pool:** ensure noResponse callback is only called if cb exists ([5281605](https://github.com/mongodb-js/mongodb-core/commit/5281605)) * **replset:** only remove primary if primary is there ([1acd288](https://github.com/mongodb-js/mongodb-core/commit/1acd288)) * **test-environments:** ensure all servers run on separate ports ([b63e5d8](https://github.com/mongodb-js/mongodb-core/commit/b63e5d8)) * **uri_parser:** support a default database on mongodb+srv uris ([be01ffe](https://github.com/mongodb-js/mongodb-core/commit/be01ffe)) ### Features * **apm:** add events for command monitoring support in core ([37dce9c](https://github.com/mongodb-js/mongodb-core/commit/37dce9c)) * **evergreen:** add evergreen config based on drivers skeleton ([b71da99](https://github.com/mongodb-js/mongodb-core/commit/b71da99)) * **evergreen:** use evergreen flag when running tests ([55dff3b](https://github.com/mongodb-js/mongodb-core/commit/55dff3b)) ## [3.0.5](https://github.com/mongodb-js/mongodb-core/compare/v3.0.4...v3.0.5) (2018-03-14) ### Features * **sessions:** adding implicit cursor session support ([1607321](https://github.com/mongodb-js/mongodb-core/commit/1607321)) ## 
[3.0.4](https://github.com/mongodb-js/mongodb-core/compare/v3.0.3...v3.0.4) (2018-03-05) ### Bug Fixes * **connection:** ensure socket options are applied to ssl sockets ([e5ff927](https://github.com/mongodb-js/mongodb-core/commit/e5ff927)) ## [3.0.3](https://github.com/mongodb-js/mongodb-core/compare/v3.0.2...v3.0.3) (2018-02-23) ### Bug Fixes * **connection:** make pool not try to reconnect forever when reconnectTries = 0 ([#275](https://github.com/mongodb-js/mongodb-core/issues/275)) ([2d3fa98](https://github.com/mongodb-js/mongodb-core/commit/2d3fa98)), closes [Automattic/mongoose#6028](https://github.com/Automattic/mongoose/issues/6028) * **retryableWrites:** only remove primary after retry ([#274](https://github.com/mongodb-js/mongodb-core/issues/274)) ([7ac171e](https://github.com/mongodb-js/mongodb-core/commit/7ac171e)) * **sessions:** actually allow ending of sessions ([2b81bb6](https://github.com/mongodb-js/mongodb-core/commit/2b81bb6)) * **uri-parser:** do not use `hasOwnProperty` to detect ssl ([69d16c7](https://github.com/mongodb-js/mongodb-core/commit/69d16c7)) ### Features * **sessions:** adding endAllPooledSessions helper method ([d7804ed](https://github.com/mongodb-js/mongodb-core/commit/d7804ed)) ## [3.0.2](https://github.com/mongodb-js/mongodb-core/compare/v3.0.1...v3.0.2) (2018-01-29) ### Bug Fixes * **cursor:** check for autoReconnect option only for single server ([645d6df](https://github.com/mongodb-js/mongodb-core/commit/645d6df)) ### Features * **mongodb+srv:** add support for mongodb+srv to the uri parser ([19b42ce](https://github.com/mongodb-js/mongodb-core/commit/19b42ce)) * **uri-parser:** add initial implementation of uri parser for core ([8f797a7](https://github.com/mongodb-js/mongodb-core/commit/8f797a7)) * **uri-parser:** expose the connection string parser as api ([fdeca2f](https://github.com/mongodb-js/mongodb-core/commit/fdeca2f)) ## [3.0.1](https://github.com/mongodb-js/mongodb-core/compare/v3.0.0...v3.0.1) (2017-12-24) ### Bug 
Fixes * **connection:** correct erroneous use of `this` in helper method ([06b9388](https://github.com/mongodb-js/mongodb-core/commit/06b9388)) # [3.0.0](https://github.com/christkv/mongodb-core/compare/v3.0.0-rc0...v3.0.0) (2017-12-23) ### Bug Fixes * **connection:** ensure connection cleanup before fallback retry ([de62615](https://github.com/christkv/mongodb-core/commit/de62615)) * **mock-server:** expose potential errors in message handlers ([65dcca4](https://github.com/christkv/mongodb-core/commit/65dcca4)) * **mongos:** remove listener on destroy event ([243e942](https://github.com/christkv/mongodb-core/commit/243e942)), closes [#257](https://github.com/christkv/mongodb-core/issues/257) * **sdam:** more explicit wire protocol error message ([6d6d19a](https://github.com/christkv/mongodb-core/commit/6d6d19a)) * **secondaries:** fixes connection with secondary readPreference ([#258](https://github.com/christkv/mongodb-core/issues/258)) ([0060ad7](https://github.com/christkv/mongodb-core/commit/0060ad7)) * **sessions:** ensure that we ignore session details from arbiters ([de0105c](https://github.com/christkv/mongodb-core/commit/de0105c)) * **sharded-tests:** add `shardsvr` cmdline opt, wait for async fns ([867b080](https://github.com/christkv/mongodb-core/commit/867b080)) ### Features * **connection:** attempt both ipv6 and ipv4 when no family entered ([#260](https://github.com/christkv/mongodb-core/issues/260)) ([107bae5](https://github.com/christkv/mongodb-core/commit/107bae5)) # 3.0.0-rc0 (2017-12-05) ### Bug Fixes * **auth-plain:** only use BSON -after- requiring it ([4934adf](https://github.com/christkv/mongodb-core/commit/4934adf)) * **auth-scram:** cache the ScramSHA1 salted passwords up to 200 entries ([31ef03a](https://github.com/christkv/mongodb-core/commit/31ef03a)) * **client-session:** don't report errors for endSessions commands ([c34eaf5](https://github.com/christkv/mongodb-core/commit/c34eaf5)) * **connection:** default `family` to undefined 
rather than 4 ([c1b5e04](https://github.com/christkv/mongodb-core/commit/c1b5e04)) * **connection:** fixing leak in 3.0.0 ([#235](https://github.com/christkv/mongodb-core/issues/235)) ([fc669c0](https://github.com/christkv/mongodb-core/commit/fc669c0)) * **cursor:** avoid waiting for reconnect if reconnect disabled ([43e9b23](https://github.com/christkv/mongodb-core/commit/43e9b23)) * **cursor:** callback with server response on `_find` ([6999459](https://github.com/christkv/mongodb-core/commit/6999459)) * **errors:** export MongoError and MongoNetworkError at top-level ([972064a](https://github.com/christkv/mongodb-core/commit/972064a)) * **errors:** throw MongoNetworkError from more places ([2cec239](https://github.com/christkv/mongodb-core/commit/2cec239)) * **errors:** use subclassing for MongoNetworkError ([a132830](https://github.com/christkv/mongodb-core/commit/a132830)) * **errors:** use util.inherits() and protect edge case ([c953246](https://github.com/christkv/mongodb-core/commit/c953246)) * **mocha_server_tests:** rename confusing variable to fix tests ([a9fbae2](https://github.com/christkv/mongodb-core/commit/a9fbae2)) * **mock-tests:** ensure all servers are properly cleaned up ([5dafc86](https://github.com/christkv/mongodb-core/commit/5dafc86)) * **package:** upgrade mongodb-test-runner with bug fix ([5b2e99e](https://github.com/christkv/mongodb-core/commit/5b2e99e)) * **pool:** check topology exists before verifying session support ([0aa146d](https://github.com/christkv/mongodb-core/commit/0aa146d)) * **pool:** ensure inUse and connecting queues are cleared on reauth ([aa2840d](https://github.com/christkv/mongodb-core/commit/aa2840d)) * **pool:** ensure that errors are propagated on force destroy ([8f8ad56](https://github.com/christkv/mongodb-core/commit/8f8ad56)) * **pool:** ensure workItem is not null before accessing properties ([2143963](https://github.com/christkv/mongodb-core/commit/2143963)) * **pool_tests:** remove .only 
([8172137](https://github.com/christkv/mongodb-core/commit/8172137)) * **retryable-writes:** don't increment `txnNumber` on retries ([e7c2242](https://github.com/christkv/mongodb-core/commit/e7c2242)) * **retryable-writes:** network errors are retryable, inverted logic ([2727551](https://github.com/christkv/mongodb-core/commit/2727551)) * **scram:** cache salted data, not the original data ([0cbe95f](https://github.com/christkv/mongodb-core/commit/0cbe95f)) * **SDAM:** emit SDAM events on close and reconnect ([3451ff0](https://github.com/christkv/mongodb-core/commit/3451ff0)) * **server:** avoid waiting for reconnect if reconnect disabled ([611a352](https://github.com/christkv/mongodb-core/commit/611a352)) * **server:** correct minor typo in porting 2.x patch ([d92efec](https://github.com/christkv/mongodb-core/commit/d92efec)) * **server_tests:** change 'this' to 'self' in some server tests ([992d9e9](https://github.com/christkv/mongodb-core/commit/992d9e9)) * **server_tests:** fix errors in broken test ([1602e4d](https://github.com/christkv/mongodb-core/commit/1602e4d)) * **server-session-pool:** don't add expired sessions to the pool ([8f48b89](https://github.com/christkv/mongodb-core/commit/8f48b89)) * **server-session-pool:** ensure the queue is LIFO ([ac68e76](https://github.com/christkv/mongodb-core/commit/ac68e76)) * **utils:** don't throw if no snappy ([55bf2ad](https://github.com/christkv/mongodb-core/commit/55bf2ad)) * **wire-protocol:** 2.6 killCursor should not way for reply ([7337d91](https://github.com/christkv/mongodb-core/commit/7337d91)) ### Features * **cluster-time:** ensure clusterTime makes it to outgoing commands ([e700b79](https://github.com/christkv/mongodb-core/commit/e700b79)) * **cluster-time:** track incoming cluster time gossiping ([c910706](https://github.com/christkv/mongodb-core/commit/c910706)) * **compression:** implement wire protocol compression support ([2356ffb](https://github.com/christkv/mongodb-core/commit/2356ffb)) * 
**connection-spy:** add class for monitoring active connections ([6dd6db3](https://github.com/christkv/mongodb-core/commit/6dd6db3)) * **errors:** create MongoNetworkError ([df12740](https://github.com/christkv/mongodb-core/commit/df12740)) * **inital-cluster-time:** allow session to define initia value ([e3a1c8b](https://github.com/christkv/mongodb-core/commit/e3a1c8b)) * **mock:** support a means of consistently cleaning up mock servers ([ab3b70b](https://github.com/christkv/mongodb-core/commit/ab3b70b)) * **mock-server:** add the ability to set a message handler ([9a8b815](https://github.com/christkv/mongodb-core/commit/9a8b815)) * **operation-time:** track operationTime in relevant sessions ([8d512f1](https://github.com/christkv/mongodb-core/commit/8d512f1)) * **pool:** introduce the concept of a minimum pool size ([b01b1f8](https://github.com/christkv/mongodb-core/commit/b01b1f8)) * **replset:** more verbose replica set errors emission ([6d5eccd](https://github.com/christkv/mongodb-core/commit/6d5eccd)) * **retryable-writes:** add mongos support for retryable writes ([7778067](https://github.com/christkv/mongodb-core/commit/7778067)) * **retryable-writes:** initial support on replicasets ([73ac688](https://github.com/christkv/mongodb-core/commit/73ac688)) * **retryable-writes:** retry on "not master" stepdown errors ([028aec7](https://github.com/christkv/mongodb-core/commit/028aec7)) * **server-check:** reintroduce server version check ([486aace](https://github.com/christkv/mongodb-core/commit/486aace)) * **server-session-pool:** implement session pool per spect ([a1d5b22](https://github.com/christkv/mongodb-core/commit/a1d5b22)) * **session:** allow `session` options to be passed to write cmds ([5da75e4](https://github.com/christkv/mongodb-core/commit/5da75e4)) * **sessions:** add equality operator to ease readability ([6510d7d](https://github.com/christkv/mongodb-core/commit/6510d7d)) * **sessions:** export all sessions types on the top level 
([35265b3](https://github.com/christkv/mongodb-core/commit/35265b3)) * **sessions:** support sessions with cursors with find/getMore ([a016602](https://github.com/christkv/mongodb-core/commit/a016602)) * **sessions:** track `logicalSessionTimeoutMinutes` for sessions ([11865bf](https://github.com/christkv/mongodb-core/commit/11865bf)) * **ssl:** adding ssl options ciphers and ecdhCurve ([c839d5c](https://github.com/christkv/mongodb-core/commit/c839d5c)) * **test/:** convert server_tests, undefined_tests, replset_tests, replset_state_tests, and repleset_server_selection_tests to run with mongodb-test-runner ([3a7c5fd](https://github.com/christkv/mongodb-core/commit/3a7c5fd)) 2.1.16 2017-10-11 ----------------- * avoid waiting for reconnect if reconnect disabled in Server topology * avoid waiting for reconnect if reconnect disabled in Cursor * NODE-990 cache the ScramSHA1 salted passwords up to 200 entries * NODE-1153 ensure that errors are propagated on force destroy * NODE-1153 ensure inUse and connecting queues are cleared on reauth 2.1.15 2017-08-08 ----------------- * Emit SDAM events on close and reconnect 2.1.14 2017-07-07 ----------------- * NODE-1073 updates scram.js hi() algorithm to utilize crypto.pbkdf2Sync() * NODE-1049 only include primary server if there are no secondary servers for readPrefrence secondaryPreferred * moved `assign` polyfill to shared utils, replace usage of `extend` in tests 2.1.13 2017-06-19 ----------------- * NODE-1039 ensure we force destroy server instances, forcing queue to be flushed. * Use actual server type in standalone SDAM events. 2.1.12 2017-06-02 ----------------- * NODE-1019 Set keepAlive to 300 seconds or 1/2 of socketTimeout if socketTimeout < keepAlive. * Minor fix to report the correct state on error. * NODE-1020 'family' was added to options to provide high priority for ipv6 addresses (Issue #1518, https://github.com/firej). * Fix require_optional loading of bson-ext. 
* Ensure no errors are thrown by replset if topology is destroyed before it finished connecting. * NODE-999 SDAM fixes for Mongos and single Server event emitting. * NODE-1014 Set socketTimeout to default to 360 seconds. * NODE-1019 Set keepAlive to 300 seconds or 1/2 of socketTimeout if socketTimeout < keepAlive. 2.1.11 2017-05-22 ----------------- * NODE-987 Clear out old intervalIds on when calling topologyMonitor. * NODE-987 Moved filtering to pingServer method and added test case. * Check for connection destroyed just before writing out and flush out operations correctly if it is (Issue #179, https://github.com/jmholzinger). * NODE-989 Refactored Replicaset monitoring to correcly monitor newly added servers, Also extracted setTimeout and setInterval to use custom wrappers Timeout and Interva. 2.1.10 2017-04-18 ----------------- * NODE-981 delegate auth to replset/mongos if inTopology is set. * NODE-978 Wrap connection.end in try/catch for node 0.10.x issue causing exceptions to be thrown, Also surfaced getConnection for mongos and replset. * Remove dynamic require (Issue #175, https://github.com/tellnes). * NODE-696 Handle interrupted error for createIndexes. * Fixed isse when user is executing find command using Server.command and it get interpreted as a wire protcol message, #172. * NODE-966 promoteValues not being promoted correctly to getMore. * Merged in fix for flushing out monitoring operations. 2.1.9 2017-03-17 ---------------- * Return lastIsMaster correctly when connecting with secondaryOnlyConnectionAllowed is set to true and only a secondary is available in replica state. * Clone options when passed to wireProtocol handler to avoid intermittent modifications causing errors. * Ensure SSL error propegates better for Replset connections when there is a SSL validation error. * NODE-957 Fixed issue where < batchSize not causing cursor to be closed on execution of first batch. 
* NODE-958 Store reconnectConnection on pool object to allow destroy to close immediately. 2.1.8 2017-02-13 ---------------- * NODE-925 ensure we reschedule operations while pool is < poolSize while pool is growing and there are no connections with not currently performing work. * NODE-927 fixes issue where authentication was performed against arbiter instances. * NODE-915 Normalize all host names to avoid comparison issues. * Fixed issue where pool.destroy would never finish due to a single operation not being executed and keeping it open. 2.1.7 2017-01-24 ---------------- * NODE-919 ReplicaSet connection does not close immediately (Issue #156). * NODE-901 Fixed bug when normalizing host names. * NODE-909 Fixed readPreference issue caused by direct connection to primary. * NODE-910 Fixed issue when bufferMaxEntries == 0 and read preference set to nearest. 2.1.6 2017-01-13 ---------------- * NODE-908 Keep auth contexts in replset and mongos topology to ensure correct application of authentication credentials when primary is first server to be detected causing an immediate connect event to happen. 2.1.5 2017-01-11 ---------------- * updated bson and bson-ext dependencies to 1.0.4 to work past early node 4.x.x version having a broken Buffer.from implementation. 2.1.4 2017-01-03 ---------------- * updated bson and bson-ext dependencies to 1.0.3 due to util.inspect issue with ObjectId optimizations. 2.1.3 2017-01-03 ---------------- * Monitoring operations are re-scheduled in pool if it cannot find a connection that does not already have scheduled work on it, this is to avoid the monitoring socket timeout being applied to any existing operations on the socket due to pipelining * Moved replicaset monitoring away from serial mode and to parallel mode. * updated bson and bson-ext dependencies to 1.0.2. 2.1.2 2016-12-10 ---------------- * Delay topologyMonitoring on successful attemptReconnect as no need to run a full scan immediately. 
* Emit reconnect event in primary joining when in connected status for a replicaset. 2.1.1 2016-12-08 ---------------- * Updated bson library to 1.0.1. * Added optional support for bson-ext 1.0.1. 2.1.0 2016-12-05 ---------------- * Updated bson library to 1.0.0. * Added optional support for bson-ext 1.0.0. * Expose property parserType allowing for identification of currently configured parser. 2.0.14 2016-11-29 ----------------- * Updated bson library to 0.5.7. * Dont leak connection.workItems elments when killCursor is called (Issue #150, https://github.com/mdlavin). * Remove unnecessary errors formatting (Issue #149, https://github.com/akryvomaz). * Only check isConnected against availableConnections (Issue #142). * NODE-838 Provide better error message on failed to connect on first retry for Mongos topology. * Set default servername to host is not passed through for sni. * Made monitoring happen on exclusive connection and using connectionTimeout to handle the wait time before failure (Issue #148). * NODE-859 Make minimum value of maxStalenessSeconds 90 seconds. * NODE-852 Fix Kerberos module deprecations on linux and windows and release new kerberos version. * NODE-850 Update Max Staleness implementation. * NODE-849 username no longer required for MONGODB-X509 auth. * NODE-848 BSON Regex flags must be alphabetically ordered. * NODE-846 Create notice for all third party libraries. * NODE-843 Executing bulk operations overwrites write concern parameter. * NODE-842 Re-sync SDAM and SDAM Monitoring tests from Specs repo. * NODE-840 Resync CRUD spec tests. * Unescapable while(true) loop (Issue #152). 2.0.13 2016-10-21 ----------------- * Updated bson library to 0.5.6. - Included cyclic dependency detection * Fire callback when topology was destroyed (Issue #147, https://github.com/vkarpov15). * Refactoring to support pipelining ala 1.4.x branch will retaining the benefits of the growing/shrinking pool (Issue #146). 
* Fix typo in serverHeartbeatFailed event name (Issue #143, https://github.com/jakesjews). * NODE-798 Driver hangs on count command in replica set with one member (Issue #141, https://github.com/isayme). 2.0.12 2016-09-15 ----------------- * fix debug logging message not printing server name. * fixed application metadata being sent by wrong ismaster. * NODE-812 Fixed mongos stall due to proxy monitoring ismaster failure causing reconnect. * NODE-818 Replicaset timeouts in initial connect sequence can "no primary found". * Updated bson library to 0.5.5. * Added DBPointer up conversion to DBRef. 2.0.11 2016-08-29 ----------------- * NODE-803, Fixed issue in how the latency window is calculated for Mongos topology causing issues for single proxy connections. * Avoid timeout in attemptReconnect causing multiple attemptReconnect attempts to happen (Issue #134, https://github.com/dead-horse). * Ensure promoteBuffers is propagated in same fashion as promoteValues and promoteLongs 2.0.10 2016-08-23 ----------------- * Added promoteValues flag (default to true) to allow user to specify they only want wrapped BSON values back instead of promotion to native types. * Do not close mongos proxy connection on failed ismaster check in ha process (Issue #130). 2.0.9 2016-08-19 ---------------- * Allow promoteLongs to be passed in through Response.parse method and overrides default set on the connection. * NODE-798 Driver hangs on count command in replica set with one member. * Allow promoteLongs to be passed in through Response.parse method and overrides default set on the connection. * Allow passing in servername for TLS connections for SNI support. 2.0.8 2016-08-16 ---------------- * Allow execution of store operations indepent of having both a primary and secondary available (Issue #123). * Fixed command execution issue for mongos to ensure buffering of commands when no mongos available. * Added hashed connection names and fullResult. * Updated bson library to 0.5.3. 
* Wrap callback in nextTick to ensure exceptions are thrown correctly. 2.0.7 2016-07-28 ---------------- * Allow primary to be returned when secondaryPreferred is passed (Issue #117, https://github.com/dhendo). * Added better warnings when passing in illegal seed list members to a Mongos topology. * Minor attemptReconnect bug that would cause multiple attemptReconnect to run in parallel. * Fix wrong opType passed to disconnectHandler.add (Issue #121, https://github.com/adrian-gierakowski) * Implemented domain backward comp support enabled via domainsEnabled options on Server/ReplSet/Mongos and MongoClient.connect. * Initial max staleness implementation for ReplSet and Mongos for 3.4 support. * Added handling of collation for 3.4 support. 2.0.6 2016-07-19 ---------------- * Destroy connection on socket timeout due to newer node versions not closing the socket. 2.0.5 2016-07-15 ---------------- * Minor fixes to handle faster MongoClient connectivity from the driver, allowing single server instances to detect if they are a proxy. * Added numberOfConsecutiveTimeouts to pool that will destroy the pool if the number of consecutive timeouts > reconnectTries. * Print warning if seedlist servers host name does not match the one provided in it's ismaster.me field for Replicaset members. * Fix issue where Replicaset connection would not succeeed if there the replicaset was a single primary server setup. 2.0.4 2016-07-11 ----------------- * Updated bson to version 0.5.1. * handle situation where user is providing seedlist names that do not match host list. fix allows for a single full discovery connection sweep before erroring out. * NODE-747 Polyfill for Object.assign for 0.12.x or 0.10.x. * NODE-746 Improves replicaset errors for wrong setName. 2.0.3 2016-07-08 ----------------- * Implemented Server Selection Specification test suite. * Added warning level to logger. * Added warning message when sockeTimeout < haInterval for Replset/Mongos. 
2.0.2 2016-07-06 ----------------- * Mongos emits close event on no proxies available or when reconnect attempt fails. * Replset emits close event when no servers available or when attemptReconnect fails to reconnect. * Don't throw in auth methods but return error in callback. 2.0.1 2016-07-05 ----------------- * Added missing logout method on mongos proxy topology. * Fixed logger error serialization issue. * Documentation fixes. 2.0.0 2016-07-05 ----------------- * Moved all authentication and handling of growing/shrinking of pool connections into actual pool. * All authentication methods now handle both auth/reauthenticate and logout events. * Introduced logout method to get rid of onAll option for logout command. * Updated bson to 0.5.0 that includes Decimal128 support. 1.3.21 2016-05-30 ----------------- * Pool gets stuck if a connection marked for immediateRelease times out (Issue #99, https://github.com/nbrachet). * Make authentication process retry up to authenticationRetries at authenticationRetryIntervalMS interval. * Made ismaster replicaset calls operate with connectTimeout or monitorSocketTimeout to lower impact of big socketTimeouts on monitoring performance. * Make sure connections mark as "immediateRelease" don't linger the inUserConnections list. Otherwise, after that connection times out, getAll() incorrectly returns more connections than are effectively present, causing the pool to not get restarted by reconnectServer. (Issue #99, https://github.com/nbrachet). * Make cursor getMore or killCursor correctly trigger pool reconnect to single server if pool has not been destroyed. * Make ismaster monitoring for single server connection default to avoid user confusion due to change in behavior. 1.3.20 2016-05-25 ----------------- * NODE-710 Allow setting driver loggerLevel and logger function from MongoClient options. * Minor fix for SSL errors on connection attempts, minor fix to reconnect handler for the server. 
* Don't write to socket before having registered the callback for commands, work around for windows issuing error events twice on node.js when socket gets destroyed by firewall. * Fix minor issue where connectingServers would not be removed correctly causing single server connections to not auto-reconnect. 1.3.19 2016-05-17 ----------------- - Handle situation where a server connection in a replicaset sometimes fails to be destroyed properly due to being in the middle of authentication when the destroy method is called on the replicaset causing it to be orphaned and never collected. - Set keepAlive to false by default to work around bug in node.js for Windows XP and Windows 2003. - Ensure replicaset topology destroy is never called by SDAM. - Ensure all paths are correctly returned on inspectServer in replset. 1.3.18 2016-04-27 ----------------- - Hardened cursor connection handling for getMore and killCursor to ensure mid operation connection kill does not throw null exception. - Fixes for Node 6.0 support. 1.3.17 2016-04-26 ----------------- - Added improved handling of reconnect when topology is a single server. - Added better handling of $query queries passed down for 3.2 or higher. - Introduced getServerFrom method to topologies to let cursor grab a new pool for getMore and killCursors commands and not use connection pipelining. - NODE-693 Move authentication to be after ismaster call to avoid authenticating against arbiters. 1.3.16 2016-04-07 ----------------- - Only call unref on destroy if it exists to ensure proper working destroy method on early node v0.10.x versions. 1.3.15 2016-04-06 ----------------- - NODE-687 Fixed issue where a server object failed to be destroyed if the replicaset state did not update successfully. This could leave active connections accumulating over time. - Fixed some situations where all connections are flushed due to a single connection in the connection pool closing. 
1.3.14 2016-04-01 ----------------- - Ensure server inquireServerState exits immediately on server.destroy call. - Refactored readPreference handling in 2.4, 2.6 and 3.2 wire protocol handling. 1.3.13 2016-03-30 ----------------- - Handle missing cursor on getMore when going through a mongos proxy by pinning to socket connection and not server. 1.3.12 2016-03-29 ----------------- - Mongos pickProxies fall back to closest mongos if no proxies meet latency window specified. 1.3.11 2016-03-29 ----------------- - isConnected method for mongos uses same selection code as getServer. - Exceptions in cursor getServer trapped and correctly delegated to high level handler. 1.3.10 2016-03-22 ----------------- - SDAM Monitoring emits diff for Replicasets to simplify detecting the state changes. - SDAM Monitoring correctly emits Mongos as serverDescriptionEvent. 1.3.9 2016-03-20 ---------------- - Removed monitoring exclusive connection, should resolve timeouts and reconnects on idle replicasets where haInteval > socketTimeout. 1.3.8 2016-03-18 ---------------- - Implements the SDAM monitoring specification. - Fix issue where cursor would error out and not be buffered when primary is not connected. 1.3.7 2016-03-16 ---------------- - Fixed issue with replicasetInquirer where it could stop performing monitoring if there was no servers available. 1.3.6 2016-03-15 ---------------- - Fixed raise condition where multiple replicasetInquirer operations could be started in parallel creating redundant connections. 1.3.5 2016-03-14 ---------------- - Handle rogue SSL exceptions (Issue #85, https://github.com/durran). 
1.3.4 2016-03-14 ---------------- - Added unref options on server, replicaset and mongos (Issue #81, https://github.com/allevo) - cursorNotFound flag always false (Issue #83, https://github.com/xgfd) - refactor of events emission of fullsetup and all events (Issue #84, https://github.com/xizhibei) 1.3.3 2016-03-08 ---------------- - Added support for promoteLongs option for command function. - Return connection if no callback available - Emit connect event when server reconnects after initial connection failed (Issue #76, https://github.com/vkarpov15) - Introduced optional monitoringSocketTimeout option to allow better control of SDAM monitoring timeouts. - Made monitoringSocketTimeout default to 30000 if no connectionTimeout value specified or if set to 0. - Fixed issue where tailable cursor would not retry even though cursor was still alive. - Disabled exhaust flag support to avoid issues where users could easily write code that would cause memory to run out. - Handle the case where the first command result document returns an empty list of documents but a live cursor. - Allow passing down off CANONICALIZE_HOST_NAME and SERVICE_REALM options for kerberos. 1.3.2 2016-02-09 ---------------- - Harden MessageHandler in server.js to avoid issues where we cannot find a callback for an operation. - Ensure RequestId can never be larger than Max Number integer size. 1.3.1 2016-02-05 ---------------- - Removed annoying missing Kerberos error (NODE-654). 1.3.0 2016-02-03 ---------------- - Added raw support for the command function on topologies. - Fixed issue where raw results that fell on batchSize boundaries failed (Issue #72) - Copy over all the properties to the callback returned from bindToDomain, (Issue #72) - Added connection hash id to be able to reference connection host/name without leaking it outside of driver. - NODE-638, Cannot authenticate database user with utf-8 password. 
- Refactored pool to be worker queue based, minimizing the impact a slow query have on throughput as long as # slow queries < # connections in the pool. - Pool now grows and shrinks correctly depending on demand not causing a full pool reconnect. - Improvements in monitoring of a Replicaset where in certain situations the inquiry process could get exited. - Switched to using Array.push instead of concat for use cases of a lot of documents. - Fixed issue where re-authentication could loose the credentials if whole Replicaset disconnected at once. - Added peer optional dependencies support using require_optional module. 1.2.32 2016-01-12 ----------------- - Bumped bson to V0.4.21 to allow using minor optimizations. 1.2.31 2016-01-04 ----------------- - Allow connection to secondary if primaryPreferred or secondaryPreferred (Issue #70, https://github.com/leichter) 1.2.30 2015-12-23 ----------------- - Pool allocates size + 1 connections when using replicasets, reserving additional pool connection for monitoring exclusively. - Fixes bug when all replicaset members are down, that would cause it to fail to reconnect using the originally provided seedlist. 1.2.29 2015-12-17 ----------------- - Correctly emit close event when calling destroy on server topology. 1.2.28 2015-12-13 ----------------- - Backed out Prevent Maximum call stack exceeded by calling all callbacks on nextTick, (Issue #64, https://github.com/iamruinous) as it breaks node 0.10.x support. 1.2.27 2015-12-13 ----------------- - Added [options.checkServerIdentity=true] {boolean|function}. Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function, (Issue #29). - Prevent Maximum call stack exceeded by calling all callbacks on nextTick, (Issue #64, https://github.com/iamruinous). - State is not defined in mongos, (Issue #63, https://github.com/flyingfisher). 
- Fixed corner case issue on exhaust cursors on pre 3.0.x MongoDB. 1.2.26 2015-11-23 ----------------- - Converted test suite to use mongodb-topology-manager. - Upgraded bson library to V0.4.20. - Minor fixes for 3.2 readPreferences. 1.2.25 2015-11-23 ----------------- - Correctly error out when passed a seedlist of non-valid server members. 1.2.24 2015-11-20 ----------------- - Fix Automattic/mongoose#3481; flush callbacks on error, (Issue #57, https://github.com/vkarpov15). - $explain query for wire protocol 2.6 and 2.4 does not set number of returned documents to -1 but to 0. 1.2.23 2015-11-16 ----------------- - ismaster runs against admin.$cmd instead of system.$cmd. 1.2.22 2015-11-16 ----------------- - Fixes to handle getMore command errors for MongoDB 3.2 - Allows the process to properly close upon a Db.close() call on the replica set by shutting down the haTimer and closing arbiter connections. 1.2.21 2015-11-07 ----------------- - Hardened the checking for replicaset equality checks. - OpReplay flag correctly set on Wire protocol query. - Mongos load balancing added, introduced localThresholdMS to control the feature. - Kerberos now a peerDependency, making it not install it by default in Node 5.0 or higher. 1.2.20 2015-10-28 ----------------- - Fixed bug in arbiter connection capping code. - NODE-599 correctly handle arrays of server tags in order of priority. - Fix for 2.6 wire protocol handler related to readPreference handling. - Added maxAwaitTimeMS support for 3.2 getMore to allow for custom timeouts on tailable cursors. - Make CoreCursor check for $err before saying that 'next' succeeded (Issue #53, https://github.com/vkarpov15). 1.2.19 2015-10-15 ----------------- - Make batchSize always be > 0 for 3.2 wire protocol to make it work consistently with pre 3.2 servers. - Locked to bson 0.4.19. 1.2.18 2015-10-15 ----------------- - Minor 3.2 fix for handling readPreferences on sharded commands. 
- Minor fixes to correctly pass APM specification test suite. 1.2.17 2015-10-08 ----------------- - Connections to arbiters only maintain a single connection. 1.2.15 2015-10-06 ----------------- - Set slaveOk to true for getMore and killCursors commands. - Don't swallow callback errors for 2.4 single server (Issue #49, https://github.com/vkarpov15). - Apply toString('hex') to each buffer in an array when logging (Issue #48, https://github.com/nbrachet). 1.2.14 2015-09-28 ----------------- - NODE-547 only emit error if there are any listeners. - Fixed APM issue with issuing readConcern. 1.2.13 2015-09-18 ----------------- - Added BSON serializer ignoreUndefined option for insert/update/remove/command/cursor. 1.2.12 2015-09-08 ----------------- - NODE-541 Added initial support for readConcern. 1.2.11 2015-08-31 ----------------- - NODE-535 If connectWithNoPrimary is true then primary-only connection is not allowed. - NODE-534 Passive secondaries are not allowed for secondaryOnlyConnectionAllowed. - Fixed filtering bug for logging (Issue 30, https://github.com/christkv/mongodb-core/issues/30). 1.2.10 2015-08-14 ----------------- - Added missing Mongos.prototype.parserType function. 1.2.9 2015-08-05 ---------------- - NODE-525 Reset connectionTimeout after it's overwritten by tls.connect. - NODE-518 connectTimeoutMS is doubled in 2.0.39. 1.2.8 2015-07-24 ----------------- - Minor fix to handle 2.4.x errors better by correctly return driver layer issues. 1.2.7 2015-07-16 ----------------- - Refactoring to allow to tap into find/getmore/killcursor in cursors for APM monitoring in driver. 1.2.6 2015-07-14 ----------------- - NODE-505 Query fails to find records that have a 'result' property with an array value. 1.2.5 2015-07-14 ----------------- - NODE-492 correctly handle hanging replicaset monitoring connections when server is unavailable due to network partitions or firewalls dropping packets, configureable using the connectionTimeoutMS setting. 
1.2.4 2015-07-07 ----------------- - NODE-493 staggering the socket connections to avoid overwhelming the mongod process. 1.2.3 2015-06-26 ----------------- - Minor bug fixes. 1.2.2 2015-06-22 ----------------- - Fix issue with SCRAM authentication causing authentication to return true on failed authentication (Issue 26, https://github.com/cglass17). 1.2.1 2015-06-17 ----------------- - Ensure serializeFunctions passed down correctly to wire protocol. 1.2.0 2015-06-17 ----------------- - Switching to using the 0.4.x pure JS serializer, removing dependency on C++ parser. - Refactoring wire protocol messages to avoid expensive size calculations of documents in favor of writing out an array of buffers to the sockets. - NODE-486 fixed issue related to limit and skip when calling toArray in 2.0 driver. - NODE-483 throw error if capabilities of topology is queries before topology has performed connection setup. - NODE-487 fixed issue where killcursor command was not being sent correctly on limit and skip queries. 1.1.33 2015-05-31 ----------------- - NODE-478 Work around authentication race condition in mongos authentication due to multi step authentication methods like SCRAM. 1.1.32 2015-05-20 ----------------- - After reconnect, it updates the allowable reconnect retries to the option settings (Issue #23, https://github.com/owenallenaz) 1.1.31 2015-05-19 ----------------- - Minor fixes for issues with re-authentication of mongos. 1.1.30 2015-05-18 ----------------- - Correctly emit 'all' event when primary + all secondaries have connected. 1.1.29 2015-05-17 ----------------- - NODE-464 Only use a single socket against arbiters and hidden servers. - Ensure we filter out hidden servers from any server queries. 1.1.28 2015-05-12 ----------------- - Fixed buffer compare for electionId for < node 12.0.2 1.1.27 2015-05-12 ----------------- - NODE-455 Update SDAM specification support to cover electionId and Mongos load balancing. 
1.1.26 2015-05-06 ----------------- - NODE-456 Allow mongodb-core to pipeline commands (ex findAndModify+GLE) along the same connection and handle the returned results. - Fixes to make mongodb-core work for node 0.8.x when using scram and setImmediate. 1.1.25 2015-04-24 ----------------- - Handle lack of callback in crud operations when returning error on application closed. 1.1.24 2015-04-22 ----------------- - Error out when topology has been destroyed either by connection retries being exhausted or destroy called on topology. 1.1.23 2015-04-15 ----------------- - Standardizing mongoErrors and its API (Issue #14) - Creating a new connection is slow because of 100ms setTimeout() (Issue #17, https://github.com/vkarpov15) - remove mkdirp and rimraf dependencies (Issue #12) - Updated default value of param options.rejectUnauthorized to match documentation (Issue #16) - ISSUE: NODE-417 Resolution. Improving behavior of thrown errors (Issue #14, https://github.com/owenallenaz) - Fix cursor hanging when next() called on exhausted cursor (Issue #18, https://github.com/vkarpov15) 1.1.22 2015-04-10 ----------------- - Minor refactorings in cursor code to make extending the cursor simpler. - NODE-417 Resolution. Improving behavior of thrown errors using Error.captureStackTrace. 1.1.21 2015-03-26 ----------------- - Updated bson module to 0.3.0 that extracted the c++ parser into bson-ext and made it an optional dependency. 1.1.20 2015-03-24 ----------------- - NODE-395 Socket Not Closing, db.close called before full set finished initalizing leading to server connections in progress not being closed properly. 1.1.19 2015-03-21 ----------------- - Made kerberos module ~0.0 to allow for quicker releases due to io.js of kerberos module. 1.1.18 2015-03-17 ----------------- - Added support for minHeartbeatFrequencyMS on server reconnect according to the SDAM specification. 
1.1.17 2015-03-16 ----------------- - NODE-377, fixed issue where tags would correctly be checked on secondary and nearest to filter out eligible server candidates. 1.1.16 2015-03-06 ----------------- - rejectUnauthorized parameter is set to true for ssl certificates by default instead of false. 1.1.15 2015-03-04 ----------------- - Removed check for type in replset pickserver function. 1.1.14 2015-02-26 ----------------- - NODE-374 correctly adding passive secondaries to the list of eligable servers for reads 1.1.13 2015-02-24 ----------------- - NODE-365 mongoDB native node.js driver infinite reconnect attempts (fixed issue around handling of retry attempts) 1.1.12 2015-02-16 ----------------- - Fixed cursor transforms for buffered document reads from cursor. 1.1.11 2015-02-02 ----------------- - Remove the required setName for replicaset connections, if not set it will pick the first setName returned. 1.1.10 2015-31-01 ----------------- - Added tranforms.doc option to cursor to allow for pr. document transformations. 1.1.9 2015-21-01 ---------------- - Updated BSON dependency to 0.2.18 to fix issues with io.js and node. - Updated Kerberos dependency to 0.0.8 to fix issues with io.js and node. - Don't treat findOne() as a command cursor. - Refactored out state changes into methods to simplify read the next method. 1.1.8 2015-09-12 ---------------- - Stripped out Object.defineProperty for performance reasons - Applied more performance optimizations. - properties cursorBatchSize, cursorSkip, cursorLimit are not methods setCursorBatchSize/cursorBatchSize, setCursorSkip/cursorSkip, setCursorLimit/cursorLimit 1.1.7 2014-18-12 ---------------- - Use ns variable for getMore commands for command cursors to work properly with cursor version of listCollections and listIndexes. 1.1.6 2014-18-12 ---------------- - Server manager fixed to support 2.2.X servers for travis test matrix. 
1.1.5 2014-17-12 ---------------- - Fall back to errmsg when creating MongoError for command errors 1.1.4 2014-17-12 ---------------- - Added transform method support for cursor (initially just for initial query results) to support listCollections/listIndexes in 2.8. - Fixed variable leak in scram. - Fixed server manager to deal better with killing processes. - Bumped bson to 0.2.16. 1.1.3 2014-01-12 ---------------- - Fixed error handling issue with nonce generation in mongocr. - Fixed issues with restarting servers when using ssl. - Using strict for all classes. - Cleaned up any escaping global variables. 1.1.2 2014-20-11 ---------------- - Correctly encoding UTF8 collection names on wire protocol messages. - Added emitClose parameter to topology destroy methods to allow users to specify that they wish the topology to emit the close event to any listeners. 1.1.1 2014-14-11 ---------------- - Refactored code to use prototype instead of privileged methods. - Fixed issue with auth where a runtime condition could leave replicaset members without proper authentication. - Several deopt optimizations for v8 to improve performance and reduce GC pauses. 1.0.5 2014-29-10 ---------------- - Fixed issue with wrong namespace being created for command cursors. 1.0.4 2014-24-10 ---------------- - switched from using shift for the cursor due to bad slowdown on big batchSizes as shift causes entire array to be copied on each call. 1.0.3 2014-21-10 ---------------- - fixed error issuing problem on cursor.next when iterating over a huge dataset with a very small batchSize. 1.0.2 2014-07-10 ---------------- - fullsetup is now defined as a primary and secondary being available allowing for all read preferences to be satisfied. - fixed issue with replset_state logging. 
1.0.1 2014-07-10 ---------------- - Dependency issue solved 1.0.0 2014-07-10 ---------------- - Initial release of mongodb-core package/index.js000644 0000003533 3560116604 010731 0ustar00000000 000000 'use strict'; var BSON = require('bson'); var require_optional = require('require_optional'); const EJSON = require('./lib/utils').retrieveEJSON(); try { // Attempt to grab the native BSON parser var BSONNative = require_optional('bson-ext'); // If we got the native parser, use it instead of the // Javascript one if (BSONNative) { BSON = BSONNative; } } catch (err) {} // eslint-disable-line module.exports = { // Errors MongoError: require('./lib/error').MongoError, MongoNetworkError: require('./lib/error').MongoNetworkError, MongoParseError: require('./lib/error').MongoParseError, MongoTimeoutError: require('./lib/error').MongoTimeoutError, MongoWriteConcernError: require('./lib/error').MongoWriteConcernError, mongoErrorContextSymbol: require('./lib/error').mongoErrorContextSymbol, // Core Connection: require('./lib/connection/connection'), Server: require('./lib/topologies/server'), ReplSet: require('./lib/topologies/replset'), Mongos: require('./lib/topologies/mongos'), Logger: require('./lib/connection/logger'), Cursor: require('./lib/cursor'), ReadPreference: require('./lib/topologies/read_preference'), Sessions: require('./lib/sessions'), BSON: BSON, EJSON: EJSON, Topology: require('./lib/sdam/topology'), // Raw operations Query: require('./lib/connection/commands').Query, // Auth mechanisms MongoCredentials: require('./lib/auth/mongo_credentials').MongoCredentials, defaultAuthProviders: require('./lib/auth/defaultAuthProviders').defaultAuthProviders, MongoCR: require('./lib/auth/mongocr'), X509: require('./lib/auth/x509'), Plain: require('./lib/auth/plain'), GSSAPI: require('./lib/auth/gssapi'), ScramSHA1: require('./lib/auth/scram').ScramSHA1, ScramSHA256: require('./lib/auth/scram').ScramSHA256, // Utilities parseConnectionString: require('./lib/uri_parser') 
}; package/LICENSE000644 0000026073 3560116604 010275 0ustar00000000 000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.package/README.md000644 0000020421 3560116604 010536 0ustar00000000 000000 [![Build Status](https://secure.travis-ci.org/mongodb-js/mongodb-core.png)](http://travis-ci.org/mongodb-js/mongodb-core) [![Coverage Status](https://coveralls.io/repos/github/mongodb-js/mongodb-core/badge.svg?branch=1.3)](https://coveralls.io/github/mongodb-js/mongodb-core?branch=1.3) # Description The MongoDB Core driver is the low level part of the 2.0 or higher MongoDB driver and is meant for library developers not end users. It does not contain any abstractions or helpers outside of the basic management of MongoDB topology connections, CRUD operations and authentication. 
## MongoDB Node.JS Core Driver | what | where | |---------------|------------------------------------------------| | documentation | http://mongodb.github.io/node-mongodb-native/ | | apidoc | http://mongodb.github.io/node-mongodb-native/ | | source | https://github.com/mongodb-js/mongodb-core | | mongodb | http://www.mongodb.org/ | ### Blogs of Engineers involved in the driver - Christian Kvalheim [@christkv](https://twitter.com/christkv) ### Bugs / Feature Requests Think you’ve found a bug? Want to see a new feature in node-mongodb-native? Please open a case in our issue management tool, JIRA: - Create an account and login . - Navigate to the NODE project . - Click **Create Issue** - Please provide as much information as possible about the issue type and how to reproduce it. Bug reports in JIRA for all driver projects (i.e. NODE, PYTHON, CSHARP, JAVA) and the Core Server (i.e. SERVER) project are **public**. ### Questions and Bug Reports * mailing list: https://groups.google.com/forum/#!forum/node-mongodb-native * jira: http://jira.mongodb.org/ ### Change Log http://jira.mongodb.org/browse/NODE # QuickStart The quick start guide will show you how to set up a simple application using Core driver and MongoDB. It scope is only how to set up the driver and perform the simple crud operations. For more inn depth coverage we encourage reading the tutorials. ## Create the package.json file Let's create a directory where our application will live. In our case we will put this under our projects directory. ``` mkdir myproject cd myproject ``` Create a **package.json** using your favorite text editor and fill it in. 
```json { "name": "myproject", "version": "1.0.0", "description": "My first project", "main": "index.js", "repository": { "type": "git", "url": "git://github.com/christkv/myfirstproject.git" }, "dependencies": { "mongodb-core": "~1.0" }, "author": "Christian Kvalheim", "license": "Apache 2.0", "bugs": { "url": "https://github.com/christkv/myfirstproject/issues" }, "homepage": "https://github.com/christkv/myfirstproject" } ``` Save the file and return to the shell or command prompt and use **NPM** to install all the dependencies. ``` npm install ``` You should see **NPM** download a lot of files. Once it's done you'll find all the downloaded packages under the **node_modules** directory. Booting up a MongoDB Server --------------------------- Let's boot up a MongoDB server instance. Download the right MongoDB version from [MongoDB](http://www.mongodb.org), open a new shell or command line and ensure the **mongod** command is in the shell or command line path. Now let's create a database directory (in our case under **/data**). ``` mongod --dbpath=/data --port 27017 ``` You should see the **mongod** process start up and print some status information. ## Connecting to MongoDB Let's create a new **app.js** file that we will use to show the basic CRUD operations using the MongoDB driver. First let's add code to connect to the server. Notice that there is no concept of a database here and we use the topology directly to perform the connection. 
```js var Server = require('mongodb-core').Server , assert = require('assert'); // Set up server connection var server = new Server({ host: 'localhost' , port: 27017 , reconnect: true , reconnectInterval: 50 }); // Add event listeners server.on('connect', function(_server) { console.log('connected'); test.done(); }); server.on('close', function() { console.log('closed'); }); server.on('reconnect', function() { console.log('reconnect'); }); // Start connection server.connect(); ``` To connect to a replicaset we would use the `ReplSet` class and for a set of Mongos proxies we use the `Mongos` class. Each topology class offer the same CRUD operations and you operate on the topology directly. Let's look at an example exercising all the different available CRUD operations. ```js var Server = require('mongodb-core').Server , assert = require('assert'); // Set up server connection var server = new Server({ host: 'localhost' , port: 27017 , reconnect: true , reconnectInterval: 50 }); // Add event listeners server.on('connect', function(_server) { console.log('connected'); // Execute the ismaster command _server.command('system.$cmd', {ismaster: true}, function(err, result) { // Perform a document insert _server.insert('myproject.inserts1', [{a:1}, {a:2}], { writeConcern: {w:1}, ordered:true }, function(err, results) { assert.equal(null, err); assert.equal(2, results.result.n); // Perform a document update _server.update('myproject.inserts1', [{ q: {a: 1}, u: {'$set': {b:1}} }], { writeConcern: {w:1}, ordered:true }, function(err, results) { assert.equal(null, err); assert.equal(1, results.result.n); // Remove a document _server.remove('myproject.inserts1', [{ q: {a: 1}, limit: 1 }], { writeConcern: {w:1}, ordered:true }, function(err, results) { assert.equal(null, err); assert.equal(1, results.result.n); // Get a document var cursor = _server.cursor('integration_tests.inserts_example4', { find: 'integration_tests.example4' , query: {a:1} }); // Get the first document 
cursor.next(function(err, doc) { assert.equal(null, err); assert.equal(2, doc.a); // Execute the ismaster command _server.command("system.$cmd" , {ismaster: true}, function(err, result) { assert.equal(null, err) _server.destroy(); }); }); }); }); test.done(); }); }); server.on('close', function() { console.log('closed'); }); server.on('reconnect', function() { console.log('reconnect'); }); // Start connection server.connect(); ``` The core driver does not contain any helpers or abstractions only the core crud operations. These consist of the following commands. * `insert`, Insert takes an array of 1 or more documents to be inserted against the topology and allows you to specify a write concern and if you wish to execute the inserts in order or out of order. * `update`, Update takes an array of 1 or more update commands to be executed against the server topology and also allows you to specify a write concern and if you wish to execute the updates in order or out of order. * `remove`, Remove takes an array of 1 or more remove commands to be executed against the server topology and also allows you to specify a write concern and if you wish to execute the removes in order or out of order. * `cursor`, Returns you a cursor for either the 'virtual' `find` command, a command that returns a cursor id or a plain cursor id. Read the cursor tutorial for more inn depth coverage. * `command`, Executes a command against MongoDB and returns the result. * `auth`, Authenticates the current topology using a supported authentication scheme. The Core Driver is a building block for library builders and is not meant for usage by end users as it lacks a lot of features the end user might need such as automatic buffering of operations when a primary is changing in a replicaset or the db and collections abstraction. 
## Next steps The next step is to get more in depth information about how the different aspects of the core driver works and how to leverage them to extend the functionality of the cursors. Please view the tutorials for more detailed information. package/lib/auth/auth_provider.js000644 0000011351 3560116604 014201 0ustar00000000 000000 'use strict'; const MongoError = require('../error').MongoError; /** * Creates a new AuthProvider, which dictates how to authenticate for a given * mechanism. * @class */ class AuthProvider { constructor(bson) { this.bson = bson; this.authStore = []; } /** * Authenticate * @method * @param {SendAuthCommand} sendAuthCommand Writes an auth command directly to a specific connection * @param {Connection[]} connections Connections to authenticate using this authenticator * @param {MongoCredentials} credentials Authentication credentials * @param {authResultCallback} callback The callback to return the result from the authentication */ auth(sendAuthCommand, connections, credentials, callback) { // Total connections let count = connections.length; if (count === 0) { callback(null, null); return; } // Valid connections let numberOfValidConnections = 0; let errorObject = null; const execute = connection => { this._authenticateSingleConnection(sendAuthCommand, connection, credentials, (err, r) => { // Adjust count count = count - 1; // If we have an error if (err) { errorObject = new MongoError(err); } else if (r && (r.$err || r.errmsg)) { errorObject = new MongoError(r); } else { numberOfValidConnections = numberOfValidConnections + 1; } // Still authenticating against other connections. 
if (count !== 0) { return; } // We have authenticated all connections if (numberOfValidConnections > 0) { // Store the auth details this.addCredentials(credentials); // Return correct authentication callback(null, true); } else { if (errorObject == null) { errorObject = new MongoError(`failed to authenticate using ${credentials.mechanism}`); } callback(errorObject, false); } }); }; const executeInNextTick = _connection => process.nextTick(() => execute(_connection)); // For each connection we need to authenticate while (connections.length > 0) { executeInNextTick(connections.shift()); } } /** * Implementation of a single connection authenticating. Is meant to be overridden. * Will error if called directly * @ignore */ _authenticateSingleConnection(/*sendAuthCommand, connection, credentials, callback*/) { throw new Error('_authenticateSingleConnection must be overridden'); } /** * Adds credentials to store only if it does not exist * @param {MongoCredentials} credentials credentials to add to store */ addCredentials(credentials) { const found = this.authStore.some(cred => cred.equals(credentials)); if (!found) { this.authStore.push(credentials); } } /** * Re authenticate pool * @method * @param {SendAuthCommand} sendAuthCommand Writes an auth command directly to a specific connection * @param {Connection[]} connections Connections to authenticate using this authenticator * @param {authResultCallback} callback The callback to return the result from the authentication */ reauthenticate(sendAuthCommand, connections, callback) { const authStore = this.authStore.slice(0); let count = authStore.length; if (count === 0) { return callback(null, null); } for (let i = 0; i < authStore.length; i++) { this.auth(sendAuthCommand, connections, authStore[i], function(err) { count = count - 1; if (count === 0) { callback(err, null); } }); } } /** * Remove credentials that have been previously stored in the auth provider * @method * @param {string} source Name of database we are 
removing authStore details about * @return {object} */ logout(source) { this.authStore = this.authStore.filter(credentials => credentials.source !== source); } } /** * A function that writes authentication commands to a specific connection * @callback SendAuthCommand * @param {Connection} connection The connection to write to * @param {Command} command A command with a toBin method that can be written to a connection * @param {AuthWriteCallback} callback Callback called when command response is received */ /** * A callback for a specific auth command * @callback AuthWriteCallback * @param {Error} err If command failed, an error from the server * @param {object} r The response from the server */ /** * This is a result from an authentication strategy * * @callback authResultCallback * @param {error} error An error object. Set to null if no error present * @param {boolean} result The result of the authentication process */ module.exports = { AuthProvider }; package/lib/auth/defaultAuthProviders.js000644 0000001402 3560116604 015466 0ustar00000000 000000 'use strict'; const MongoCR = require('./mongocr'); const X509 = require('./x509'); const Plain = require('./plain'); const GSSAPI = require('./gssapi'); const SSPI = require('./sspi'); const ScramSHA1 = require('./scram').ScramSHA1; const ScramSHA256 = require('./scram').ScramSHA256; /** * Returns the default authentication providers. 
* * @param {BSON} bson Bson definition * @returns {Object} a mapping of auth names to auth types */ function defaultAuthProviders(bson) { return { mongocr: new MongoCR(bson), x509: new X509(bson), plain: new Plain(bson), gssapi: new GSSAPI(bson), sspi: new SSPI(bson), 'scram-sha-1': new ScramSHA1(bson), 'scram-sha-256': new ScramSHA256(bson) }; } module.exports = { defaultAuthProviders }; package/lib/auth/gssapi.js000644 0000011764 3560116604 012624 0ustar00000000 000000 'use strict'; const AuthProvider = require('./auth_provider').AuthProvider; const retrieveKerberos = require('../utils').retrieveKerberos; let kerberos; /** * Creates a new GSSAPI authentication mechanism * @class * @extends AuthProvider */ class GSSAPI extends AuthProvider { /** * Implementation of authentication for a single connection * @override */ _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { const source = credentials.source; const username = credentials.username; const password = credentials.password; const mechanismProperties = credentials.mechanismProperties; const gssapiServiceName = mechanismProperties['gssapiservicename'] || mechanismProperties['gssapiServiceName'] || 'mongodb'; GSSAPIInitialize( this, kerberos.processes.MongoAuthProcess, source, username, password, source, gssapiServiceName, sendAuthCommand, connection, mechanismProperties, callback ); } /** * Authenticate * @override * @method */ auth(sendAuthCommand, connections, credentials, callback) { if (kerberos == null) { try { kerberos = retrieveKerberos(); } catch (e) { return callback(e, null); } } super.auth(sendAuthCommand, connections, credentials, callback); } } // // Initialize step var GSSAPIInitialize = function( self, MongoAuthProcess, db, username, password, authdb, gssapiServiceName, sendAuthCommand, connection, options, callback ) { // Create authenticator var mongo_auth_process = new MongoAuthProcess( connection.host, connection.port, gssapiServiceName, options ); // Perform 
initialization mongo_auth_process.init(username, password, function(err) { if (err) return callback(err, false); // Perform the first step mongo_auth_process.transition('', function(err, payload) { if (err) return callback(err, false); // Call the next db step MongoDBGSSAPIFirstStep( self, mongo_auth_process, payload, db, username, password, authdb, sendAuthCommand, connection, callback ); }); }); }; // // Perform first step against mongodb var MongoDBGSSAPIFirstStep = function( self, mongo_auth_process, payload, db, username, password, authdb, sendAuthCommand, connection, callback ) { // Build the sasl start command var command = { saslStart: 1, mechanism: 'GSSAPI', payload: payload, autoAuthorize: 1 }; // Write the commmand on the connection sendAuthCommand(connection, '$external.$cmd', command, (err, doc) => { if (err) return callback(err, false); // Execute mongodb transition mongo_auth_process.transition(doc.payload, function(err, payload) { if (err) return callback(err, false); // MongoDB API Second Step MongoDBGSSAPISecondStep( self, mongo_auth_process, payload, doc, db, username, password, authdb, sendAuthCommand, connection, callback ); }); }); }; // // Perform first step against mongodb var MongoDBGSSAPISecondStep = function( self, mongo_auth_process, payload, doc, db, username, password, authdb, sendAuthCommand, connection, callback ) { // Build Authentication command to send to MongoDB var command = { saslContinue: 1, conversationId: doc.conversationId, payload: payload }; // Execute the command // Write the commmand on the connection sendAuthCommand(connection, '$external.$cmd', command, (err, doc) => { if (err) return callback(err, false); // Call next transition for kerberos mongo_auth_process.transition(doc.payload, function(err, payload) { if (err) return callback(err, false); // Call the last and third step MongoDBGSSAPIThirdStep( self, mongo_auth_process, payload, doc, db, username, password, authdb, sendAuthCommand, connection, callback ); }); 
}); }; var MongoDBGSSAPIThirdStep = function( self, mongo_auth_process, payload, doc, db, username, password, authdb, sendAuthCommand, connection, callback ) { // Build final command var command = { saslContinue: 1, conversationId: doc.conversationId, payload: payload }; // Execute the command sendAuthCommand(connection, '$external.$cmd', command, (err, r) => { if (err) return callback(err, false); mongo_auth_process.transition(null, function(err) { if (err) return callback(err, null); callback(null, r); }); }); }; /** * This is a result from a authentication strategy * * @callback authResultCallback * @param {error} error An error object. Set to null if no error present * @param {boolean} result The result of the authentication process */ module.exports = GSSAPI; package/lib/auth/mongo_credentials.js000644 0000005603 3560116604 015025 0ustar00000000 000000 'use strict'; // Resolves the default auth mechanism according to // https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst function getDefaultAuthMechanism(ismaster) { if (ismaster) { // If ismaster contains saslSupportedMechs, use scram-sha-256 // if it is available, else scram-sha-1 if (Array.isArray(ismaster.saslSupportedMechs)) { return ismaster.saslSupportedMechs.indexOf('SCRAM-SHA-256') >= 0 ? 'scram-sha-256' : 'scram-sha-1'; } // Fallback to legacy selection method. 
If wire version >= 3, use scram-sha-1 if (ismaster.maxWireVersion >= 3) { return 'scram-sha-1'; } } // Default for wireprotocol < 3 return 'mongocr'; } /** * A representation of the credentials used by MongoDB * @class * @property {string} mechanism The method used to authenticate * @property {string} [username] The username used for authentication * @property {string} [password] The password used for authentication * @property {string} [source] The database that the user should authenticate against * @property {object} [mechanismProperties] Special properties used by some types of auth mechanisms */ class MongoCredentials { /** * Creates a new MongoCredentials object * @param {object} [options] * @param {string} [options.username] The username used for authentication * @param {string} [options.password] The password used for authentication * @param {string} [options.source] The database that the user should authenticate against * @param {string} [options.mechanism] The method used to authenticate * @param {object} [options.mechanismProperties] Special properties used by some types of auth mechanisms */ constructor(options) { options = options || {}; this.username = options.username; this.password = options.password; this.source = options.source || options.db; this.mechanism = options.mechanism || 'default'; this.mechanismProperties = options.mechanismProperties; } /** * Determines if two MongoCredentials objects are equivalent * @param {MongoCredentials} other another MongoCredentials object * @returns {boolean} true if the two objects are equal. */ equals(other) { return ( this.mechanism === other.mechanism && this.username === other.username && this.password === other.password && this.source === other.source ); } /** * If the authentication mechanism is set to "default", resolves the authMechanism * based on the server version and server supported sasl mechanisms. 
* * @param {Object} [ismaster] An ismaster response from the server */ resolveAuthMechanism(ismaster) { // If the mechanism is not "default", then it does not need to be resolved if (this.mechanism.toLowerCase() === 'default') { this.mechanism = getDefaultAuthMechanism(ismaster); } } } module.exports = { MongoCredentials }; package/lib/auth/mongocr.js000644 0000002546 3560116604 013000 0ustar00000000 000000 'use strict'; const crypto = require('crypto'); const AuthProvider = require('./auth_provider').AuthProvider; /** * Creates a new MongoCR authentication mechanism * * @extends AuthProvider */ class MongoCR extends AuthProvider { /** * Implementation of authentication for a single connection * @override */ _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { const username = credentials.username; const password = credentials.password; const source = credentials.source; sendAuthCommand(connection, `${source}.$cmd`, { getnonce: 1 }, (err, r) => { let nonce = null; let key = null; // Get nonce if (err == null) { nonce = r.nonce; // Use node md5 generator let md5 = crypto.createHash('md5'); // Generate keys used for authentication md5.update(username + ':mongo:' + password, 'utf8'); const hash_password = md5.digest('hex'); // Final key md5 = crypto.createHash('md5'); md5.update(nonce + username + hash_password, 'utf8'); key = md5.digest('hex'); } const authenticateCommand = { authenticate: 1, user: username, nonce, key }; sendAuthCommand(connection, `${source}.$cmd`, authenticateCommand, callback); }); } } module.exports = MongoCR; package/lib/auth/plain.js000644 0000001644 3560116604 012435 0ustar00000000 000000 'use strict'; const retrieveBSON = require('../connection/utils').retrieveBSON; const AuthProvider = require('./auth_provider').AuthProvider; // TODO: can we get the Binary type from this.bson instead? 
const BSON = retrieveBSON(); const Binary = BSON.Binary; /** * Creates a new Plain authentication mechanism * * @extends AuthProvider */ class Plain extends AuthProvider { /** * Implementation of authentication for a single connection * @override */ _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { const username = credentials.username; const password = credentials.password; const payload = new Binary(`\x00${username}\x00${password}`); const command = { saslStart: 1, mechanism: 'PLAIN', payload: payload, autoAuthorize: 1 }; sendAuthCommand(connection, '$external.$cmd', command, callback); } } module.exports = Plain; package/lib/auth/scram.js000644 0000016344 3560116604 012442 0ustar00000000 000000 'use strict'; const crypto = require('crypto'); const Buffer = require('safe-buffer').Buffer; const retrieveBSON = require('../connection/utils').retrieveBSON; const MongoError = require('../error').MongoError; const AuthProvider = require('./auth_provider').AuthProvider; const BSON = retrieveBSON(); const Binary = BSON.Binary; let saslprep; try { saslprep = require('saslprep'); } catch (e) { // don't do anything; } var parsePayload = function(payload) { var dict = {}; var parts = payload.split(','); for (var i = 0; i < parts.length; i++) { var valueParts = parts[i].split('='); dict[valueParts[0]] = valueParts[1]; } return dict; }; var passwordDigest = function(username, password) { if (typeof username !== 'string') throw new MongoError('username must be a string'); if (typeof password !== 'string') throw new MongoError('password must be a string'); if (password.length === 0) throw new MongoError('password cannot be empty'); // Use node md5 generator var md5 = crypto.createHash('md5'); // Generate keys used for authentication md5.update(username + ':mongo:' + password, 'utf8'); return md5.digest('hex'); }; // XOR two buffers function xor(a, b) { if (!Buffer.isBuffer(a)) a = Buffer.from(a); if (!Buffer.isBuffer(b)) b = Buffer.from(b); 
const length = Math.max(a.length, b.length); const res = []; for (let i = 0; i < length; i += 1) { res.push(a[i] ^ b[i]); } return Buffer.from(res).toString('base64'); } function H(method, text) { return crypto .createHash(method) .update(text) .digest(); } function HMAC(method, key, text) { return crypto .createHmac(method, key) .update(text) .digest(); } var _hiCache = {}; var _hiCacheCount = 0; var _hiCachePurge = function() { _hiCache = {}; _hiCacheCount = 0; }; const hiLengthMap = { sha256: 32, sha1: 20 }; function HI(data, salt, iterations, cryptoMethod) { // omit the work if already generated const key = [data, salt.toString('base64'), iterations].join('_'); if (_hiCache[key] !== undefined) { return _hiCache[key]; } // generate the salt const saltedData = crypto.pbkdf2Sync( data, salt, iterations, hiLengthMap[cryptoMethod], cryptoMethod ); // cache a copy to speed up the next lookup, but prevent unbounded cache growth if (_hiCacheCount >= 200) { _hiCachePurge(); } _hiCache[key] = saltedData; _hiCacheCount += 1; return saltedData; } /** * Creates a new ScramSHA authentication mechanism * @class * @extends AuthProvider */ class ScramSHA extends AuthProvider { constructor(bson, cryptoMethod) { super(bson); this.cryptoMethod = cryptoMethod || 'sha1'; } static _getError(err, r) { if (err) { return err; } if (r.$err || r.errmsg) { return new MongoError(r); } } /** * @ignore */ _executeScram(sendAuthCommand, connection, credentials, nonce, callback) { let username = credentials.username; const password = credentials.password; const db = credentials.source; const cryptoMethod = this.cryptoMethod; let mechanism = 'SCRAM-SHA-1'; let processedPassword; if (cryptoMethod === 'sha256') { mechanism = 'SCRAM-SHA-256'; processedPassword = saslprep ? 
saslprep(password) : password; } else { try { processedPassword = passwordDigest(username, password); } catch (e) { return callback(e); } } // Clean up the user username = username.replace('=', '=3D').replace(',', '=2C'); // NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8. // Since the username is not sasl-prep-d, we need to do this here. const firstBare = Buffer.concat([ Buffer.from('n=', 'utf8'), Buffer.from(username, 'utf8'), Buffer.from(',r=', 'utf8'), Buffer.from(nonce, 'utf8') ]); // Build command structure const saslStartCmd = { saslStart: 1, mechanism, payload: new Binary(Buffer.concat([Buffer.from('n,,', 'utf8'), firstBare])), autoAuthorize: 1 }; // Write the commmand on the connection sendAuthCommand(connection, `${db}.$cmd`, saslStartCmd, (err, r) => { let tmpError = ScramSHA._getError(err, r); if (tmpError) { return callback(tmpError, null); } const payload = Buffer.isBuffer(r.payload) ? new Binary(r.payload) : r.payload; const dict = parsePayload(payload.value()); const iterations = parseInt(dict.i, 10); const salt = dict.s; const rnonce = dict.r; // Set up start of proof const withoutProof = `c=biws,r=${rnonce}`; const saltedPassword = HI( processedPassword, Buffer.from(salt, 'base64'), iterations, cryptoMethod ); if (iterations && iterations < 4096) { const error = new MongoError(`Server returned an invalid iteration count ${iterations}`); return callback(error, false); } const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key'); const storedKey = H(cryptoMethod, clientKey); const authMessage = [firstBare, payload.value().toString('base64'), withoutProof].join(','); const clientSignature = HMAC(cryptoMethod, storedKey, authMessage); const clientProof = `p=${xor(clientKey, clientSignature)}`; const clientFinal = [withoutProof, clientProof].join(','); const saslContinueCmd = { saslContinue: 1, conversationId: r.conversationId, payload: new Binary(Buffer.from(clientFinal)) }; sendAuthCommand(connection, 
`${db}.$cmd`, saslContinueCmd, (err, r) => { if (!r || r.done !== false) { return callback(err, r); } const retrySaslContinueCmd = { saslContinue: 1, conversationId: r.conversationId, payload: Buffer.alloc(0) }; sendAuthCommand(connection, `${db}.$cmd`, retrySaslContinueCmd, callback); }); }); } /** * Implementation of authentication for a single connection * @override */ _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { // Create a random nonce crypto.randomBytes(24, (err, buff) => { if (err) { return callback(err, null); } return this._executeScram( sendAuthCommand, connection, credentials, buff.toString('base64'), callback ); }); } /** * Authenticate * @override * @method */ auth(sendAuthCommand, connections, credentials, callback) { this._checkSaslprep(); super.auth(sendAuthCommand, connections, credentials, callback); } _checkSaslprep() { const cryptoMethod = this.cryptoMethod; if (cryptoMethod === 'sha256') { if (!saslprep) { console.warn('Warning: no saslprep library specified. 
Passwords will not be sanitized'); } } } } /** * Creates a new ScramSHA1 authentication mechanism * @class * @extends ScramSHA */ class ScramSHA1 extends ScramSHA { constructor(bson) { super(bson, 'sha1'); } } /** * Creates a new ScramSHA256 authentication mechanism * @class * @extends ScramSHA */ class ScramSHA256 extends ScramSHA { constructor(bson) { super(bson, 'sha256'); } } module.exports = { ScramSHA1, ScramSHA256 }; package/lib/auth/sspi.js000644 0000006157 3560116604 012314 0ustar00000000 000000 'use strict'; const AuthProvider = require('./auth_provider').AuthProvider; const retrieveKerberos = require('../utils').retrieveKerberos; let kerberos; /** * Creates a new SSPI authentication mechanism * @class * @extends AuthProvider */ class SSPI extends AuthProvider { /** * Implementation of authentication for a single connection * @override */ _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { // TODO: Destructure this const username = credentials.username; const password = credentials.password; const mechanismProperties = credentials.mechanismProperties; const gssapiServiceName = mechanismProperties['gssapiservicename'] || mechanismProperties['gssapiServiceName'] || 'mongodb'; SSIPAuthenticate( this, kerberos.processes.MongoAuthProcess, username, password, gssapiServiceName, sendAuthCommand, connection, mechanismProperties, callback ); } /** * Authenticate * @override * @method */ auth(sendAuthCommand, connections, credentials, callback) { if (kerberos == null) { try { kerberos = retrieveKerberos(); } catch (e) { return callback(e, null); } } super.auth(sendAuthCommand, connections, credentials, callback); } } function SSIPAuthenticate( self, MongoAuthProcess, username, password, gssapiServiceName, sendAuthCommand, connection, options, callback ) { const authProcess = new MongoAuthProcess( connection.host, connection.port, gssapiServiceName, options ); function authCommand(command, authCb) { sendAuthCommand(connection, 
'$external.$cmd', command, authCb); } authProcess.init(username, password, err => { if (err) return callback(err, false); authProcess.transition('', (err, payload) => { if (err) return callback(err, false); const command = { saslStart: 1, mechanism: 'GSSAPI', payload, autoAuthorize: 1 }; authCommand(command, (err, doc) => { if (err) return callback(err, false); authProcess.transition(doc.payload, (err, payload) => { if (err) return callback(err, false); const command = { saslContinue: 1, conversationId: doc.conversationId, payload }; authCommand(command, (err, doc) => { if (err) return callback(err, false); authProcess.transition(doc.payload, (err, payload) => { if (err) return callback(err, false); const command = { saslContinue: 1, conversationId: doc.conversationId, payload }; authCommand(command, (err, response) => { if (err) return callback(err, false); authProcess.transition(null, err => { if (err) return callback(err, null); callback(null, response); }); }); }); }); }); }); }); }); } module.exports = SSPI; package/lib/auth/x509.js000644 0000001206 3560116604 012031 0ustar00000000 000000 'use strict'; const AuthProvider = require('./auth_provider').AuthProvider; /** * Creates a new X509 authentication mechanism * @class * @extends AuthProvider */ class X509 extends AuthProvider { /** * Implementation of authentication for a single connection * @override */ _authenticateSingleConnection(sendAuthCommand, connection, credentials, callback) { const username = credentials.username; const command = { authenticate: 1, mechanism: 'MONGODB-X509' }; if (username) { command.user = username; } sendAuthCommand(connection, '$external.$cmd', command, callback); } } module.exports = X509; package/lib/connection/apm.js000644 0000014253 3560116604 013305 0ustar00000000 000000 'use strict'; const Msg = require('../connection/msg').Msg; const KillCursor = require('../connection/commands').KillCursor; const GetMore = require('../connection/commands').GetMore; const 
calculateDurationInMs = require('../utils').calculateDurationInMs; /** Commands that we want to redact because of the sensitive nature of their contents */ const SENSITIVE_COMMANDS = new Set([ 'authenticate', 'saslStart', 'saslContinue', 'getnonce', 'createUser', 'updateUser', 'copydbgetnonce', 'copydbsaslstart', 'copydb' ]); // helper methods const extractCommandName = commandDoc => Object.keys(commandDoc)[0]; const namespace = command => command.ns; const databaseName = command => command.ns.split('.')[0]; const collectionName = command => command.ns.split('.')[1]; const generateConnectionId = pool => `${pool.options.host}:${pool.options.port}`; const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? {} : result); const LEGACY_FIND_QUERY_MAP = { $query: 'filter', $orderby: 'sort', $hint: 'hint', $comment: 'comment', $maxScan: 'maxScan', $max: 'max', $min: 'min', $returnKey: 'returnKey', $showDiskLoc: 'showRecordId', $maxTimeMS: 'maxTimeMS', $snapshot: 'snapshot' }; const LEGACY_FIND_OPTIONS_MAP = { numberToSkip: 'skip', numberToReturn: 'batchSize', returnFieldsSelector: 'projection' }; const OP_QUERY_KEYS = [ 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial', 'exhaust' ]; /** * Extract the actual command from the query, possibly upconverting if it's a legacy * format * * @param {Object} command the command */ const extractCommand = command => { if (command instanceof GetMore) { return { getMore: command.cursorId, collection: collectionName(command), batchSize: command.numberToReturn }; } if (command instanceof KillCursor) { return { killCursors: collectionName(command), cursors: command.cursorIds }; } if (command instanceof Msg) { return command.command; } if (command.query && command.query.$query) { let result; if (command.ns === 'admin.$cmd') { // upconvert legacy command result = Object.assign({}, command.query.$query); } else { // upconvert legacy find command result = { find: collectionName(command) }; 
Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => { if (typeof command.query[key] !== 'undefined') result[LEGACY_FIND_QUERY_MAP[key]] = command.query[key]; }); } Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => { if (typeof command[key] !== 'undefined') result[LEGACY_FIND_OPTIONS_MAP[key]] = command[key]; }); OP_QUERY_KEYS.forEach(key => { if (command[key]) result[key] = command[key]; }); if (typeof command.pre32Limit !== 'undefined') { result.limit = command.pre32Limit; } if (command.query.$explain) { return { explain: result }; } return result; } return command.query ? command.query : command; }; const extractReply = (command, reply) => { if (command instanceof GetMore) { return { ok: 1, cursor: { id: reply.message.cursorId, ns: namespace(command), nextBatch: reply.message.documents } }; } if (command instanceof KillCursor) { return { ok: 1, cursorsUnknown: command.cursorIds }; } // is this a legacy find command? if (command.query && typeof command.query.$query !== 'undefined') { return { ok: 1, cursor: { id: reply.message.cursorId, ns: namespace(command), firstBatch: reply.message.documents } }; } // in the event of a `noResponse` command, just return if (reply === null) return reply; return reply.result; }; /** An event indicating the start of a given command */ class CommandStartedEvent { /** * Create a started event * * @param {Pool} pool the pool that originated the command * @param {Object} command the command */ constructor(pool, command) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); // NOTE: remove in major revision, this is not spec behavior if (SENSITIVE_COMMANDS.has(commandName)) { this.commandObj = {}; this.commandObj[commandName] = true; } Object.assign(this, { command: cmd, databaseName: databaseName(command), commandName, requestId: command.requestId, connectionId: generateConnectionId(pool) }); } } /** An event indicating the success of a given command */ class CommandSucceededEvent { /** * Create a 
succeeded event * * @param {Pool} pool the pool that originated the command * @param {Object} command the command * @param {Object} reply the reply for this command from the server * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration */ constructor(pool, command, reply, started) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); Object.assign(this, { duration: calculateDurationInMs(started), commandName, reply: maybeRedact(commandName, extractReply(command, reply)), requestId: command.requestId, connectionId: generateConnectionId(pool) }); } } /** An event indicating the failure of a given command */ class CommandFailedEvent { /** * Create a failure event * * @param {Pool} pool the pool that originated the command * @param {Object} command the command * @param {MongoError|Object} error the generated error or a server error response * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration */ constructor(pool, command, error, started) { const cmd = extractCommand(command); const commandName = extractCommandName(cmd); Object.assign(this, { duration: calculateDurationInMs(started), commandName, failure: maybeRedact(commandName, error), requestId: command.requestId, connectionId: generateConnectionId(pool) }); } } module.exports = { CommandStartedEvent, CommandSucceededEvent, CommandFailedEvent }; package/lib/connection/command_result.js000644 0000001471 3560116604 015542 0ustar00000000 000000 'use strict'; /** * Creates a new CommandResult instance * @class * @param {object} result CommandResult object * @param {Connection} connection A connection instance associated with this result * @return {CommandResult} A cursor instance */ var CommandResult = function(result, connection, message) { this.result = result; this.connection = connection; this.message = message; }; /** * Convert CommandResult to JSON * @method * 
@return {object} */ CommandResult.prototype.toJSON = function() { let result = Object.assign({}, this, this.result); delete result.message; return result; }; /** * Convert CommandResult to String representation * @method * @return {string} */ CommandResult.prototype.toString = function() { return JSON.stringify(this.toJSON()); }; module.exports = CommandResult; package/lib/connection/commands.js000644 0000037257 3560116604 014342 0ustar00000000 000000 'use strict'; var retrieveBSON = require('./utils').retrieveBSON; var BSON = retrieveBSON(); var Long = BSON.Long; const Buffer = require('safe-buffer').Buffer; // Incrementing request id var _requestId = 0; // Wire command operation ids var opcodes = require('../wireprotocol/shared').opcodes; // Query flags var OPTS_TAILABLE_CURSOR = 2; var OPTS_SLAVE = 4; var OPTS_OPLOG_REPLAY = 8; var OPTS_NO_CURSOR_TIMEOUT = 16; var OPTS_AWAIT_DATA = 32; var OPTS_EXHAUST = 64; var OPTS_PARTIAL = 128; // Response flags var CURSOR_NOT_FOUND = 1; var QUERY_FAILURE = 2; var SHARD_CONFIG_STALE = 4; var AWAIT_CAPABLE = 8; /************************************************************** * QUERY **************************************************************/ var Query = function(bson, ns, query, options) { var self = this; // Basic options needed to be passed in if (ns == null) throw new Error('ns must be specified for query'); if (query == null) throw new Error('query must be specified for query'); // Validate that we are not passing 0x00 in the collection name if (ns.indexOf('\x00') !== -1) { throw new Error('namespace cannot contain a null character'); } // Basic options this.bson = bson; this.ns = ns; this.query = query; // Additional options this.numberToSkip = options.numberToSkip || 0; this.numberToReturn = options.numberToReturn || 0; this.returnFieldSelector = options.returnFieldSelector || null; this.requestId = Query.getRequestId(); // special case for pre-3.2 find commands, delete ASAP this.pre32Limit = options.pre32Limit; // 
Serialization option this.serializeFunctions = typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; this.ignoreUndefined = typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16; this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : true; this.batchSize = self.numberToReturn; // Flags this.tailable = false; this.slaveOk = typeof options.slaveOk === 'boolean' ? options.slaveOk : false; this.oplogReplay = false; this.noCursorTimeout = false; this.awaitData = false; this.exhaust = false; this.partial = false; }; // // Assign a new request Id Query.prototype.incRequestId = function() { this.requestId = _requestId++; }; // // Assign a new request Id Query.nextRequestId = function() { return _requestId + 1; }; // // Uses a single allocated buffer for the process, avoiding multiple memory allocations Query.prototype.toBin = function() { var self = this; var buffers = []; var projection = null; // Set up the flags var flags = 0; if (this.tailable) { flags |= OPTS_TAILABLE_CURSOR; } if (this.slaveOk) { flags |= OPTS_SLAVE; } if (this.oplogReplay) { flags |= OPTS_OPLOG_REPLAY; } if (this.noCursorTimeout) { flags |= OPTS_NO_CURSOR_TIMEOUT; } if (this.awaitData) { flags |= OPTS_AWAIT_DATA; } if (this.exhaust) { flags |= OPTS_EXHAUST; } if (this.partial) { flags |= OPTS_PARTIAL; } // If batchSize is different to self.numberToReturn if (self.batchSize !== self.numberToReturn) self.numberToReturn = self.batchSize; // Allocate write protocol header buffer var header = Buffer.alloc( 4 * 4 + // Header 4 + // Flags Buffer.byteLength(self.ns) + 1 + // namespace 4 + // numberToSkip 4 // numberToReturn ); // Add header to buffers buffers.push(header); // Serialize the query var query = self.bson.serialize(this.query, { checkKeys: this.checkKeys, serializeFunctions: this.serializeFunctions, ignoreUndefined: this.ignoreUndefined }); // Add query 
document buffers.push(query); if (self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) { // Serialize the projection document projection = self.bson.serialize(this.returnFieldSelector, { checkKeys: this.checkKeys, serializeFunctions: this.serializeFunctions, ignoreUndefined: this.ignoreUndefined }); // Add projection document buffers.push(projection); } // Total message size var totalLength = header.length + query.length + (projection ? projection.length : 0); // Set up the index var index = 4; // Write total document length header[3] = (totalLength >> 24) & 0xff; header[2] = (totalLength >> 16) & 0xff; header[1] = (totalLength >> 8) & 0xff; header[0] = totalLength & 0xff; // Write header information requestId header[index + 3] = (this.requestId >> 24) & 0xff; header[index + 2] = (this.requestId >> 16) & 0xff; header[index + 1] = (this.requestId >> 8) & 0xff; header[index] = this.requestId & 0xff; index = index + 4; // Write header information responseTo header[index + 3] = (0 >> 24) & 0xff; header[index + 2] = (0 >> 16) & 0xff; header[index + 1] = (0 >> 8) & 0xff; header[index] = 0 & 0xff; index = index + 4; // Write header information OP_QUERY header[index + 3] = (opcodes.OP_QUERY >> 24) & 0xff; header[index + 2] = (opcodes.OP_QUERY >> 16) & 0xff; header[index + 1] = (opcodes.OP_QUERY >> 8) & 0xff; header[index] = opcodes.OP_QUERY & 0xff; index = index + 4; // Write header information flags header[index + 3] = (flags >> 24) & 0xff; header[index + 2] = (flags >> 16) & 0xff; header[index + 1] = (flags >> 8) & 0xff; header[index] = flags & 0xff; index = index + 4; // Write collection name index = index + header.write(this.ns, index, 'utf8') + 1; header[index - 1] = 0; // Write header information flags numberToSkip header[index + 3] = (this.numberToSkip >> 24) & 0xff; header[index + 2] = (this.numberToSkip >> 16) & 0xff; header[index + 1] = (this.numberToSkip >> 8) & 0xff; header[index] = this.numberToSkip & 0xff; index = index + 4; // Write 
header information flags numberToReturn header[index + 3] = (this.numberToReturn >> 24) & 0xff; header[index + 2] = (this.numberToReturn >> 16) & 0xff; header[index + 1] = (this.numberToReturn >> 8) & 0xff; header[index] = this.numberToReturn & 0xff; index = index + 4; // Return the buffers return buffers; }; Query.getRequestId = function() { return ++_requestId; }; /************************************************************** * GETMORE **************************************************************/ var GetMore = function(bson, ns, cursorId, opts) { opts = opts || {}; this.numberToReturn = opts.numberToReturn || 0; this.requestId = _requestId++; this.bson = bson; this.ns = ns; this.cursorId = cursorId; }; // // Uses a single allocated buffer for the process, avoiding multiple memory allocations GetMore.prototype.toBin = function() { var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + 4 * 4; // Create command buffer var index = 0; // Allocate buffer var _buffer = Buffer.alloc(length); // Write header information // index = write32bit(index, _buffer, length); _buffer[index + 3] = (length >> 24) & 0xff; _buffer[index + 2] = (length >> 16) & 0xff; _buffer[index + 1] = (length >> 8) & 0xff; _buffer[index] = length & 0xff; index = index + 4; // index = write32bit(index, _buffer, requestId); _buffer[index + 3] = (this.requestId >> 24) & 0xff; _buffer[index + 2] = (this.requestId >> 16) & 0xff; _buffer[index + 1] = (this.requestId >> 8) & 0xff; _buffer[index] = this.requestId & 0xff; index = index + 4; // index = write32bit(index, _buffer, 0); _buffer[index + 3] = (0 >> 24) & 0xff; _buffer[index + 2] = (0 >> 16) & 0xff; _buffer[index + 1] = (0 >> 8) & 0xff; _buffer[index] = 0 & 0xff; index = index + 4; // index = write32bit(index, _buffer, OP_GETMORE); _buffer[index + 3] = (opcodes.OP_GETMORE >> 24) & 0xff; _buffer[index + 2] = (opcodes.OP_GETMORE >> 16) & 0xff; _buffer[index + 1] = (opcodes.OP_GETMORE >> 8) & 0xff; _buffer[index] = opcodes.OP_GETMORE & 0xff; index 
= index + 4; // index = write32bit(index, _buffer, 0); _buffer[index + 3] = (0 >> 24) & 0xff; _buffer[index + 2] = (0 >> 16) & 0xff; _buffer[index + 1] = (0 >> 8) & 0xff; _buffer[index] = 0 & 0xff; index = index + 4; // Write collection name index = index + _buffer.write(this.ns, index, 'utf8') + 1; _buffer[index - 1] = 0; // Write batch size // index = write32bit(index, _buffer, numberToReturn); _buffer[index + 3] = (this.numberToReturn >> 24) & 0xff; _buffer[index + 2] = (this.numberToReturn >> 16) & 0xff; _buffer[index + 1] = (this.numberToReturn >> 8) & 0xff; _buffer[index] = this.numberToReturn & 0xff; index = index + 4; // Write cursor id // index = write32bit(index, _buffer, cursorId.getLowBits()); _buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff; _buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff; _buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff; _buffer[index] = this.cursorId.getLowBits() & 0xff; index = index + 4; // index = write32bit(index, _buffer, cursorId.getHighBits()); _buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff; _buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff; _buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff; _buffer[index] = this.cursorId.getHighBits() & 0xff; index = index + 4; // Return buffer return _buffer; }; /************************************************************** * KILLCURSOR **************************************************************/ var KillCursor = function(bson, ns, cursorIds) { this.ns = ns; this.requestId = _requestId++; this.cursorIds = cursorIds; }; // // Uses a single allocated buffer for the process, avoiding multiple memory allocations KillCursor.prototype.toBin = function() { var length = 4 + 4 + 4 * 4 + this.cursorIds.length * 8; // Create command buffer var index = 0; var _buffer = Buffer.alloc(length); // Write header information // index = write32bit(index, _buffer, length); _buffer[index + 3] = (length >> 24) & 0xff; 
_buffer[index + 2] = (length >> 16) & 0xff; _buffer[index + 1] = (length >> 8) & 0xff; _buffer[index] = length & 0xff; index = index + 4; // index = write32bit(index, _buffer, requestId); _buffer[index + 3] = (this.requestId >> 24) & 0xff; _buffer[index + 2] = (this.requestId >> 16) & 0xff; _buffer[index + 1] = (this.requestId >> 8) & 0xff; _buffer[index] = this.requestId & 0xff; index = index + 4; // index = write32bit(index, _buffer, 0); _buffer[index + 3] = (0 >> 24) & 0xff; _buffer[index + 2] = (0 >> 16) & 0xff; _buffer[index + 1] = (0 >> 8) & 0xff; _buffer[index] = 0 & 0xff; index = index + 4; // index = write32bit(index, _buffer, OP_KILL_CURSORS); _buffer[index + 3] = (opcodes.OP_KILL_CURSORS >> 24) & 0xff; _buffer[index + 2] = (opcodes.OP_KILL_CURSORS >> 16) & 0xff; _buffer[index + 1] = (opcodes.OP_KILL_CURSORS >> 8) & 0xff; _buffer[index] = opcodes.OP_KILL_CURSORS & 0xff; index = index + 4; // index = write32bit(index, _buffer, 0); _buffer[index + 3] = (0 >> 24) & 0xff; _buffer[index + 2] = (0 >> 16) & 0xff; _buffer[index + 1] = (0 >> 8) & 0xff; _buffer[index] = 0 & 0xff; index = index + 4; // Write batch size // index = write32bit(index, _buffer, this.cursorIds.length); _buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff; _buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff; _buffer[index + 1] = (this.cursorIds.length >> 8) & 0xff; _buffer[index] = this.cursorIds.length & 0xff; index = index + 4; // Write all the cursor ids into the array for (var i = 0; i < this.cursorIds.length; i++) { // Write cursor id // index = write32bit(index, _buffer, cursorIds[i].getLowBits()); _buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff; _buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff; _buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff; _buffer[index] = this.cursorIds[i].getLowBits() & 0xff; index = index + 4; // index = write32bit(index, _buffer, cursorIds[i].getHighBits()); _buffer[index + 3] = 
(this.cursorIds[i].getHighBits() >> 24) & 0xff; _buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff; _buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff; _buffer[index] = this.cursorIds[i].getHighBits() & 0xff; index = index + 4; } // Return buffer return _buffer; }; var Response = function(bson, message, msgHeader, msgBody, opts) { opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false }; this.parsed = false; this.raw = message; this.data = msgBody; this.bson = bson; this.opts = opts; // Read the message header this.length = msgHeader.length; this.requestId = msgHeader.requestId; this.responseTo = msgHeader.responseTo; this.opCode = msgHeader.opCode; this.fromCompressed = msgHeader.fromCompressed; // Read the message body this.responseFlags = msgBody.readInt32LE(0); this.cursorId = new Long(msgBody.readInt32LE(4), msgBody.readInt32LE(8)); this.startingFrom = msgBody.readInt32LE(12); this.numberReturned = msgBody.readInt32LE(16); // Preallocate document array this.documents = new Array(this.numberReturned); // Flag values this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0; this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0; this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0; this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0; this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true; this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true; this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? 
opts.promoteBuffers : false; }; Response.prototype.isParsed = function() { return this.parsed; }; Response.prototype.parse = function(options) { // Don't parse again if not needed if (this.parsed) return; options = options || {}; // Allow the return of raw documents instead of parsing var raw = options.raw || false; var documentsReturnedIn = options.documentsReturnedIn || null; var promoteLongs = typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs; var promoteValues = typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues; var promoteBuffers = typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : this.opts.promoteBuffers; var bsonSize, _options; // Set up the options _options = { promoteLongs: promoteLongs, promoteValues: promoteValues, promoteBuffers: promoteBuffers }; // Position within OP_REPLY at which documents start // (See https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-reply) this.index = 20; // // Parse Body // for (var i = 0; i < this.numberReturned; i++) { bsonSize = this.data[this.index] | (this.data[this.index + 1] << 8) | (this.data[this.index + 2] << 16) | (this.data[this.index + 3] << 24); // If we have raw results specified slice the return document if (raw) { this.documents[i] = this.data.slice(this.index, this.index + bsonSize); } else { this.documents[i] = this.bson.deserialize( this.data.slice(this.index, this.index + bsonSize), _options ); } // Adjust the index this.index = this.index + bsonSize; } if (this.documents.length === 1 && documentsReturnedIn != null && raw) { const fieldsAsRaw = {}; fieldsAsRaw[documentsReturnedIn] = true; _options.fieldsAsRaw = fieldsAsRaw; const doc = this.bson.deserialize(this.documents[0], _options); this.documents = [doc]; } // Set parsed this.parsed = true; }; module.exports = { Query: Query, GetMore: GetMore, Response: Response, KillCursor: KillCursor }; package/lib/connection/connect.js000644 
0000026154 3560116604 014164 0ustar00000000 000000 'use strict'; const net = require('net'); const tls = require('tls'); const Connection = require('./connection'); const Query = require('./commands').Query; const createClientInfo = require('../topologies/shared').createClientInfo; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const defaultAuthProviders = require('../auth/defaultAuthProviders').defaultAuthProviders; const WIRE_CONSTANTS = require('../wireprotocol/constants'); const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION; const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION; const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION; const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION; let AUTH_PROVIDERS; function connect(options, callback) { if (AUTH_PROVIDERS == null) { AUTH_PROVIDERS = defaultAuthProviders(options.bson); } if (options.family !== void 0) { makeConnection(options.family, options, (err, socket) => { if (err) { callback(err, socket); // in the error case, `socket` is the originating error event name return; } performInitialHandshake(new Connection(socket, options), options, callback); }); return; } return makeConnection(6, options, (err, ipv6Socket) => { if (err) { makeConnection(4, options, (err, ipv4Socket) => { if (err) { callback(err, ipv4Socket); // in the error case, `ipv4Socket` is the originating error event name return; } performInitialHandshake(new Connection(ipv4Socket, options), options, callback); }); return; } performInitialHandshake(new Connection(ipv6Socket, options), options, callback); }); } function getSaslSupportedMechs(options) { if (!(options && options.credentials)) { return {}; } const credentials = options.credentials; // TODO: revisit whether or not items like `options.user` and `options.dbName` should be checked here const authMechanism = 
credentials.mechanism; const authSource = credentials.source || options.dbName || 'admin'; const user = credentials.username || options.user; if (typeof authMechanism === 'string' && authMechanism.toUpperCase() !== 'DEFAULT') { return {}; } if (!user) { return {}; } return { saslSupportedMechs: `${authSource}.${user}` }; } function checkSupportedServer(ismaster, options) { const serverVersionHighEnough = ismaster && typeof ismaster.maxWireVersion === 'number' && ismaster.maxWireVersion >= MIN_SUPPORTED_WIRE_VERSION; const serverVersionLowEnough = ismaster && typeof ismaster.minWireVersion === 'number' && ismaster.minWireVersion <= MAX_SUPPORTED_WIRE_VERSION; if (serverVersionHighEnough) { if (serverVersionLowEnough) { return null; } const message = `Server at ${options.host}:${options.port} reports minimum wire version ${ ismaster.minWireVersion }, but this version of the Node.js Driver requires at most ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`; return new MongoError(message); } const message = `Server at ${options.host}:${ options.port } reports maximum wire version ${ismaster.maxWireVersion || 0}, but this version of the Node.js Driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION})`; return new MongoError(message); } function performInitialHandshake(conn, options, _callback) { const callback = function(err, ret) { if (err && conn) { conn.destroy(); } _callback(err, ret); }; let compressors = []; if (options.compression && options.compression.compressors) { compressors = options.compression.compressors; } const handshakeDoc = Object.assign( { ismaster: true, client: createClientInfo(options), compression: compressors }, getSaslSupportedMechs(options) ); const start = new Date().getTime(); runCommand(conn, 'admin.$cmd', handshakeDoc, options, (err, ismaster) => { if (err) { callback(err, null); return; } if (ismaster.ok === 0) { callback(new MongoError(ismaster), null); return; } const 
supportedServerErr = checkSupportedServer(ismaster, options); if (supportedServerErr) { callback(supportedServerErr, null); return; } // resolve compression if (ismaster.compression) { const agreedCompressors = compressors.filter( compressor => ismaster.compression.indexOf(compressor) !== -1 ); if (agreedCompressors.length) { conn.agreedCompressor = agreedCompressors[0]; } if (options.compression && options.compression.zlibCompressionLevel) { conn.zlibCompressionLevel = options.compression.zlibCompressionLevel; } } // NOTE: This is metadata attached to the connection while porting away from // handshake being done in the `Server` class. Likely, it should be // relocated, or at very least restructured. conn.ismaster = ismaster; conn.lastIsMasterMS = new Date().getTime() - start; const credentials = options.credentials; if (!ismaster.arbiterOnly && credentials) { credentials.resolveAuthMechanism(ismaster); authenticate(conn, credentials, callback); return; } callback(null, conn); }); } const LEGAL_SSL_SOCKET_OPTIONS = [ 'pfx', 'key', 'passphrase', 'cert', 'ca', 'ciphers', 'NPNProtocols', 'ALPNProtocols', 'servername', 'ecdhCurve', 'secureProtocol', 'secureContext', 'session', 'minDHSize', 'crl', 'rejectUnauthorized' ]; function parseConnectOptions(family, options) { const host = typeof options.host === 'string' ? options.host : 'localhost'; if (host.indexOf('/') !== -1) { return { path: host }; } const result = { family, host, port: typeof options.port === 'number' ? 
options.port : 27017, rejectUnauthorized: false }; return result; } function parseSslOptions(family, options) { const result = parseConnectOptions(family, options); // Merge in valid SSL options for (const name in options) { if (options[name] != null && LEGAL_SSL_SOCKET_OPTIONS.indexOf(name) !== -1) { result[name] = options[name]; } } // Override checkServerIdentity behavior if (options.checkServerIdentity === false) { // Skip the identiy check by retuning undefined as per node documents // https://nodejs.org/api/tls.html#tls_tls_connect_options_callback result.checkServerIdentity = function() { return undefined; }; } else if (typeof options.checkServerIdentity === 'function') { result.checkServerIdentity = options.checkServerIdentity; } // Set default sni servername to be the same as host if (result.servername == null) { result.servername = result.host; } return result; } function makeConnection(family, options, _callback) { const useSsl = typeof options.ssl === 'boolean' ? options.ssl : false; const keepAlive = typeof options.keepAlive === 'boolean' ? options.keepAlive : true; let keepAliveInitialDelay = typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 300000; const noDelay = typeof options.noDelay === 'boolean' ? options.noDelay : true; const connectionTimeout = typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000; const socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; const rejectUnauthorized = typeof options.rejectUnauthorized === 'boolean' ? 
options.rejectUnauthorized : true; if (keepAliveInitialDelay > socketTimeout) { keepAliveInitialDelay = Math.round(socketTimeout / 2); } let socket; const callback = function(err, ret) { if (err && socket) { socket.destroy(); } _callback(err, ret); }; try { if (useSsl) { socket = tls.connect(parseSslOptions(family, options)); if (typeof socket.disableRenegotiation === 'function') { socket.disableRenegotiation(); } } else { socket = net.createConnection(parseConnectOptions(family, options)); } } catch (err) { return callback(err); } socket.setKeepAlive(keepAlive, keepAliveInitialDelay); socket.setTimeout(connectionTimeout); socket.setNoDelay(noDelay); const errorEvents = ['error', 'close', 'timeout', 'parseError', 'connect']; function errorHandler(eventName) { return err => { errorEvents.forEach(event => socket.removeAllListeners(event)); socket.removeListener('connect', connectHandler); callback(connectionFailureError(eventName, err), eventName); }; } function connectHandler() { errorEvents.forEach(event => socket.removeAllListeners(event)); if (socket.authorizationError && rejectUnauthorized) { return callback(socket.authorizationError); } socket.setTimeout(socketTimeout); callback(null, socket); } socket.once('error', errorHandler('error')); socket.once('close', errorHandler('close')); socket.once('timeout', errorHandler('timeout')); socket.once('parseError', errorHandler('parseError')); socket.once('connect', connectHandler); } const CONNECTION_ERROR_EVENTS = ['error', 'close', 'timeout', 'parseError']; function runCommand(conn, ns, command, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); const socketTimeout = typeof options.socketTimeout === 'number' ? 
options.socketTimeout : 360000; const bson = conn.options.bson; const query = new Query(bson, ns, command, { numberToSkip: 0, numberToReturn: 1 }); function errorHandler(err) { conn.resetSocketTimeout(); CONNECTION_ERROR_EVENTS.forEach(eventName => conn.removeListener(eventName, errorHandler)); conn.removeListener('message', messageHandler); callback(err, null); } function messageHandler(msg) { if (msg.responseTo !== query.requestId) { return; } conn.resetSocketTimeout(); CONNECTION_ERROR_EVENTS.forEach(eventName => conn.removeListener(eventName, errorHandler)); conn.removeListener('message', messageHandler); msg.parse({ promoteValues: true }); callback(null, msg.documents[0]); } conn.setSocketTimeout(socketTimeout); CONNECTION_ERROR_EVENTS.forEach(eventName => conn.once(eventName, errorHandler)); conn.on('message', messageHandler); conn.write(query.toBin()); } function authenticate(conn, credentials, callback) { const mechanism = credentials.mechanism; if (!AUTH_PROVIDERS[mechanism]) { callback(new MongoError(`authMechanism '${mechanism}' not supported`)); return; } const provider = AUTH_PROVIDERS[mechanism]; provider.auth(runCommand, [conn], credentials, err => { if (err) return callback(err); callback(null, conn); }); } function connectionFailureError(type, err) { switch (type) { case 'error': return new MongoNetworkError(err); case 'timeout': return new MongoNetworkError(`connection timed out`); case 'close': return new MongoNetworkError(`connection closed`); default: return new MongoNetworkError(`unknown network error`); } } module.exports = connect; package/lib/connection/connection.js000644 0000044111 3560116604 014663 0ustar00000000 000000 'use strict'; const EventEmitter = require('events').EventEmitter; const crypto = require('crypto'); const debugOptions = require('./utils').debugOptions; const parseHeader = require('../wireprotocol/shared').parseHeader; const decompress = require('../wireprotocol/compression').decompress; const Response = 
require('./commands').Response; const BinMsg = require('./msg').BinMsg; const MongoNetworkError = require('../error').MongoNetworkError; const MongoError = require('../error').MongoError; const Logger = require('./logger'); const OP_COMPRESSED = require('../wireprotocol/shared').opcodes.OP_COMPRESSED; const OP_MSG = require('../wireprotocol/shared').opcodes.OP_MSG; const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; const Buffer = require('safe-buffer').Buffer; let _id = 0; const DEFAULT_MAX_BSON_MESSAGE_SIZE = 1024 * 1024 * 16 * 4; const DEBUG_FIELDS = [ 'host', 'port', 'size', 'keepAlive', 'keepAliveInitialDelay', 'noDelay', 'connectionTimeout', 'socketTimeout', 'ssl', 'ca', 'crl', 'cert', 'rejectUnauthorized', 'promoteLongs', 'promoteValues', 'promoteBuffers', 'checkServerIdentity' ]; let connectionAccountingSpy = undefined; let connectionAccounting = false; let connections = {}; /** * A class representing a single connection to a MongoDB server * * @fires Connection#connect * @fires Connection#close * @fires Connection#error * @fires Connection#timeout * @fires Connection#parseError * @fires Connection#message */ class Connection extends EventEmitter { /** * Creates a new Connection instance * * @param {Socket} socket The socket this connection wraps * @param {Object} [options] Optional settings * @param {string} [options.host] The host the socket is connected to * @param {number} [options.port] The port used for the socket connection * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting * @param {boolean} [options.promoteLongs] Convert Long values from the db into Numbers if they fit into 53 bits * @param {boolean} [options.promoteValues] Promotes 
BSON values to native types where possible, set to false to only receive wrapper types. * @param {boolean} [options.promoteBuffers] Promotes Binary BSON values to native Node Buffers. */ constructor(socket, options) { super(); options = options || {}; if (!options.bson) { throw new TypeError('must pass in valid bson parser'); } this.id = _id++; this.options = options; this.logger = Logger('Connection', options); this.bson = options.bson; this.tag = options.tag; this.maxBsonMessageSize = options.maxBsonMessageSize || DEFAULT_MAX_BSON_MESSAGE_SIZE; this.port = options.port || 27017; this.host = options.host || 'localhost'; this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000; // These values are inspected directly in tests, but maybe not necessary to keep around this.keepAlive = typeof options.keepAlive === 'boolean' ? options.keepAlive : true; this.keepAliveInitialDelay = typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 300000; this.connectionTimeout = typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000; if (this.keepAliveInitialDelay > this.socketTimeout) { this.keepAliveInitialDelay = Math.round(this.socketTimeout / 2); } // Debug information if (this.logger.isDebug()) { this.logger.debug( `creating connection ${this.id} with options [${JSON.stringify( debugOptions(DEBUG_FIELDS, options) )}]` ); } // Response options this.responseOptions = { promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, promoteBuffers: typeof options.promoteBuffers === 'boolean' ? 
options.promoteBuffers : false }; // Flushing this.flushing = false; this.queue = []; // Internal state this.writeStream = null; this.destroyed = false; // Create hash method const hash = crypto.createHash('sha1'); hash.update(this.address); this.hashedName = hash.digest('hex'); // All operations in flight on the connection this.workItems = []; // setup socket this.socket = socket; this.socket.once('error', errorHandler(this)); this.socket.once('timeout', timeoutHandler(this)); this.socket.once('close', closeHandler(this)); this.socket.on('data', dataHandler(this)); if (connectionAccounting) { addConnection(this.id, this); } } setSocketTimeout(value) { if (this.socket) { this.socket.setTimeout(value); } } resetSocketTimeout() { if (this.socket) { this.socket.setTimeout(this.socketTimeout); } } static enableConnectionAccounting(spy) { if (spy) { connectionAccountingSpy = spy; } connectionAccounting = true; connections = {}; } static disableConnectionAccounting() { connectionAccounting = false; connectionAccountingSpy = undefined; } static connections() { return connections; } get address() { return `${this.host}:${this.port}`; } /** * Unref this connection * @method * @return {boolean} */ unref() { if (this.socket == null) { this.once('connect', () => this.socket.unref()); return; } this.socket.unref(); } /** * Destroy connection * @method */ destroy(options, callback) { if (typeof options === 'function') { callback = options; options = {}; } options = Object.assign({ force: false }, options); if (connectionAccounting) { deleteConnection(this.id); } if (this.socket == null) { this.destroyed = true; return; } if (options.force) { this.socket.destroy(); this.destroyed = true; if (typeof callback === 'function') callback(null, null); return; } this.socket.end(err => { this.destroyed = true; if (typeof callback === 'function') callback(err, null); }); } /** * Write to connection * @method * @param {Command} command Command to write out need to implement toBin and 
toBinUnified */ write(buffer) { // Debug Log if (this.logger.isDebug()) { if (!Array.isArray(buffer)) { this.logger.debug(`writing buffer [${buffer.toString('hex')}] to ${this.address}`); } else { for (let i = 0; i < buffer.length; i++) this.logger.debug(`writing buffer [${buffer[i].toString('hex')}] to ${this.address}`); } } // Double check that the connection is not destroyed if (this.socket.destroyed === false) { // Write out the command if (!Array.isArray(buffer)) { this.socket.write(buffer, 'binary'); return true; } // Iterate over all buffers and write them in order to the socket for (let i = 0; i < buffer.length; i++) { this.socket.write(buffer[i], 'binary'); } return true; } // Connection is destroyed return write failed return false; } /** * Return id of connection as a string * @method * @return {string} */ toString() { return '' + this.id; } /** * Return json object of connection * @method * @return {object} */ toJSON() { return { id: this.id, host: this.host, port: this.port }; } /** * Is the connection connected * @method * @return {boolean} */ isConnected() { if (this.destroyed) return false; return !this.socket.destroyed && this.socket.writable; } } function deleteConnection(id) { // console.log("=== deleted connection " + id + " :: " + (connections[id] ? 
connections[id].port : '')) delete connections[id]; if (connectionAccountingSpy) { connectionAccountingSpy.deleteConnection(id); } } function addConnection(id, connection) { // console.log("=== added connection " + id + " :: " + connection.port) connections[id] = connection; if (connectionAccountingSpy) { connectionAccountingSpy.addConnection(id, connection); } } // // Connection handlers function errorHandler(conn) { return function(err) { if (connectionAccounting) deleteConnection(conn.id); // Debug information if (conn.logger.isDebug()) { conn.logger.debug( `connection ${conn.id} for [${conn.address}] errored out with [${JSON.stringify(err)}]` ); } conn.emit('error', new MongoNetworkError(err), conn); }; } function timeoutHandler(conn) { return function() { if (connectionAccounting) deleteConnection(conn.id); if (conn.logger.isDebug()) { conn.logger.debug(`connection ${conn.id} for [${conn.address}] timed out`); } conn.emit( 'timeout', new MongoNetworkError(`connection ${conn.id} to ${conn.address} timed out`), conn ); }; } function closeHandler(conn) { return function(hadError) { if (connectionAccounting) deleteConnection(conn.id); if (conn.logger.isDebug()) { conn.logger.debug(`connection ${conn.id} with for [${conn.address}] closed`); } if (!hadError) { conn.emit( 'close', new MongoNetworkError(`connection ${conn.id} to ${conn.address} closed`), conn ); } }; } // Handle a message once it is received function processMessage(conn, message) { const msgHeader = parseHeader(message); if (msgHeader.opCode !== OP_COMPRESSED) { const ResponseConstructor = msgHeader.opCode === OP_MSG ? 
BinMsg : Response; conn.emit( 'message', new ResponseConstructor( conn.bson, message, msgHeader, message.slice(MESSAGE_HEADER_SIZE), conn.responseOptions ), conn ); return; } msgHeader.fromCompressed = true; let index = MESSAGE_HEADER_SIZE; msgHeader.opCode = message.readInt32LE(index); index += 4; msgHeader.length = message.readInt32LE(index); index += 4; const compressorID = message[index]; index++; decompress(compressorID, message.slice(index), (err, decompressedMsgBody) => { if (err) { conn.emit('error', err); return; } if (decompressedMsgBody.length !== msgHeader.length) { conn.emit( 'error', new MongoError( 'Decompressing a compressed message from the server failed. The message is corrupt.' ) ); return; } const ResponseConstructor = msgHeader.opCode === OP_MSG ? BinMsg : Response; conn.emit( 'message', new ResponseConstructor( conn.bson, message, msgHeader, decompressedMsgBody, conn.responseOptions ), conn ); }); } function dataHandler(conn) { return function(data) { // Parse until we are done with the data while (data.length > 0) { // If we still have bytes to read on the current message if (conn.bytesRead > 0 && conn.sizeOfMessage > 0) { // Calculate the amount of remaining bytes const remainingBytesToRead = conn.sizeOfMessage - conn.bytesRead; // Check if the current chunk contains the rest of the message if (remainingBytesToRead > data.length) { // Copy the new data into the exiting buffer (should have been allocated when we know the message size) data.copy(conn.buffer, conn.bytesRead); // Adjust the number of bytes read so it point to the correct index in the buffer conn.bytesRead = conn.bytesRead + data.length; // Reset state of buffer data = Buffer.alloc(0); } else { // Copy the missing part of the data into our current buffer data.copy(conn.buffer, conn.bytesRead, 0, remainingBytesToRead); // Slice the overflow into a new buffer that we will then re-parse data = data.slice(remainingBytesToRead); // Emit current complete message const emitBuffer = 
conn.buffer; // Reset state of buffer conn.buffer = null; conn.sizeOfMessage = 0; conn.bytesRead = 0; conn.stubBuffer = null; processMessage(conn, emitBuffer); } } else { // Stub buffer is kept in case we don't get enough bytes to determine the // size of the message (< 4 bytes) if (conn.stubBuffer != null && conn.stubBuffer.length > 0) { // If we have enough bytes to determine the message size let's do it if (conn.stubBuffer.length + data.length > 4) { // Prepad the data const newData = Buffer.alloc(conn.stubBuffer.length + data.length); conn.stubBuffer.copy(newData, 0); data.copy(newData, conn.stubBuffer.length); // Reassign for parsing data = newData; // Reset state of buffer conn.buffer = null; conn.sizeOfMessage = 0; conn.bytesRead = 0; conn.stubBuffer = null; } else { // Add the the bytes to the stub buffer const newStubBuffer = Buffer.alloc(conn.stubBuffer.length + data.length); // Copy existing stub buffer conn.stubBuffer.copy(newStubBuffer, 0); // Copy missing part of the data data.copy(newStubBuffer, conn.stubBuffer.length); // Exit parsing loop data = Buffer.alloc(0); } } else { if (data.length > 4) { // Retrieve the message size const sizeOfMessage = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); // If we have a negative sizeOfMessage emit error and return if (sizeOfMessage < 0 || sizeOfMessage > conn.maxBsonMessageSize) { const errorObject = { err: 'socketHandler', trace: '', bin: conn.buffer, parseState: { sizeOfMessage: sizeOfMessage, bytesRead: conn.bytesRead, stubBuffer: conn.stubBuffer } }; // We got a parse Error fire it off then keep going conn.emit('parseError', errorObject, conn); return; } // Ensure that the size of message is larger than 0 and less than the max allowed if ( sizeOfMessage > 4 && sizeOfMessage < conn.maxBsonMessageSize && sizeOfMessage > data.length ) { conn.buffer = Buffer.alloc(sizeOfMessage); // Copy all the data into the buffer data.copy(conn.buffer, 0); // Update bytes read conn.bytesRead = data.length; // 
Update sizeOfMessage conn.sizeOfMessage = sizeOfMessage; // Ensure stub buffer is null conn.stubBuffer = null; // Exit parsing loop data = Buffer.alloc(0); } else if ( sizeOfMessage > 4 && sizeOfMessage < conn.maxBsonMessageSize && sizeOfMessage === data.length ) { const emitBuffer = data; // Reset state of buffer conn.buffer = null; conn.sizeOfMessage = 0; conn.bytesRead = 0; conn.stubBuffer = null; // Exit parsing loop data = Buffer.alloc(0); // Emit the message processMessage(conn, emitBuffer); } else if (sizeOfMessage <= 4 || sizeOfMessage > conn.maxBsonMessageSize) { const errorObject = { err: 'socketHandler', trace: null, bin: data, parseState: { sizeOfMessage: sizeOfMessage, bytesRead: 0, buffer: null, stubBuffer: null } }; // We got a parse Error fire it off then keep going conn.emit('parseError', errorObject, conn); // Clear out the state of the parser conn.buffer = null; conn.sizeOfMessage = 0; conn.bytesRead = 0; conn.stubBuffer = null; // Exit parsing loop data = Buffer.alloc(0); } else { const emitBuffer = data.slice(0, sizeOfMessage); // Reset state of buffer conn.buffer = null; conn.sizeOfMessage = 0; conn.bytesRead = 0; conn.stubBuffer = null; // Copy rest of message data = data.slice(sizeOfMessage); // Emit the message processMessage(conn, emitBuffer); } } else { // Create a buffer that contains the space for the non-complete message conn.stubBuffer = Buffer.alloc(data.length); // Copy the data to the stub buffer data.copy(conn.stubBuffer, 0); // Exit parsing loop data = Buffer.alloc(0); } } } } }; } /** * A server connect event, used to verify that the connection is up and running * * @event Connection#connect * @type {Connection} */ /** * The server connection closed, all pool connections closed * * @event Connection#close * @type {Connection} */ /** * The server connection caused an error, all pool connections closed * * @event Connection#error * @type {Connection} */ /** * The server connection timed out, all pool connections closed * * @event 
Connection#timeout * @type {Connection} */ /** * The driver experienced an invalid message, all pool connections closed * * @event Connection#parseError * @type {Connection} */ /** * An event emitted each time the connection receives a parsed message from the wire * * @event Connection#message * @type {Connection} */ module.exports = Connection; package/lib/connection/logger.js000644 0000014660 3560116604 014011 0ustar00000000 000000 'use strict'; var f = require('util').format, MongoError = require('../error').MongoError; // Filters for classes var classFilters = {}; var filteredClasses = {}; var level = null; // Save the process id var pid = process.pid; // current logger var currentLogger = null; /** * Creates a new Logger instance * @class * @param {string} className The Class name associated with the logging instance * @param {object} [options=null] Optional settings. * @param {Function} [options.logger=null] Custom logger function; * @param {string} [options.loggerLevel=error] Override default global log level. * @return {Logger} a Logger instance. 
*/ var Logger = function(className, options) { if (!(this instanceof Logger)) return new Logger(className, options); options = options || {}; // Current reference this.className = className; // Current logger if (options.logger) { currentLogger = options.logger; } else if (currentLogger == null) { currentLogger = console.log; } // Set level of logging, default is error if (options.loggerLevel) { level = options.loggerLevel || 'error'; } // Add all class names if (filteredClasses[this.className] == null) classFilters[this.className] = true; }; /** * Log a message at the debug level * @method * @param {string} message The message to log * @param {object} object additional meta data to log * @return {null} */ Logger.prototype.debug = function(message, object) { if ( this.isDebug() && ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) || (Object.keys(filteredClasses).length === 0 && classFilters[this.className])) ) { var dateTime = new Date().getTime(); var msg = f('[%s-%s:%s] %s %s', 'DEBUG', this.className, pid, dateTime, message); var state = { type: 'debug', message: message, className: this.className, pid: pid, date: dateTime }; if (object) state.meta = object; currentLogger(msg, state); } }; /** * Log a message at the warn level * @method * @param {string} message The message to log * @param {object} object additional meta data to log * @return {null} */ (Logger.prototype.warn = function(message, object) { if ( this.isWarn() && ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) || (Object.keys(filteredClasses).length === 0 && classFilters[this.className])) ) { var dateTime = new Date().getTime(); var msg = f('[%s-%s:%s] %s %s', 'WARN', this.className, pid, dateTime, message); var state = { type: 'warn', message: message, className: this.className, pid: pid, date: dateTime }; if (object) state.meta = object; currentLogger(msg, state); } }), /** * Log a message at the info level * @method * @param {string} 
message The message to log * @param {object} object additional meta data to log * @return {null} */ (Logger.prototype.info = function(message, object) { if ( this.isInfo() && ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) || (Object.keys(filteredClasses).length === 0 && classFilters[this.className])) ) { var dateTime = new Date().getTime(); var msg = f('[%s-%s:%s] %s %s', 'INFO', this.className, pid, dateTime, message); var state = { type: 'info', message: message, className: this.className, pid: pid, date: dateTime }; if (object) state.meta = object; currentLogger(msg, state); } }), /** * Log a message at the error level * @method * @param {string} message The message to log * @param {object} object additional meta data to log * @return {null} */ (Logger.prototype.error = function(message, object) { if ( this.isError() && ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) || (Object.keys(filteredClasses).length === 0 && classFilters[this.className])) ) { var dateTime = new Date().getTime(); var msg = f('[%s-%s:%s] %s %s', 'ERROR', this.className, pid, dateTime, message); var state = { type: 'error', message: message, className: this.className, pid: pid, date: dateTime }; if (object) state.meta = object; currentLogger(msg, state); } }), /** * Is the logger set at info level * @method * @return {boolean} */ (Logger.prototype.isInfo = function() { return level === 'info' || level === 'debug'; }), /** * Is the logger set at error level * @method * @return {boolean} */ (Logger.prototype.isError = function() { return level === 'error' || level === 'info' || level === 'debug'; }), /** * Is the logger set at error level * @method * @return {boolean} */ (Logger.prototype.isWarn = function() { return level === 'error' || level === 'warn' || level === 'info' || level === 'debug'; }), /** * Is the logger set at debug level * @method * @return {boolean} */ (Logger.prototype.isDebug = function() { return level === 
'debug'; }); /** * Resets the logger to default settings, error and no filtered classes * @method * @return {null} */ Logger.reset = function() { level = 'error'; filteredClasses = {}; }; /** * Get the current logger function * @method * @return {function} */ Logger.currentLogger = function() { return currentLogger; }; /** * Set the current logger function * @method * @param {function} logger Logger function. * @return {null} */ Logger.setCurrentLogger = function(logger) { if (typeof logger !== 'function') throw new MongoError('current logger must be a function'); currentLogger = logger; }; /** * Set what classes to log. * @method * @param {string} type The type of filter (currently only class) * @param {string[]} values The filters to apply * @return {null} */ Logger.filter = function(type, values) { if (type === 'class' && Array.isArray(values)) { filteredClasses = {}; values.forEach(function(x) { filteredClasses[x] = true; }); } }; /** * Set the current log level * @method * @param {string} level Set current log level (debug, info, error) * @return {null} */ Logger.setLevel = function(_level) { if (_level !== 'info' && _level !== 'error' && _level !== 'debug' && _level !== 'warn') { throw new Error(f('%s is an illegal logging level', _level)); } level = _level; }; module.exports = Logger; package/lib/connection/msg.js000644 0000014530 3560116604 013314 0ustar00000000 000000 'use strict'; // Implementation of OP_MSG spec: // https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst // // struct Section { // uint8 payloadType; // union payload { // document document; // payloadType == 0 // struct sequence { // payloadType == 1 // int32 size; // cstring identifier; // document* documents; // }; // }; // }; // struct OP_MSG { // struct MsgHeader { // int32 messageLength; // int32 requestID; // int32 responseTo; // int32 opCode = 2013; // }; // uint32 flagBits; // Section+ sections; // [uint32 checksum;] // }; const opcodes = 
require('../wireprotocol/shared').opcodes; const databaseNamespace = require('../wireprotocol/shared').databaseNamespace; const ReadPreference = require('../topologies/read_preference'); // Incrementing request id let _requestId = 0; // Msg Flags const OPTS_CHECKSUM_PRESENT = 1; const OPTS_MORE_TO_COME = 2; const OPTS_EXHAUST_ALLOWED = 1 << 16; class Msg { constructor(bson, ns, command, options) { // Basic options needed to be passed in if (command == null) throw new Error('query must be specified for query'); // Basic options this.bson = bson; this.ns = ns; this.command = command; this.command.$db = databaseNamespace(ns); if (options.readPreference && options.readPreference.mode !== ReadPreference.PRIMARY) { this.command.$readPreference = options.readPreference.toJSON(); } // Ensure empty options this.options = options || {}; // Additional options this.requestId = Msg.getRequestId(); // Serialization option this.serializeFunctions = typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; this.ignoreUndefined = typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; this.checkKeys = typeof options.checkKeys === 'boolean' ? 
options.checkKeys : false; this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16; // flags this.checksumPresent = false; this.moreToCome = options.moreToCome || false; this.exhaustAllowed = false; } toBin() { const buffers = []; let flags = 0; if (this.checksumPresent) { flags |= OPTS_CHECKSUM_PRESENT; } if (this.moreToCome) { flags |= OPTS_MORE_TO_COME; } if (this.exhaustAllowed) { flags |= OPTS_EXHAUST_ALLOWED; } const header = new Buffer( 4 * 4 + // Header 4 // Flags ); buffers.push(header); let totalLength = header.length; const command = this.command; totalLength += this.makeDocumentSegment(buffers, command); header.writeInt32LE(totalLength, 0); // messageLength header.writeInt32LE(this.requestId, 4); // requestID header.writeInt32LE(0, 8); // responseTo header.writeInt32LE(opcodes.OP_MSG, 12); // opCode header.writeUInt32LE(flags, 16); // flags return buffers; } makeDocumentSegment(buffers, document) { const payloadTypeBuffer = new Buffer(1); payloadTypeBuffer[0] = 0; const documentBuffer = this.serializeBson(document); buffers.push(payloadTypeBuffer); buffers.push(documentBuffer); return payloadTypeBuffer.length + documentBuffer.length; } serializeBson(document) { return this.bson.serialize(document, { checkKeys: this.checkKeys, serializeFunctions: this.serializeFunctions, ignoreUndefined: this.ignoreUndefined }); } } Msg.getRequestId = function() { return ++_requestId; }; class BinMsg { constructor(bson, message, msgHeader, msgBody, opts) { opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false }; this.parsed = false; this.raw = message; this.data = msgBody; this.bson = bson; this.opts = opts; // Read the message header this.length = msgHeader.length; this.requestId = msgHeader.requestId; this.responseTo = msgHeader.responseTo; this.opCode = msgHeader.opCode; this.fromCompressed = msgHeader.fromCompressed; // Read response flags this.responseFlags = msgBody.readInt32LE(0); this.checksumPresent = (this.responseFlags & 
OPTS_CHECKSUM_PRESENT) !== 0; this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0; this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0; this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true; this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true; this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false; this.documents = []; } isParsed() { return this.parsed; } parse(options) { // Don't parse again if not needed if (this.parsed) return; options = options || {}; this.index = 4; // Allow the return of raw documents instead of parsing const raw = options.raw || false; const documentsReturnedIn = options.documentsReturnedIn || null; const promoteLongs = typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs; const promoteValues = typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues; const promoteBuffers = typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : this.opts.promoteBuffers; // Set up the options const _options = { promoteLongs: promoteLongs, promoteValues: promoteValues, promoteBuffers: promoteBuffers }; while (this.index < this.data.length) { const payloadType = this.data.readUInt8(this.index++); if (payloadType === 1) { console.error('TYPE 1'); } else if (payloadType === 0) { const bsonSize = this.data.readUInt32LE(this.index); const bin = this.data.slice(this.index, this.index + bsonSize); this.documents.push(raw ? 
bin : this.bson.deserialize(bin, _options)); this.index += bsonSize; } } if (this.documents.length === 1 && documentsReturnedIn != null && raw) { const fieldsAsRaw = {}; fieldsAsRaw[documentsReturnedIn] = true; _options.fieldsAsRaw = fieldsAsRaw; const doc = this.bson.deserialize(this.documents[0], _options); this.documents = [doc]; } this.parsed = true; } } module.exports = { Msg, BinMsg }; package/lib/connection/pool.js000644 0000111476 3560116604 013506 0ustar00000000 000000 'use strict'; const inherits = require('util').inherits; const EventEmitter = require('events').EventEmitter; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const MongoWriteConcernError = require('../error').MongoWriteConcernError; const Logger = require('./logger'); const f = require('util').format; const Msg = require('./msg').Msg; const CommandResult = require('./command_result'); const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; const COMPRESSION_DETAILS_SIZE = require('../wireprotocol/shared').COMPRESSION_DETAILS_SIZE; const opcodes = require('../wireprotocol/shared').opcodes; const compress = require('../wireprotocol/compression').compress; const compressorIDs = require('../wireprotocol/compression').compressorIDs; const uncompressibleCommands = require('../wireprotocol/compression').uncompressibleCommands; const apm = require('./apm'); const Buffer = require('safe-buffer').Buffer; const connect = require('./connect'); const updateSessionFromResponse = require('../sessions').updateSessionFromResponse; var DISCONNECTED = 'disconnected'; var CONNECTING = 'connecting'; var CONNECTED = 'connected'; var DESTROYING = 'destroying'; var DESTROYED = 'destroyed'; var _id = 0; /** * Creates a new Pool instance * @class * @param {string} options.host The server host * @param {number} options.port The server port * @param {number} [options.size=5] Max server connection pool size * @param {number} 
[options.minSize=0] Minimum server connection pool size
 * @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection
 * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
 * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
 * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
 * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled
 * @param {boolean} [options.noDelay=true] TCP Connection no delay
 * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting
 * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting
 * @param {number} [options.monitoringSocketTimeout=30000] TCP Socket timeout setting for replicaset monitoring socket
 * @param {boolean} [options.ssl=false] Use SSL for connection
 * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
 * @param {Buffer} [options.ca] SSL Certificate store binary buffer
 * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer
 * @param {Buffer} [options.cert] SSL Certificate binary buffer
 * @param {Buffer} [options.key] SSL Key file binary buffer
 * @param {string} [options.passPhrase] SSL Certificate pass phrase
 * @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates
 * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
 * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
 * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
 * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit.
 * @fires Pool#connect
 * @fires Pool#close
 * @fires Pool#error
 * @fires Pool#timeout
 * @fires Pool#parseError
 * @return {Pool} A cursor instance
 */
var Pool = function(topology, options) {
  // Add event listener
  EventEmitter.call(this);

  // Store topology for later use
  this.topology = topology;

  // Add the options
  this.options = Object.assign(
    {
      // Host and port settings
      host: 'localhost',
      port: 27017,
      // Pool default max size
      size: 5,
      // Pool default min size
      minSize: 0,
      // socket settings
      connectionTimeout: 30000,
      socketTimeout: 360000,
      keepAlive: true,
      keepAliveInitialDelay: 300000,
      noDelay: true,
      // SSL Settings
      ssl: false,
      checkServerIdentity: true,
      ca: null,
      crl: null,
      cert: null,
      key: null,
      passPhrase: null,
      rejectUnauthorized: false,
      promoteLongs: true,
      promoteValues: true,
      promoteBuffers: false,
      // Reconnection options
      reconnect: true,
      reconnectInterval: 1000,
      reconnectTries: 30,
      // Enable domains
      domainsEnabled: false
    },
    options
  );

  // Identification information
  this.id = _id++;
  // Current reconnect retries
  this.retriesLeft = this.options.reconnectTries;
  this.reconnectId = null;
  // No bson parser passed in
  if (
    !options.bson ||
    (options.bson &&
      (typeof options.bson.serialize !== 'function' ||
        typeof options.bson.deserialize !== 'function'))
  ) {
    throw new Error('must pass in valid bson parser');
  }

  // Logger instance
  this.logger = Logger('Pool', options);
  // Pool state
  this.state = DISCONNECTED;
  // Connections
  this.availableConnections = [];
  this.inUseConnections = [];
  this.connectingConnections = 0;
  // Currently executing
  this.executing = false;
  // Operation work queue
  this.queue = [];

  // Contains the reconnect connection
  this.reconnectConnection = null;

  // Number of consecutive timeouts caught
  this.numberOfConsecutiveTimeouts = 0;
  // Current pool Index
  this.connectionIndex = 0;

  // event handlers
  const pool = this;
this._messageHandler = messageHandler(this);
  this._connectionCloseHandler = function(err) {
    // `this` is the connection that emitted the event
    const connection = this;
    connectionFailureHandler(pool, 'close', err, connection);
  };

  this._connectionErrorHandler = function(err) {
    const connection = this;
    connectionFailureHandler(pool, 'error', err, connection);
  };

  this._connectionTimeoutHandler = function(err) {
    const connection = this;
    connectionFailureHandler(pool, 'timeout', err, connection);
  };

  this._connectionParseErrorHandler = function(err) {
    const connection = this;
    connectionFailureHandler(pool, 'parseError', err, connection);
  };
};

inherits(Pool, EventEmitter);

Object.defineProperty(Pool.prototype, 'size', {
  enumerable: true,
  get: function() {
    return this.options.size;
  }
});

Object.defineProperty(Pool.prototype, 'minSize', {
  enumerable: true,
  get: function() {
    return this.options.minSize;
  }
});

Object.defineProperty(Pool.prototype, 'connectionTimeout', {
  enumerable: true,
  get: function() {
    return this.options.connectionTimeout;
  }
});

Object.defineProperty(Pool.prototype, 'socketTimeout', {
  enumerable: true,
  get: function() {
    return this.options.socketTimeout;
  }
});

// Validate a state change against the pool state machine and apply it,
// emitting 'stateChanged'; an illegal transition is only logged, not applied.
function stateTransition(self, newState) {
  var legalTransitions = {
    disconnected: [CONNECTING, DESTROYING, DISCONNECTED],
    connecting: [CONNECTING, DESTROYING, CONNECTED, DISCONNECTED],
    connected: [CONNECTED, DISCONNECTED, DESTROYING],
    destroying: [DESTROYING, DESTROYED],
    destroyed: [DESTROYED]
  };

  // Get current state
  var legalStates = legalTransitions[self.state];
  if (legalStates && legalStates.indexOf(newState) !== -1) {
    self.emit('stateChanged', self.state, newState);
    self.state = newState;
  } else {
    self.logger.error(
      f(
        'Pool with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]',
        self.id,
        self.state,
        newState,
        legalStates
      )
    );
  }
}

// Shared teardown for 'close'/'error'/'timeout'/'parseError' events on a
// pooled connection: fail its pending work items, update pool state and
// schedule reconnection when configured.
function connectionFailureHandler(pool, event, err, conn) {
  if (conn) {
    if (conn._connectionFailHandled) return;
    conn._connectionFailHandled = true;
    conn.destroy();
// Remove the connection removeConnection(pool, conn); // Flush all work Items on this connection while (conn.workItems.length > 0) { const workItem = conn.workItems.shift(); if (workItem.cb) workItem.cb(err); } } // Did we catch a timeout, increment the numberOfConsecutiveTimeouts if (event === 'timeout') { pool.numberOfConsecutiveTimeouts = pool.numberOfConsecutiveTimeouts + 1; // Have we timed out more than reconnectTries in a row ? // Force close the pool as we are trying to connect to tcp sink hole if (pool.numberOfConsecutiveTimeouts > pool.options.reconnectTries) { pool.numberOfConsecutiveTimeouts = 0; // Destroy all connections and pool pool.destroy(true); // Emit close event return pool.emit('close', pool); } } // No more socket available propegate the event if (pool.socketCount() === 0) { if (pool.state !== DESTROYED && pool.state !== DESTROYING) { stateTransition(pool, DISCONNECTED); } // Do not emit error events, they are always close events // do not trigger the low level error handler in node event = event === 'error' ? 
'close' : event;
    pool.emit(event, err);
  }

  // Start reconnection attempts
  if (!pool.reconnectId && pool.options.reconnect) {
    pool.reconnectId = setTimeout(attemptReconnect(pool), pool.options.reconnectInterval);
  }

  // Do we need to do anything to maintain the minimum pool size
  const totalConnections = totalConnectionCount(pool);
  if (totalConnections < pool.minSize) {
    _createConnection(pool);
  }
}

// Returns a timer callback that tries to open a replacement connection,
// decrementing retriesLeft and destroying the pool once retries run out.
function attemptReconnect(self) {
  return function() {
    self.emit('attemptReconnect', self);
    if (self.state === DESTROYED || self.state === DESTROYING) return;

    // We are connected do not try again
    if (self.isConnected()) {
      self.reconnectId = null;
      return;
    }

    self.connectingConnections++;
    connect(self.options, (err, connection) => {
      self.connectingConnections--;

      if (err) {
        if (self.logger.isDebug()) {
          self.logger.debug(`connection attempt failed with error [${JSON.stringify(err)}]`);
        }

        self.retriesLeft = self.retriesLeft - 1;
        if (self.retriesLeft <= 0) {
          self.destroy();
          self.emit(
            'reconnectFailed',
            new MongoNetworkError(
              f(
                'failed to reconnect after %s attempts with interval %s ms',
                self.options.reconnectTries,
                self.options.reconnectInterval
              )
            )
          );
        } else {
          self.reconnectId = setTimeout(attemptReconnect(self), self.options.reconnectInterval);
        }

        return;
      }

      if (self.state === DESTROYED || self.state === DESTROYING) {
        return connection.destroy();
      }

      self.reconnectId = null;
      handlers.forEach(event => connection.removeAllListeners(event));
      connection.on('error', self._connectionErrorHandler);
      connection.on('close', self._connectionCloseHandler);
      connection.on('timeout', self._connectionTimeoutHandler);
      connection.on('parseError', self._connectionParseErrorHandler);
      connection.on('message', self._messageHandler);

      self.retriesLeft = self.options.reconnectTries;
      self.availableConnections.push(connection);
      self.reconnectConnection = null;
      self.emit('reconnect', self);
      _execute(self)();
    });
  };
}

function moveConnectionBetween(connection, from, to) {
  var index = from.indexOf(connection);
  // Move the connection from connecting to available
  if (index !== -1) {
    from.splice(index, 1);
    to.push(connection);
  }
}

// Returns the pool's 'message' handler: matches a server reply to its work
// item by requestId, parses the reply and dispatches the callback.
function messageHandler(self) {
  return function(message, connection) {
    // workItem to execute
    var workItem = null;

    // Locate the workItem
    for (var i = 0; i < connection.workItems.length; i++) {
      if (connection.workItems[i].requestId === message.responseTo) {
        // Get the callback
        workItem = connection.workItems[i];
        // Remove from list of workItems
        connection.workItems.splice(i, 1);
      }
    }

    if (workItem && workItem.monitoring) {
      moveConnectionBetween(connection, self.inUseConnections, self.availableConnections);
    }

    // Reset timeout counter
    self.numberOfConsecutiveTimeouts = 0;

    // Reset the connection timeout if we modified it for
    // this operation
    if (workItem && workItem.socketTimeout) {
      connection.resetSocketTimeout();
    }

    // Log if debug enabled
    if (self.logger.isDebug()) {
      self.logger.debug(
        f(
          'message [%s] received from %s:%s',
          message.raw.toString('hex'),
          self.options.host,
          self.options.port
        )
      );
    }

    // Dispatch a callback, deferring to the next tick unless domains are enabled
    function handleOperationCallback(self, cb, err, result) {
      // No domain enabled
      if (!self.options.domainsEnabled) {
        return process.nextTick(function() {
          return cb(err, result);
        });
      }

      // Domain enabled just call the callback
      cb(err, result);
    }

    // Keep executing, ensure current message handler does not stop execution
    if (!self.executing) {
      process.nextTick(function() {
        _execute(self)();
      });
    }

    // Time to dispatch the message if we have a callback
    if (workItem && !workItem.immediateRelease) {
      try {
        // Parse the message according to the provided options
        message.parse(workItem);
      } catch (err) {
        return handleOperationCallback(self, workItem.cb, new MongoError(err));
      }

      if (message.documents[0]) {
        const document = message.documents[0];
        const session = workItem.session;
        if (session) {
          updateSessionFromResponse(session, document);
        }

        if (document.$clusterTime) {
          self.topology.clusterTime = document.$clusterTime;
        }
      }

      // Establish if we have an error
      if (workItem.command &&
message.documents[0]) {
        const responseDoc = message.documents[0];

        if (responseDoc.writeConcernError) {
          const err = new MongoWriteConcernError(responseDoc.writeConcernError, responseDoc);
          return handleOperationCallback(self, workItem.cb, err);
        }

        if (responseDoc.ok === 0 || responseDoc.$err || responseDoc.errmsg || responseDoc.code) {
          return handleOperationCallback(self, workItem.cb, new MongoError(responseDoc));
        }
      }

      // Add the connection details
      message.hashedName = connection.hashedName;

      // Return the documents
      handleOperationCallback(
        self,
        workItem.cb,
        null,
        new CommandResult(workItem.fullResult ? message : message.documents[0], connection, message)
      );
    }
  };
}

/**
 * Return the total socket count in the pool.
 * @method
 * @return {Number} The number of socket available.
 */
Pool.prototype.socketCount = function() {
  return this.availableConnections.length + this.inUseConnections.length;
  // + this.connectingConnections.length;
};

// Available + in-use + currently-connecting connections
function totalConnectionCount(pool) {
  return (
    pool.availableConnections.length + pool.inUseConnections.length + pool.connectingConnections
  );
}

/**
 * Return all pool connections
 * @method
 * @return {Connection[]} The pool connections
 */
Pool.prototype.allConnections = function() {
  return this.availableConnections.concat(this.inUseConnections);
};

/**
 * Get a pool connection (round-robin)
 * @method
 * @return {Connection}
 */
Pool.prototype.get = function() {
  return this.allConnections()[0];
};

/**
 * Is the pool connected
 * @method
 * @return {boolean}
 */
Pool.prototype.isConnected = function() {
  // We are in a destroyed state
  if (this.state === DESTROYED || this.state === DESTROYING) {
    return false;
  }

  // Get connections
  var connections = this.availableConnections.concat(this.inUseConnections);

  // Check if we have any connected connections
  for (var i = 0; i < connections.length; i++) {
    if (connections[i].isConnected()) return true;
  }

  // Not connected
  return false;
};

/**
 * Was the pool destroyed
 * @method
 * @return {boolean}
 */
Pool.prototype.isDestroyed = function() { return this.state === DESTROYED || this.state === DESTROYING; }; /** * Is the pool in a disconnected state * @method * @return {boolean} */ Pool.prototype.isDisconnected = function() { return this.state === DISCONNECTED; }; /** * Connect pool */ Pool.prototype.connect = function() { if (this.state !== DISCONNECTED) { throw new MongoError('connection in unlawful state ' + this.state); } const self = this; stateTransition(this, CONNECTING); self.connectingConnections++; connect(self.options, (err, connection) => { self.connectingConnections--; if (err) { if (self.logger.isDebug()) { self.logger.debug(`connection attempt failed with error [${JSON.stringify(err)}]`); } if (self.state === CONNECTING) { self.emit('error', err); } return; } if (self.state === DESTROYED || self.state === DESTROYING) { connection.destroy(); return self.destroy(); } // attach event handlers connection.on('error', self._connectionErrorHandler); connection.on('close', self._connectionCloseHandler); connection.on('timeout', self._connectionTimeoutHandler); connection.on('parseError', self._connectionParseErrorHandler); connection.on('message', self._messageHandler); // If we are in a topology, delegate the auth to it // This is to avoid issues where we would auth against an // arbiter if (self.options.inTopology) { stateTransition(self, CONNECTED); self.availableConnections.push(connection); return self.emit('connect', self, connection); } if (self.state === DESTROYED || self.state === DESTROYING) { return self.destroy(); } if (err) { self.destroy(); return self.emit('error', err); } stateTransition(self, CONNECTED); self.availableConnections.push(connection); if (self.minSize) { for (let i = 0; i < self.minSize; i++) { _createConnection(self); } } self.emit('connect', self, connection); }); }; /** * Authenticate using a specified mechanism * @param {authResultCallback} callback A callback function */ Pool.prototype.auth = function(credentials, 
callback) {
  if (typeof callback === 'function') callback(null, null);
};

/**
 * Logout all users against a database
 * @param {authResultCallback} callback A callback function
 */
Pool.prototype.logout = function(dbName, callback) {
  if (typeof callback === 'function') callback(null, null);
};

/**
 * Unref the pool
 * @method
 */
Pool.prototype.unref = function() {
  // Get all the known connections
  var connections = this.availableConnections.concat(this.inUseConnections);

  connections.forEach(function(c) {
    c.unref();
  });
};

// Events
var events = ['error', 'close', 'timeout', 'parseError', 'connect', 'message'];

// Destroy the connections
function destroy(self, connections, options, callback) {
  let connectionCount = connections.length;

  // Invoked once per connection; when the last one finishes, zero out the
  // pool's connection lists, mark the pool DESTROYED and fire the callback.
  function connectionDestroyed() {
    connectionCount--;
    if (connectionCount > 0) {
      return;
    }

    // Zero out all connections
    self.inUseConnections = [];
    self.availableConnections = [];
    self.connectingConnections = 0;

    // Set state to destroyed
    stateTransition(self, DESTROYED);

    if (typeof callback === 'function') {
      callback(null, null);
    }
  }

  if (connectionCount === 0) {
    connectionDestroyed();
    return;
  }

  // Destroy all connections
  connections.forEach(conn => {
    // Detach the pool's listeners before tearing the socket down
    for (var i = 0; i < events.length; i++) {
      conn.removeAllListeners(events[i]);
    }

    conn.destroy(options, connectionDestroyed);
  });
}

/**
 * Destroy pool
 * @method
 */
Pool.prototype.destroy = function(force, callback) {
  var self = this;
  // Do not try again if the pool is already dead
  if (this.state === DESTROYED || self.state === DESTROYING) {
    if (typeof callback === 'function') callback(null, null);
    return;
  }

  // Set state to destroyed
  stateTransition(this, DESTROYING);

  // Are we force closing
  if (force) {
    // Get all the known connections
    var connections = self.availableConnections.concat(self.inUseConnections);

    // Flush any remaining work items with
    // an error
    while (self.queue.length > 0) {
      var workItem = self.queue.shift();
      if (typeof workItem.cb === 'function') {
        workItem.cb(new MongoError('Pool was force destroyed'));
      }
    }

    // Destroy the topology
    return destroy(self, connections, { force: true }, callback);
  }

  // Clear out the reconnect if set
  if (this.reconnectId) {
    clearTimeout(this.reconnectId);
  }

  // If we have a reconnect connection running, close
  // immediately
  if (this.reconnectConnection) {
    this.reconnectConnection.destroy();
  }

  // Wait for the operations to drain before we close the pool
  function checkStatus() {
    flushMonitoringOperations(self.queue);

    if (self.queue.length === 0) {
      // Get all the known connections
      var connections = self.availableConnections.concat(self.inUseConnections);

      // Check if we have any in flight operations
      for (var i = 0; i < connections.length; i++) {
        // There is an operation still in flight, reschedule a
        // check waiting for it to drain
        if (connections[i].workItems.length > 0) {
          return setTimeout(checkStatus, 1);
        }
      }

      destroy(self, connections, { force: false }, callback);
      // } else if (self.queue.length > 0 && !this.reconnectId) {
    } else {
      // Ensure we empty the queue
      _execute(self)();

      // Set timeout
      setTimeout(checkStatus, 1);
    }
  }

  // Initiate drain of operations
  checkStatus();
};

/**
 * Reset all connections of this pool
 *
 * @param {function} [callback]
 */
Pool.prototype.reset = function(callback) {
  // this.destroy(true, err => {
  //   if (err && typeof callback === 'function') {
  //     callback(err, null);
  //     return;
  //   }

  //   stateTransition(this, DISCONNECTED);
  //   this.connect();

  //   if (typeof callback === 'function') callback(null, null);
  // });

  if (typeof callback === 'function') callback();
};

// Prepare the buffer that Pool.prototype.write() uses to send to the server
function serializeCommand(self, command, callback) {
  const originalCommandBuffer = command.toBin();

  // Check whether we and the server have agreed to use a compressor
  const shouldCompress = !!self.options.agreedCompressor;
  if (!shouldCompress || !canCompress(command)) {
    return callback(null, originalCommandBuffer);
  }

  // Transform originalCommandBuffer into OP_COMPRESSED
  const concatenatedOriginalCommandBuffer = Buffer.concat(originalCommandBuffer);
  const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE);

  // Extract information needed for OP_COMPRESSED from the uncompressed message
  const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12);

  // Compress the message body
  compress(self, messageToBeCompressed, function(err, compressedMessage) {
    if (err) return callback(err, null);

    // Create the msgHeader of OP_COMPRESSED
    const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE);
    msgHeader.writeInt32LE(
      MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length,
      0
    ); // messageLength
    msgHeader.writeInt32LE(command.requestId, 4); // requestID
    msgHeader.writeInt32LE(0, 8); // responseTo (zero)
    msgHeader.writeInt32LE(opcodes.OP_COMPRESSED, 12); // opCode

    // Create the compression details of OP_COMPRESSED
    const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE);
    compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode
    compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // Size of the uncompressed compressedMessage, excluding the MsgHeader
    compressionDetails.writeUInt8(compressorIDs[self.options.agreedCompressor], 8); // compressorID

    return callback(null, [msgHeader, compressionDetails, compressedMessage]);
  });
}

/**
 * Write a message to MongoDB
 * @method
 * @return {Connection}
 */
Pool.prototype.write = function(command, options, cb) {
  var self = this;

  // Ensure we have a callback
  if (typeof options === 'function') {
    cb = options;
  }

  // Always have options
  options = options || {};

  // We need to have a callback function unless the message returns no response
  if (!(typeof cb === 'function') && !options.noResponse) {
    throw new MongoError('write method must provide a callback');
  }

  // Pool was destroyed error out
  if (this.state === DESTROYED || this.state === DESTROYING) {
    // Callback with an error
    if (cb) {
      try {
        cb(new
MongoError('pool destroyed')); } catch (err) { process.nextTick(function() { throw err; }); } } return; } if (this.options.domainsEnabled && process.domain && typeof cb === 'function') { // if we have a domain bind to it var oldCb = cb; cb = process.domain.bind(function() { // v8 - argumentsToArray one-liner var args = new Array(arguments.length); for (var i = 0; i < arguments.length; i++) { args[i] = arguments[i]; } // bounce off event loop so domain switch takes place process.nextTick(function() { oldCb.apply(null, args); }); }); } // Do we have an operation var operation = { cb: cb, raw: false, promoteLongs: true, promoteValues: true, promoteBuffers: false, fullResult: false }; // Set the options for the parsing operation.promoteLongs = typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true; operation.promoteValues = typeof options.promoteValues === 'boolean' ? options.promoteValues : true; operation.promoteBuffers = typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false; operation.raw = typeof options.raw === 'boolean' ? options.raw : false; operation.immediateRelease = typeof options.immediateRelease === 'boolean' ? options.immediateRelease : false; operation.documentsReturnedIn = options.documentsReturnedIn; operation.command = typeof options.command === 'boolean' ? options.command : false; operation.fullResult = typeof options.fullResult === 'boolean' ? options.fullResult : false; operation.noResponse = typeof options.noResponse === 'boolean' ? 
options.noResponse : false; operation.session = options.session || null; // Optional per operation socketTimeout operation.socketTimeout = options.socketTimeout; operation.monitoring = options.monitoring; // Custom socket Timeout if (options.socketTimeout) { operation.socketTimeout = options.socketTimeout; } // Get the requestId operation.requestId = command.requestId; // If command monitoring is enabled we need to modify the callback here if (self.options.monitorCommands) { this.emit('commandStarted', new apm.CommandStartedEvent(this, command)); operation.started = process.hrtime(); operation.cb = (err, reply) => { if (err) { self.emit( 'commandFailed', new apm.CommandFailedEvent(this, command, err, operation.started) ); } else { if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) { self.emit( 'commandFailed', new apm.CommandFailedEvent(this, command, reply.result, operation.started) ); } else { self.emit( 'commandSucceeded', new apm.CommandSucceededEvent(this, command, reply, operation.started) ); } } if (typeof cb === 'function') cb(err, reply); }; } // Prepare the operation buffer serializeCommand(self, command, (err, serializedBuffers) => { if (err) throw err; // Set the operation's buffer to the serialization of the commands operation.buffer = serializedBuffers; // If we have a monitoring operation schedule as the very first operation // Otherwise add to back of queue if (options.monitoring) { self.queue.unshift(operation); } else { self.queue.push(operation); } // Attempt to execute the operation if (!self.executing) { process.nextTick(function() { _execute(self)(); }); } }); }; // Return whether a command contains an uncompressible command term // Will return true if command contains no uncompressible command terms function canCompress(command) { const commandDoc = command instanceof Msg ? 
// Fail all monitoring operations waiting in the queue, removing them as we go.
// Called when no connection is available to service them; each work item's
// callback is invoked immediately with a "no connection" MongoError.
function flushMonitoringOperations(queue) {
  let i = 0;
  while (i < queue.length) {
    if (queue[i].monitoring) {
      // Remove the item first, then fail it. Do NOT advance `i` here: the
      // splice shifts the next element into the current index. The previous
      // implementation incremented `i` unconditionally after splicing and
      // therefore skipped any monitoring operation that immediately followed
      // another one in the queue.
      const workItem = queue.splice(i, 1)[0];
      workItem.cb(
        new MongoError({ message: 'no connection available for monitoring', driver: true })
      );
    } else {
      i += 1;
    }
  }
}
it // Then we can safely use it for monitoring. if ( self.availableConnections[i].isConnected() && self.availableConnections[i].workItems.length === 0 ) { foundValidConnection = true; connection = self.availableConnections[i]; break; } } // No safe connection found, attempt to grow the connections // if possible and break from the loop if (!foundValidConnection) { // Put workItem back on the queue self.queue.unshift(workItem); // Attempt to grow the pool if it's not yet maxsize if (totalConnections < self.options.size && self.queue.length > 0) { // Create a new connection _createConnection(self); } // Re-execute the operation setTimeout(function() { _execute(self)(); }, 10); break; } } // Don't execute operation until we have a full pool if (totalConnections < self.options.size) { // Connection has work items, then put it back on the queue // and create a new connection if (connection.workItems.length > 0) { // Lets put the workItem back on the list self.queue.unshift(workItem); // Create a new connection _createConnection(self); // Break from the loop break; } } // Get actual binary commands var buffer = workItem.buffer; // If we are monitoring take the connection of the availableConnections if (workItem.monitoring) { moveConnectionBetween(connection, self.availableConnections, self.inUseConnections); } // Track the executing commands on the mongo server // as long as there is an expected response if (!workItem.noResponse) { connection.workItems.push(workItem); } // We have a custom socketTimeout if (!workItem.immediateRelease && typeof workItem.socketTimeout === 'number') { connection.setSocketTimeout(workItem.socketTimeout); } // Capture if write was successful var writeSuccessful = true; // Put operation on the wire if (Array.isArray(buffer)) { for (let i = 0; i < buffer.length; i++) { writeSuccessful = connection.write(buffer[i]); } } else { writeSuccessful = connection.write(buffer); } // if the command is designated noResponse, call the callback immeditely 
/**
 * Build an object containing only the requested option fields, used when
 * logging/debugging so that full option objects are not dumped wholesale.
 *
 * @param {string[]} debugFields The option keys to copy.
 * @param {object} options The source options object.
 * @return {object} A new object holding just the selected keys (keys missing
 *     from `options` come through with value `undefined`, matching the
 *     original behavior).
 */
function debugOptions(debugFields, options) {
  return debugFields.reduce((finalOptions, name) => {
    finalOptions[name] = options[name];
    return finalOptions;
  }, {});
}
// Raised whenever a Snappy API is invoked without the optional `snappy`
// package being installed.
function noSnappyWarning() {
  throw new Error(
    'Attempted to use Snappy compression, but Snappy is not installed. Install or disable Snappy compression and try again.'
  );
}

// Facilitate loading Snappy optionally: return the real module when the
// optional dependency resolves, otherwise a stub whose every method throws a
// descriptive error on first use.
function retrieveSnappy() {
  let snappy = null;

  try {
    snappy = require_optional('snappy');
  } catch (error) {} // eslint-disable-line

  if (snappy) {
    return snappy;
  }

  return {
    compress: noSnappyWarning,
    uncompress: noSnappyWarning,
    compressSync: noSnappyWarning,
    uncompressSync: noSnappyWarning
  };
}
* * **CURSORS Cannot directly be instantiated** * @example * var Server = require('mongodb-core').Server * , ReadPreference = require('mongodb-core').ReadPreference * , assert = require('assert'); * * var server = new Server({host: 'localhost', port: 27017}); * // Wait for the connection event * server.on('connect', function(server) { * assert.equal(null, err); * * // Execute the write * var cursor = _server.cursor('integration_tests.inserts_example4', { * find: 'integration_tests.example4' * , query: {a:1} * }, { * readPreference: new ReadPreference('secondary'); * }); * * // Get the first document * cursor.next(function(err, doc) { * assert.equal(null, err); * server.destroy(); * }); * }); * * // Start connecting * server.connect(); */ /** * Creates a new Cursor, not to be used directly * @class * @param {object} bson An instance of the BSON parser * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {{object}|Long} cmd The selector (can be a command or a cursorId) * @param {object} [options=null] Optional settings. * @param {object} [options.batchSize=1000] Batchsize for the operation * @param {array} [options.documents=[]] Initial documents list for cursor * @param {object} [options.transforms=null] Transform methods for the cursor results * @param {function} [options.transforms.query] Transform the value returned from the initial query * @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype.next * @param {object} topology The server topology instance. * @param {object} topologyOptions The server topology options. 
* @return {Cursor} A cursor instance * @property {number} cursorBatchSize The current cursorBatchSize for the cursor * @property {number} cursorLimit The current cursorLimit for the cursor * @property {number} cursorSkip The current cursorSkip for the cursor */ var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) { options = options || {}; // Cursor pool this.pool = null; // Cursor server this.server = null; // Do we have a not connected handler this.disconnectHandler = options.disconnectHandler; // Set local values this.bson = bson; this.ns = ns; this.cmd = cmd; this.options = options; this.topology = topology; // All internal state this.cursorState = { cursorId: null, cmd: cmd, documents: options.documents || [], cursorIndex: 0, dead: false, killed: false, init: false, notified: false, limit: options.limit || cmd.limit || 0, skip: options.skip || cmd.skip || 0, batchSize: options.batchSize || cmd.batchSize || 1000, currentLimit: 0, // Result field name if not a cursor (contains the array of results) transforms: options.transforms, raw: options.raw || (cmd && cmd.raw) }; if (typeof options.session === 'object') { this.cursorState.session = options.session; } // Add promoteLong to cursor state if (typeof topologyOptions.promoteLongs === 'boolean') { this.cursorState.promoteLongs = topologyOptions.promoteLongs; } else if (typeof options.promoteLongs === 'boolean') { this.cursorState.promoteLongs = options.promoteLongs; } // Add promoteValues to cursor state if (typeof topologyOptions.promoteValues === 'boolean') { this.cursorState.promoteValues = topologyOptions.promoteValues; } else if (typeof options.promoteValues === 'boolean') { this.cursorState.promoteValues = options.promoteValues; } // Add promoteBuffers to cursor state if (typeof topologyOptions.promoteBuffers === 'boolean') { this.cursorState.promoteBuffers = topologyOptions.promoteBuffers; } else if (typeof options.promoteBuffers === 'boolean') { this.cursorState.promoteBuffers = 
//
// Invoke a user-supplied callback, shielding cursor internals from exceptions
// the callback itself may throw: any such exception is re-thrown on the next
// tick (surfacing as an uncaughtException) rather than corrupting cursor state.
var handleCallback = function(callback, err, result) {
  try {
    callback(err, result);
  } catch (callbackError) {
    // Renamed from `err`, which shadowed the error parameter and made it easy
    // to confuse the operation's error with the callback's own failure.
    process.nextTick(function() {
      throw callbackError;
    });
  }
};
/**
 * Synchronously drain up to `number` already-buffered documents from the
 * cursor without issuing any network traffic.
 *
 * @method
 * @param {number} number Maximum number of buffered documents to return.
 * @return {Array} The documents read (possibly transformed, and truncated if
 *     the cursor's limit would otherwise be exceeded).
 */
Cursor.prototype.readBufferedDocuments = function(number) {
  // How many buffered documents have not yet been handed to the caller.
  var unreadDocumentsLength = this.cursorState.documents.length - this.cursorState.cursorIndex;
  // Never read past the end of the buffer.
  var length = number < unreadDocumentsLength ? number : unreadDocumentsLength;
  var elements = this.cursorState.documents.slice(
    this.cursorState.cursorIndex,
    this.cursorState.cursorIndex + length
  );

  // Transform the doc with passed in transformation method if provided
  if (this.cursorState.transforms && typeof this.cursorState.transforms.doc === 'function') {
    // Transform all the elements
    for (var i = 0; i < elements.length; i++) {
      elements[i] = this.cursorState.transforms.doc(elements[i]);
    }
  }

  // Ensure we do not return any more documents than the limit imposed
  // Just return the number of elements up to the limit
  if (
    this.cursorState.limit > 0 &&
    this.cursorState.currentLimit + elements.length > this.cursorState.limit
  ) {
    // Truncate to the remaining limit and kill the server-side cursor, since
    // no further documents may be returned once the limit is reached.
    elements = elements.slice(0, this.cursorState.limit - this.cursorState.currentLimit);
    this.kill();
  }

  // Adjust current limit
  this.cursorState.currentLimit = this.cursorState.currentLimit + elements.length;
  this.cursorState.cursorIndex = this.cursorState.cursorIndex + elements.length;

  // Return elements
  return elements;
};
/**
 * Validate if the pool backing this cursor is dead. If so, mark the cursor
 * killed, notify it, and surface a MongoNetworkError to the caller.
 */
function isConnectionDead(self, callback) {
  if (!self.pool || !self.pool.isDestroyed()) {
    return false;
  }

  self.cursorState.killed = true;
  const err = new MongoNetworkError(
    f('connection to host %s:%s was destroyed', self.pool.host, self.pool.port)
  );
  _setCursorNotifiedImpl(self, () => callback(err));
  return true;
}

/**
 * Validate if the cursor is dead but was not explicitly killed by the user:
 * mark it killed and notify the caller with a null result.
 */
function isCursorDeadButNotkilled(self, callback) {
  if (!self.cursorState.dead || self.cursorState.killed) {
    return false;
  }

  self.cursorState.killed = true;
  setCursorNotified(self, callback);
  return true;
}

/**
 * Validate if the cursor is dead and was killed by the user; attempting to
 * advance such a cursor is an error.
 */
function isCursorDeadAndKilled(self, callback) {
  if (!self.cursorState.dead || !self.cursorState.killed) {
    return false;
  }

  handleCallback(callback, new MongoError('cursor is dead'));
  return true;
}

/**
 * Validate if the cursor was killed by the user.
 */
function isCursorKilled(self, callback) {
  if (!self.cursorState.killed) {
    return false;
  }

  setCursorNotified(self, callback);
  return true;
}

/**
 * Mark cursor as being dead and notified.
 */
function setCursorDeadAndNotified(self, callback) {
  self.cursorState.dead = true;
  setCursorNotified(self, callback);
}

/**
 * Mark cursor as being notified, completing the caller with (null, null).
 */
function setCursorNotified(self, callback) {
  _setCursorNotifiedImpl(self, () => handleCallback(callback, null, null));
}

// Shared notification path: flag the cursor notified, drop buffered documents,
// and end any implicit session before invoking the continuation.
function _setCursorNotifiedImpl(self, callback) {
  self.cursorState.notified = true;
  self.cursorState.documents = [];
  self.cursorState.cursorIndex = 0;

  if (self._endSession) {
    return self._endSession(undefined, () => callback());
  }
  return callback();
}
(isCursorKilled(self, callback)) return; // Cursor is dead but not marked killed, return null if (isCursorDeadButNotkilled(self, callback)) return; // We have a dead and killed cursor, attempting to call next should error if (isCursorDeadAndKilled(self, callback)) return; // We have just started the cursor if (!self.cursorState.init) { return initializeCursor(self, callback); } if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) { // Ensure we kill the cursor on the server self.kill(); // Set cursor in dead and notified state return setCursorDeadAndNotified(self, callback); } else if ( self.cursorState.cursorIndex === self.cursorState.documents.length && !Long.ZERO.equals(self.cursorState.cursorId) ) { // Ensure an empty cursor state self.cursorState.documents = []; self.cursorState.cursorIndex = 0; // Check if topology is destroyed if (self.topology.isDestroyed()) return callback( new MongoNetworkError('connection destroyed, not possible to instantiate cursor') ); // Check if connection is dead and return if not possible to // execute a getmore on this connection if (isConnectionDead(self, callback)) return; // Execute the next get more self._getmore(function(err, doc, connection) { if (err) { if (err instanceof MongoError) { err[mongoErrorContextSymbol].isGetMore = true; } return handleCallback(callback, err); } if (self.cursorState.cursorId && self.cursorState.cursorId.isZero() && self._endSession) { self._endSession(); } // Save the returned connection to ensure all getMore's fire over the same connection self.connection = connection; // Tailable cursor getMore result, notify owner about it // No attempt is made here to retry, this is left to the user of the // core module to handle to keep core simple if ( self.cursorState.documents.length === 0 && self.cmd.tailable && Long.ZERO.equals(self.cursorState.cursorId) ) { // No more documents in the tailed cursor return handleCallback( callback, new MongoError({ message: 'No 
more documents in tailed cursor', tailable: self.cmd.tailable, awaitData: self.cmd.awaitData }) ); } else if ( self.cursorState.documents.length === 0 && self.cmd.tailable && !Long.ZERO.equals(self.cursorState.cursorId) ) { return nextFunction(self, callback); } if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) { return setCursorDeadAndNotified(self, callback); } nextFunction(self, callback); }); } else if ( self.cursorState.documents.length === self.cursorState.cursorIndex && self.cmd.tailable && Long.ZERO.equals(self.cursorState.cursorId) ) { return handleCallback( callback, new MongoError({ message: 'No more documents in tailed cursor', tailable: self.cmd.tailable, awaitData: self.cmd.awaitData }) ); } else if ( self.cursorState.documents.length === self.cursorState.cursorIndex && Long.ZERO.equals(self.cursorState.cursorId) ) { setCursorDeadAndNotified(self, callback); } else { if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) { // Ensure we kill the cursor on the server self.kill(); // Set cursor in dead and notified state return setCursorDeadAndNotified(self, callback); } // Increment the current cursor limit self.cursorState.currentLimit += 1; // Get the document var doc = self.cursorState.documents[self.cursorState.cursorIndex++]; // Doc overflow if (!doc || doc.$err) { // Ensure we kill the cursor on the server self.kill(); // Set cursor in dead and notified state return setCursorDeadAndNotified(self, function() { handleCallback(callback, new MongoError(doc ? 
doc.$err : undefined)); }); } // Transform the doc with passed in transformation method if provided if (self.cursorState.transforms && typeof self.cursorState.transforms.doc === 'function') { doc = self.cursorState.transforms.doc(doc); } // Return the document handleCallback(callback, null, doc); } }; function initializeCursor(cursor, callback) { // Topology is not connected, save the call in the provided store to be // Executed at some point when the handler deems it's reconnected if (!cursor.topology.isConnected(cursor.options)) { // Only need this for single server, because repl sets and mongos // will always continue trying to reconnect if (cursor.topology._type === 'server' && !cursor.topology.s.options.reconnect) { // Reconnect is disabled, so we'll never reconnect return callback(new MongoError('no connection available')); } if (cursor.disconnectHandler != null) { if (cursor.topology.isDestroyed()) { // Topology was destroyed, so don't try to wait for it to reconnect return callback(new MongoError('Topology was destroyed')); } return cursor.disconnectHandler.addObjectAndMethod( 'cursor', cursor, 'next', [callback], callback ); } } // Very explicitly choose what is passed to selectServer const serverSelectOptions = {}; if (cursor.cursorState.session) { serverSelectOptions.session = cursor.cursorState.session; } if (cursor.options.readPreference) { serverSelectOptions.readPreference = cursor.options.readPreference; } return cursor.topology.selectServer(serverSelectOptions, (err, server) => { if (err) { const disconnectHandler = cursor.disconnectHandler; if (disconnectHandler != null) { return disconnectHandler.addObjectAndMethod('cursor', cursor, 'next', [callback], callback); } return callback(err); } cursor.server = server; cursor.cursorState.init = true; if (collationNotSupported(cursor.server, cursor.cmd)) { return callback(new MongoError(`server ${cursor.server.name} does not support collation`)); } function done() { if ( cursor.cursorState.cursorId && 
cursor.cursorState.cursorId.isZero() && cursor._endSession ) { cursor._endSession(); } if ( cursor.cursorState.documents.length === 0 && cursor.cursorState.cursorId && cursor.cursorState.cursorId.isZero() && !cursor.cmd.tailable && !cursor.cmd.awaitData ) { return setCursorNotified(cursor, callback); } nextFunction(cursor, callback); } // NOTE: this is a special internal method for cloning a cursor, consider removing if (cursor.cursorState.cursorId != null) { return done(); } const queryCallback = (err, r) => { if (err) return callback(err); const result = r.message; if (result.queryFailure) { return callback(new MongoError(result.documents[0]), null); } // Check if we have a command cursor if ( Array.isArray(result.documents) && result.documents.length === 1 && (!cursor.cmd.find || (cursor.cmd.find && cursor.cmd.virtual === false)) && (typeof result.documents[0].cursor !== 'string' || result.documents[0]['$err'] || result.documents[0]['errmsg'] || Array.isArray(result.documents[0].result)) ) { // We have an error document, return the error if (result.documents[0]['$err'] || result.documents[0]['errmsg']) { return callback(new MongoError(result.documents[0]), null); } // We have a cursor document if (result.documents[0].cursor != null && typeof result.documents[0].cursor !== 'string') { var id = result.documents[0].cursor.id; // If we have a namespace change set the new namespace for getmores if (result.documents[0].cursor.ns) { cursor.ns = result.documents[0].cursor.ns; } // Promote id to long if needed cursor.cursorState.cursorId = typeof id === 'number' ? 
const mongoErrorContextSymbol = Symbol('mongoErrorContextSymbol');

/**
 * Creates a new MongoError
 *
 * @augments Error
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 * @property {string} stack The error call stack
 */
class MongoError extends Error {
  constructor(message) {
    if (message instanceof Error) {
      // Wrap an existing Error: reuse its message and stack verbatim.
      super(message.message);
      this.stack = message.stack;
    } else if (typeof message === 'string') {
      super(message);
      Error.captureStackTrace(this, this.constructor);
    } else {
      // Server-style error document: derive the message from the usual
      // fields and copy every enumerable property onto this instance.
      super(message.message || message.errmsg || message.$err || 'n/a');
      for (const key in message) {
        this[key] = message[key];
      }
      Error.captureStackTrace(this, this.constructor);
    }

    // Assigned last so it wins over any `name` copied from an error document.
    this.name = 'MongoError';
    this[mongoErrorContextSymbol] = this[mongoErrorContextSymbol] || {};
  }

  /**
   * Creates a new MongoError object
   *
   * @param {Error|string|object} options The options used to create the error.
   * @return {MongoError} A MongoError instance
   * @deprecated Use `new MongoError()` instead.
   */
  static create(options) {
    return new MongoError(options);
  }

  hasErrorLabel(label) {
    return this.errorLabels && this.errorLabels.indexOf(label) !== -1;
  }
}

/**
 * Creates a new MongoNetworkError
 *
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 * @property {string} stack The error call stack
 */
class MongoNetworkError extends MongoError {
  constructor(message) {
    super(message);
    this.name = 'MongoNetworkError';

    // This label is added as part of the transactions specification.
    this.errorLabels = ['TransientTransactionError'];
  }
}

/**
 * An error used when attempting to parse a value (like a connection string)
 *
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 */
class MongoParseError extends MongoError {
  constructor(message) {
    super(message);
    this.name = 'MongoParseError';
  }
}

/**
 * An error signifying a timeout event
 *
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 */
class MongoTimeoutError extends MongoError {
  constructor(message) {
    super(message);
    this.name = 'MongoTimeoutError';
  }
}
// Server error codes that are safe to retry, per the retryable-writes spec:
// https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
const RETRYABLE_ERROR_CODES = new Set([
  6, // HostUnreachable
  7, // HostNotFound
  89, // NetworkTimeout
  91, // ShutdownInProgress
  189, // PrimarySteppedDown
  9001, // SocketException
  10107, // NotMaster
  11600, // InterruptedAtShutdown
  11602, // InterruptedDueToReplStateChange
  13435, // NotMasterNoSlaveOk
  13436 // NotMasterOrSecondary
]);

/**
 * Determines whether an error is something the driver should attempt to retry
 *
 * @param {MongoError|Error} error
 */
function isRetryableError(error) {
  if (RETRYABLE_ERROR_CODES.has(error.code)) return true;
  if (error instanceof MongoNetworkError) return true;

  // Fall back to message sniffing for servers that do not attach error codes;
  // deliberately returns the match result (array/null) to preserve the
  // original truthy/falsy contract.
  return error.message.match(/not master/) || error.message.match(/node is recovering/);
}
/**
 * Published when server description changes, but does NOT include changes to the RTT.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 * @property {ServerDescription} previousDescription The previous server description
 * @property {ServerDescription} newDescription The new server description
 */
class ServerDescriptionChangedEvent {
  constructor(topologyId, address, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.address = address;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}

/**
 * Published when server is initialized.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 */
class ServerOpeningEvent {
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}

/**
 * Published when server is closed.
 *
 * @property {ServerAddress} address The address (host/port pair) of the server
 * @property {Object} topologyId A unique identifier for the topology
 */
class ServerClosedEvent {
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}

/**
 * Published when topology description changes.
 *
 * @property {Object} topologyId
 * @property {TopologyDescription} previousDescription The old topology description
 * @property {TopologyDescription} newDescription The new topology description
 */
class TopologyDescriptionChangedEvent {
  constructor(topologyId, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}

/**
 * Published when topology is initialized.
 *
 * @param {Object} topologyId A unique identifier for the topology
 */
class TopologyOpeningEvent {
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}
* * @param {Object} topologyId A unique identifier for the topology */ class TopologyClosedEvent { constructor(topologyId) { Object.assign(this, { topologyId }); } } /** * Fired when the server monitor’s ismaster command is started - immediately before * the ismaster command is serialized into raw BSON and written to the socket. * * @property {Object} connectionId The connection id for the command */ class ServerHeartbeatStartedEvent { constructor(connectionId) { Object.assign(this, { connectionId }); } } /** * Fired when the server monitor’s ismaster succeeds. * * @param {Number} duration The execution time of the event in ms * @param {Object} reply The command reply * @param {Object} connectionId The connection id for the command */ class ServerHeartbeatSucceededEvent { constructor(duration, reply, connectionId) { Object.assign(this, { duration, reply, connectionId }); } } /** * Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception. * * @param {Number} duration The execution time of the event in ms * @param {MongoError|Object} failure The command failure * @param {Object} connectionId The connection id for the command */ class ServerHeartbeatFailedEvent { constructor(duration, failure, connectionId) { Object.assign(this, { duration, failure, connectionId }); } } /** * Performs a server check as described by the SDAM spec. 
* * NOTE: This method automatically reschedules itself, so that there is always an active * monitoring process * * @param {Server} server The server to monitor */ function monitorServer(server, options) { options = options || {}; const heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; if (options.initial === true) { server.s.monitorId = setTimeout(() => monitorServer(server), heartbeatFrequencyMS); return; } // executes a single check of a server const checkServer = callback => { let start = process.hrtime(); // emit a signal indicating we have started the heartbeat server.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(server.name)); // NOTE: legacy monitoring event process.nextTick(() => server.emit('monitoring', server)); server.command( 'admin.$cmd', { ismaster: true }, { monitoring: true, socketTimeout: server.s.options.connectionTimeout || 2000 }, (err, result) => { let duration = calculateDurationInMs(start); if (err) { server.emit( 'serverHeartbeatFailed', new ServerHeartbeatFailedEvent(duration, err, server.name) ); return callback(err, null); } const isMaster = result.result; server.emit( 'serverHeartbeatSucceded', new ServerHeartbeatSucceededEvent(duration, isMaster, server.name) ); return callback(null, isMaster); } ); }; const successHandler = isMaster => { server.s.monitoring = false; // emit an event indicating that our description has changed server.emit('descriptionReceived', new ServerDescription(server.description.address, isMaster)); // schedule the next monitoring process server.s.monitorId = setTimeout(() => monitorServer(server), heartbeatFrequencyMS); }; // run the actual monitoring loop server.s.monitoring = true; checkServer((err, isMaster) => { if (!err) { successHandler(isMaster); return; } // According to the SDAM specification's "Network error during server check" section, if // an ismaster call fails we reset the server's pool. 
If a server was once connected, // change its type to `Unknown` only after retrying once. server.s.pool.reset(() => { // otherwise re-attempt monitoring once checkServer((error, isMaster) => { if (error) { server.s.monitoring = false; // we revert to an `Unknown` by emitting a default description with no isMaster server.emit( 'descriptionReceived', new ServerDescription(server.description.address, null, { error }) ); // we do not reschedule monitoring in this case return; } successHandler(isMaster); }); }); }); } module.exports = { ServerDescriptionChangedEvent, ServerOpeningEvent, ServerClosedEvent, TopologyDescriptionChangedEvent, TopologyOpeningEvent, TopologyClosedEvent, ServerHeartbeatStartedEvent, ServerHeartbeatSucceededEvent, ServerHeartbeatFailedEvent, monitorServer }; package/lib/sdam/server_description.js000644 0000007112 3560116604 015222 0ustar00000000 000000 'use strict'; // An enumeration of server types we know about const ServerType = { Standalone: 'Standalone', Mongos: 'Mongos', PossiblePrimary: 'PossiblePrimary', RSPrimary: 'RSPrimary', RSSecondary: 'RSSecondary', RSArbiter: 'RSArbiter', RSOther: 'RSOther', RSGhost: 'RSGhost', Unknown: 'Unknown' }; const WRITABLE_SERVER_TYPES = new Set([ ServerType.RSPrimary, ServerType.Standalone, ServerType.Mongos ]); const ISMASTER_FIELDS = [ 'minWireVersion', 'maxWireVersion', 'maxBsonObjectSize', 'maxMessageSizeBytes', 'maxWriteBatchSize', 'compression', 'me', 'hosts', 'passives', 'arbiters', 'tags', 'setName', 'setVersion', 'electionId', 'primary', 'logicalSessionTimeoutMinutes', 'saslSupportedMechs', '__nodejs_mock_server__', '$clusterTime' ]; /** * The client's view of a single server, based on the most recent ismaster outcome. 
* * Internal type, not meant to be directly instantiated */ class ServerDescription { /** * Create a ServerDescription * @param {String} address The address of the server * @param {Object} [ismaster] An optional ismaster response for this server * @param {Object} [options] Optional settings * @param {Number} [options.roundTripTime] The round trip time to ping this server (in ms) */ constructor(address, ismaster, options) { options = options || {}; ismaster = Object.assign( { minWireVersion: 0, maxWireVersion: 0, hosts: [], passives: [], arbiters: [], tags: [] }, ismaster ); this.address = address; this.error = options.error || null; this.roundTripTime = options.roundTripTime || 0; this.lastUpdateTime = Date.now(); this.lastWriteDate = ismaster.lastWrite ? ismaster.lastWrite.lastWriteDate : null; this.opTime = ismaster.lastWrite ? ismaster.lastWrite.opTime : null; this.type = parseServerType(ismaster); // direct mappings ISMASTER_FIELDS.forEach(field => { if (typeof ismaster[field] !== 'undefined') this[field] = ismaster[field]; }); // normalize case for hosts if (this.me) this.me = this.me.toLowerCase(); this.hosts = this.hosts.map(host => host.toLowerCase()); this.passives = this.passives.map(host => host.toLowerCase()); this.arbiters = this.arbiters.map(host => host.toLowerCase()); } get allHosts() { return this.hosts.concat(this.arbiters).concat(this.passives); } /** * @return {Boolean} Is this server available for reads */ get isReadable() { return this.type === ServerType.RSSecondary || this.isWritable; } /** * @return {Boolean} Is this server available for writes */ get isWritable() { return WRITABLE_SERVER_TYPES.has(this.type); } } /** * Parses an `ismaster` message and determines the server type * * @param {Object} ismaster The `ismaster` message to parse * @return {ServerType} */ function parseServerType(ismaster) { if (!ismaster || !ismaster.ok) { return ServerType.Unknown; } if (ismaster.isreplicaset) { return ServerType.RSGhost; } if (ismaster.msg && 
ismaster.msg === 'isdbgrid') { return ServerType.Mongos; } if (ismaster.setName) { if (ismaster.hidden) { return ServerType.RSOther; } else if (ismaster.ismaster) { return ServerType.RSPrimary; } else if (ismaster.secondary) { return ServerType.RSSecondary; } else if (ismaster.arbiterOnly) { return ServerType.RSArbiter; } else { return ServerType.RSOther; } } return ServerType.Standalone; } module.exports = { ServerDescription, ServerType }; package/lib/sdam/server_selectors.js000644 0000020131 3560116604 014676 0ustar00000000 000000 'use strict'; const ServerType = require('./server_description').ServerType; const TopologyType = require('./topology_description').TopologyType; const ReadPreference = require('../topologies/read_preference'); const MongoError = require('../error').MongoError; // max staleness constants const IDLE_WRITE_PERIOD = 10000; const SMALLEST_MAX_STALENESS_SECONDS = 90; /** * Returns a server selector that selects for writable servers */ function writableServerSelector() { return function(topologyDescription, servers) { return latencyWindowReducer(topologyDescription, servers.filter(s => s.isWritable)); }; } /** * Reduces the passed in array of servers by the rules of the "Max Staleness" specification * found here: https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.rst * * @param {ReadPreference} readPreference The read preference providing max staleness guidance * @param {topologyDescription} topologyDescription The topology description * @param {ServerDescription[]} servers The list of server descriptions to be reduced * @return {ServerDescription[]} The list of servers that satisfy the requirements of max staleness */ function maxStalenessReducer(readPreference, topologyDescription, servers) { if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) { return servers; } const maxStaleness = readPreference.maxStalenessSeconds; const maxStalenessVariance = 
(topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000; if (maxStaleness < maxStalenessVariance) { throw new MongoError(`maxStalenessSeconds must be at least ${maxStalenessVariance} seconds`); } if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) { throw new MongoError( `maxStalenessSeconds must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds` ); } if (topologyDescription.type === TopologyType.ReplicaSetWithPrimary) { const primary = servers.filter(primaryFilter)[0]; return servers.reduce((result, server) => { const stalenessMS = server.lastUpdateTime - server.lastWriteDate - (primary.lastUpdateTime - primary.lastWriteDate) + topologyDescription.heartbeatFrequencyMS; const staleness = stalenessMS / 1000; if (staleness <= readPreference.maxStalenessSeconds) result.push(server); return result; }, []); } else if (topologyDescription.type === TopologyType.ReplicaSetNoPrimary) { const sMax = servers.reduce((max, s) => (s.lastWriteDate > max.lastWriteDate ? s : max)); return servers.reduce((result, server) => { const stalenessMS = sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS; const staleness = stalenessMS / 1000; if (staleness <= readPreference.maxStalenessSeconds) result.push(server); return result; }, []); } return servers; } /** * Determines whether a server's tags match a given set of tags * * @param {String[]} tagSet The requested tag set to match * @param {String[]} serverTags The server's tags */ function tagSetMatch(tagSet, serverTags) { const keys = Object.keys(tagSet); const serverTagKeys = Object.keys(serverTags); for (let i = 0; i < keys.length; ++i) { const key = keys[i]; if (serverTagKeys.indexOf(key) === -1 || serverTags[key] !== tagSet[key]) { return false; } } return true; } /** * Reduces a set of server descriptions based on tags requested by the read preference * * @param {ReadPreference} readPreference The read preference providing the requested tags * @param {ServerDescription[]} servers The 
list of server descriptions to reduce * @return {ServerDescription[]} The list of servers matching the requested tags */ function tagSetReducer(readPreference, servers) { if ( readPreference.tags == null || (Array.isArray(readPreference.tags) && readPreference.tags.length === 0) ) { return servers; } for (let i = 0; i < readPreference.tags.length; ++i) { const tagSet = readPreference.tags[i]; const serversMatchingTagset = servers.reduce((matched, server) => { if (tagSetMatch(tagSet, server.tags)) matched.push(server); return matched; }, []); if (serversMatchingTagset.length) { return serversMatchingTagset; } } return []; } /** * Reduces a list of servers to ensure they fall within an acceptable latency window. This is * further specified in the "Server Selection" specification, found here: * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst * * @param {topologyDescription} topologyDescription The topology description * @param {ServerDescription[]} servers The list of servers to reduce * @returns {ServerDescription[]} The servers which fall within an acceptable latency window */ function latencyWindowReducer(topologyDescription, servers) { const low = servers.reduce( (min, server) => (min === -1 ? 
server.roundTripTime : Math.min(server.roundTripTime, min)), -1 ); const high = low + topologyDescription.localThresholdMS; return servers.reduce((result, server) => { if (server.roundTripTime <= high && server.roundTripTime >= low) result.push(server); return result; }, []); } // filters function primaryFilter(server) { return server.type === ServerType.RSPrimary; } function secondaryFilter(server) { return server.type === ServerType.RSSecondary; } function nearestFilter(server) { return server.type === ServerType.RSSecondary || server.type === ServerType.RSPrimary; } function knownFilter(server) { return server.type !== ServerType.Unknown; } /** * Returns a function which selects servers based on a provided read preference * * @param {ReadPreference} readPreference The read preference to select with */ function readPreferenceServerSelector(readPreference) { if (!readPreference.isValid()) { throw new TypeError('Invalid read preference specified'); } return function(topologyDescription, servers) { const commonWireVersion = topologyDescription.commonWireVersion; if ( commonWireVersion && (readPreference.minWireVersion && readPreference.minWireVersion > commonWireVersion) ) { throw new MongoError( `Minimum wire version '${ readPreference.minWireVersion }' required, but found '${commonWireVersion}'` ); } if ( topologyDescription.type === TopologyType.Single || topologyDescription.type === TopologyType.Sharded ) { return latencyWindowReducer(topologyDescription, servers.filter(knownFilter)); } if (readPreference.mode === ReadPreference.PRIMARY) { return servers.filter(primaryFilter); } if (readPreference.mode === ReadPreference.SECONDARY) { return latencyWindowReducer( topologyDescription, tagSetReducer( readPreference, maxStalenessReducer(readPreference, topologyDescription, servers) ) ).filter(secondaryFilter); } else if (readPreference.mode === ReadPreference.NEAREST) { return latencyWindowReducer( topologyDescription, tagSetReducer( readPreference, 
maxStalenessReducer(readPreference, topologyDescription, servers) ) ).filter(nearestFilter); } else if (readPreference.mode === ReadPreference.SECONDARY_PREFERRED) { const result = latencyWindowReducer( topologyDescription, tagSetReducer( readPreference, maxStalenessReducer(readPreference, topologyDescription, servers) ) ).filter(secondaryFilter); return result.length === 0 ? servers.filter(primaryFilter) : result; } else if (readPreference.mode === ReadPreference.PRIMARY_PREFERRED) { const result = servers.filter(primaryFilter); if (result.length) { return result; } return latencyWindowReducer( topologyDescription, tagSetReducer( readPreference, maxStalenessReducer(readPreference, topologyDescription, servers) ) ).filter(secondaryFilter); } }; } module.exports = { writableServerSelector, readPreferenceServerSelector }; package/lib/sdam/server.js000644 0000030633 3560116604 012623 0ustar00000000 000000 'use strict'; const EventEmitter = require('events'); const MongoError = require('../error').MongoError; const Pool = require('../connection/pool'); const relayEvents = require('../utils').relayEvents; const wireProtocol = require('../wireprotocol'); const BSON = require('../connection/utils').retrieveBSON(); const createClientInfo = require('../topologies/shared').createClientInfo; const Logger = require('../connection/logger'); const ServerDescription = require('./server_description').ServerDescription; const ReadPreference = require('../topologies/read_preference'); const monitorServer = require('./monitoring').monitorServer; const MongoParseError = require('../error').MongoParseError; const MongoNetworkError = require('../error').MongoNetworkError; const collationNotSupported = require('../utils').collationNotSupported; const debugOptions = require('../connection/utils').debugOptions; // Used for filtering out fields for logging const DEBUG_FIELDS = [ 'reconnect', 'reconnectTries', 'reconnectInterval', 'emitError', 'cursorFactory', 'host', 'port', 'size', 
'keepAlive', 'keepAliveInitialDelay', 'noDelay', 'connectionTimeout', 'checkServerIdentity', 'socketTimeout', 'ssl', 'ca', 'crl', 'cert', 'key', 'rejectUnauthorized', 'promoteLongs', 'promoteValues', 'promoteBuffers', 'servername' ]; const STATE_DISCONNECTED = 0; const STATE_CONNECTING = 1; const STATE_CONNECTED = 2; /** * * @fires Server#serverHeartbeatStarted * @fires Server#serverHeartbeatSucceeded * @fires Server#serverHeartbeatFailed */ class Server extends EventEmitter { /** * Create a server * * @param {ServerDescription} description * @param {Object} options */ constructor(description, options, topology) { super(); this.s = { // the server description description, // a saved copy of the incoming options options, // the server logger logger: Logger('Server', options), // the bson parser bson: options.bson || new BSON(), // client metadata for the initial handshake clientInfo: createClientInfo(options), // state variable to determine if there is an active server check in progress monitoring: false, // the implementation of the monitoring method monitorFunction: options.monitorFunction || monitorServer, // the connection pool pool: null, // the server state state: STATE_DISCONNECTED, credentials: options.credentials, topology }; } get description() { return this.s.description; } get name() { return this.s.description.address; } /** * Initiate server connect */ connect(options) { options = options || {}; // do not allow connect to be called on anything that's not disconnected if (this.s.pool && !this.s.pool.isDisconnected() && !this.s.pool.isDestroyed()) { throw new MongoError(`Server instance in invalid state ${this.s.pool.state}`); } // create a pool const addressParts = this.description.address.split(':'); const poolOptions = Object.assign( { host: addressParts[0], port: parseInt(addressParts[1], 10) }, this.s.options, options, { bson: this.s.bson } ); // NOTE: this should only be the case if we are connecting to a single server poolOptions.reconnect = true; 
this.s.pool = new Pool(this, poolOptions); // setup listeners this.s.pool.on('connect', connectEventHandler(this)); this.s.pool.on('close', errorEventHandler(this)); this.s.pool.on('error', errorEventHandler(this)); this.s.pool.on('parseError', parseErrorEventHandler(this)); // it is unclear whether consumers should even know about these events // this.s.pool.on('timeout', timeoutEventHandler(this)); // this.s.pool.on('reconnect', reconnectEventHandler(this)); // this.s.pool.on('reconnectFailed', errorEventHandler(this)); // relay all command monitoring events relayEvents(this.s.pool, this, ['commandStarted', 'commandSucceeded', 'commandFailed']); this.s.state = STATE_CONNECTING; // If auth settings have been provided, use them if (options.auth) { this.s.pool.connect.apply(this.s.pool, options.auth); return; } this.s.pool.connect(); } /** * Destroy the server connection * * @param {Boolean} [options.force=false] Force destroy the pool */ destroy(options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, { force: false }, options); if (!this.s.pool) { this.s.state = STATE_DISCONNECTED; if (typeof callback === 'function') { callback(null, null); } return; } ['close', 'error', 'timeout', 'parseError', 'connect'].forEach(event => { this.s.pool.removeAllListeners(event); }); if (this.s.monitorId) { clearTimeout(this.s.monitorId); } this.s.pool.destroy(options.force, err => { this.s.state = STATE_DISCONNECTED; callback(err); }); } /** * Immediately schedule monitoring of this server. If there already an attempt being made * this will be a no-op. 
*/ monitor(options) { options = options || {}; if (this.s.state !== STATE_CONNECTED || this.s.monitoring) return; if (this.s.monitorId) clearTimeout(this.s.monitorId); this.s.monitorFunction(this, options); } /** * Execute a command * * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ command(ns, cmd, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } const error = basicReadValidations(this, options); if (error) { return callback(error, null); } // Clone the options options = Object.assign({}, options, { wireProtocolCommand: false }); // Debug log if (this.s.logger.isDebug()) { this.s.logger.debug( `executing command [${JSON.stringify({ ns, cmd, options: debugOptions(DEBUG_FIELDS, options) })}] against ${this.name}` ); } // error if collation not supported if (collationNotSupported(this, cmd)) { callback(new MongoError(`server ${this.name} does not support collation`)); return; } wireProtocol.command(this, ns, cmd, options, callback); } /** * Insert one or more documents * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of documents to insert * @param {boolean} [options.ordered=true] Execute in order or 
out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ insert(ns, ops, options, callback) { executeWriteOperation({ server: this, op: 'insert', ns, ops }, options, callback); } /** * Perform one or more update operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of updates * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ update(ns, ops, options, callback) { executeWriteOperation({ server: this, op: 'update', ns, ops }, options, callback); } /** * Perform one or more remove operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of removes * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ remove(ns, ops, options, callback) { executeWriteOperation({ server: this, op: 'remove', ns, ops }, options, callback); } } Object.defineProperty(Server.prototype, 'clusterTime', { get: function() { return this.s.topology.clusterTime; }, set: function(clusterTime) { this.s.topology.clusterTime = clusterTime; } }); function basicWriteValidations(server) { if (!server.s.pool) { return new MongoError('server instance is not connected'); } if (server.s.pool.isDestroyed()) { return new MongoError('server instance pool was destroyed'); } return null; } function basicReadValidations(server, options) { const error = basicWriteValidations(server, options); if (error) { return error; } if (options.readPreference && !(options.readPreference instanceof ReadPreference)) { return new MongoError('readPreference must be an instance of ReadPreference'); } } function executeWriteOperation(args, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // TODO: once we drop Node 4, use destructuring either here or in arguments. const server = args.server; const op = args.op; const ns = args.ns; const ops = Array.isArray(args.ops) ? 
args.ops : [args.ops]; const error = basicWriteValidations(server, options); if (error) { callback(error, null); return; } if (collationNotSupported(server, options)) { callback(new MongoError(`server ${this.name} does not support collation`)); return; } return wireProtocol[op](server, ns, ops, options, callback); } function connectEventHandler(server) { return function(pool, conn) { const ismaster = conn.ismaster; server.s.lastIsMasterMS = conn.lastIsMasterMS; if (conn.agreedCompressor) { server.s.pool.options.agreedCompressor = conn.agreedCompressor; } if (conn.zlibCompressionLevel) { server.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel; } if (conn.ismaster.$clusterTime) { const $clusterTime = conn.ismaster.$clusterTime; server.s.sclusterTime = $clusterTime; } // log the connection event if requested if (server.s.logger.isInfo()) { server.s.logger.info( `server ${server.name} connected with ismaster [${JSON.stringify(ismaster)}]` ); } // emit an event indicating that our description has changed server.emit('descriptionReceived', new ServerDescription(server.description.address, ismaster)); // we are connected and handshaked (guaranteed by the pool) server.s.state = STATE_CONNECTED; server.emit('connect', server); }; } function errorEventHandler(server) { return function(err) { if (err) { server.emit('error', new MongoNetworkError(err)); } server.emit('close'); }; } function parseErrorEventHandler(server) { return function(err) { server.s.state = STATE_DISCONNECTED; server.emit('error', new MongoParseError(err)); }; } module.exports = Server; package/lib/sdam/topology_description.js000644 0000031475 3560116604 015601 0ustar00000000 000000 'use strict'; const ServerType = require('./server_description').ServerType; const ServerDescription = require('./server_description').ServerDescription; const ReadPreference = require('../topologies/read_preference'); const WIRE_CONSTANTS = require('../wireprotocol/constants'); // contstants related to 
compatability checks const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION; const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION; const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION; const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION; // An enumeration of topology types we know about const TopologyType = { Single: 'Single', ReplicaSetNoPrimary: 'ReplicaSetNoPrimary', ReplicaSetWithPrimary: 'ReplicaSetWithPrimary', Sharded: 'Sharded', Unknown: 'Unknown' }; // Representation of a deployment of servers class TopologyDescription { /** * Create a TopologyDescription * * @param {string} topologyType * @param {Map} serverDescriptions the a map of address to ServerDescription * @param {string} setName * @param {number} maxSetVersion * @param {ObjectId} maxElectionId */ constructor( topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, options, error ) { options = options || {}; // TODO: consider assigning all these values to a temporary value `s` which // we use `Object.freeze` on, ensuring the internal state of this type // is immutable. 
this.type = topologyType || TopologyType.Unknown; this.setName = setName || null; this.maxSetVersion = maxSetVersion || null; this.maxElectionId = maxElectionId || null; this.servers = serverDescriptions || new Map(); this.stale = false; this.compatible = true; this.compatibilityError = null; this.logicalSessionTimeoutMinutes = null; this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0; this.localThresholdMS = options.localThresholdMS || 0; this.options = options; this.error = error; this.commonWireVersion = commonWireVersion || null; // determine server compatibility for (const serverDescription of this.servers.values()) { if (serverDescription.type === ServerType.Unknown) continue; if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) { this.compatible = false; this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${ serverDescription.minWireVersion }, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`; } if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) { this.compatible = false; this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${ serverDescription.maxWireVersion }, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`; break; } } // Whenever a client updates the TopologyDescription from an ismaster response, it MUST set // TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes // value among ServerDescriptions of all data-bearing server types. If any have a null // logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be // set to null. 
const readableServers = Array.from(this.servers.values()).filter(s => s.isReadable); this.logicalSessionTimeoutMinutes = readableServers.reduce((result, server) => { if (server.logicalSessionTimeoutMinutes == null) return null; if (result == null) return server.logicalSessionTimeoutMinutes; return Math.min(result, server.logicalSessionTimeoutMinutes); }, null); } /** * Returns a copy of this description updated with a given ServerDescription * * @param {ServerDescription} serverDescription */ update(serverDescription) { const address = serverDescription.address; // NOTE: there are a number of prime targets for refactoring here // once we support destructuring assignments // potentially mutated values let topologyType = this.type; let setName = this.setName; let maxSetVersion = this.maxSetVersion; let maxElectionId = this.maxElectionId; let commonWireVersion = this.commonWireVersion; let error = serverDescription.error || null; const serverType = serverDescription.type; let serverDescriptions = new Map(this.servers); // update common wire version if (serverDescription.maxWireVersion !== 0) { if (commonWireVersion == null) { commonWireVersion = serverDescription.maxWireVersion; } else { commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion); } } // update the actual server description serverDescriptions.set(address, serverDescription); if (topologyType === TopologyType.Single) { // once we are defined as single, that never changes return new TopologyDescription( TopologyType.Single, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, this.options, error ); } if (topologyType === TopologyType.Unknown) { if (serverType === ServerType.Standalone) { serverDescriptions.delete(address); } else { topologyType = topologyTypeForServerType(serverType); } } if (topologyType === TopologyType.Sharded) { if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) === -1) { serverDescriptions.delete(address); } } if 
(topologyType === TopologyType.ReplicaSetNoPrimary) { if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) >= 0) { serverDescriptions.delete(address); } if (serverType === ServerType.RSPrimary) { const result = updateRsFromPrimary( serverDescriptions, setName, serverDescription, maxSetVersion, maxElectionId ); (topologyType = result[0]), (setName = result[1]), (maxSetVersion = result[2]), (maxElectionId = result[3]); } else if ( [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0 ) { const result = updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription); (topologyType = result[0]), (setName = result[1]); } } if (topologyType === TopologyType.ReplicaSetWithPrimary) { if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) { serverDescriptions.delete(address); topologyType = checkHasPrimary(serverDescriptions); } else if (serverType === ServerType.RSPrimary) { const result = updateRsFromPrimary( serverDescriptions, setName, serverDescription, maxSetVersion, maxElectionId ); (topologyType = result[0]), (setName = result[1]), (maxSetVersion = result[2]), (maxElectionId = result[3]); } else if ( [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0 ) { topologyType = updateRsWithPrimaryFromMember( serverDescriptions, setName, serverDescription ); } else { topologyType = checkHasPrimary(serverDescriptions); } } return new TopologyDescription( topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, this.options, error ); } /** * Determines if the topology has a readable server available. See the table in the * following section for behaviour rules. 
* * @param {ReadPreference} [readPreference] An optional read preference for determining if a readable server is present * @return {Boolean} Whether there is a readable server in this topology */ hasReadableServer(/* readPreference */) { // To be implemented when server selection is implemented } /** * Determines if the topology has a writable server available. See the table in the * following section for behaviour rules. * * @return {Boolean} Whether there is a writable server in this topology */ hasWritableServer() { return this.hasReadableServer(ReadPreference.primary); } /** * Determines if the topology has a definition for the provided address * * @param {String} address * @return {Boolean} Whether the topology knows about this server */ hasServer(address) { return this.servers.has(address); } } function topologyTypeForServerType(serverType) { if (serverType === ServerType.Mongos) return TopologyType.Sharded; if (serverType === ServerType.RSPrimary) return TopologyType.ReplicaSetWithPrimary; return TopologyType.ReplicaSetNoPrimary; } function updateRsFromPrimary( serverDescriptions, setName, serverDescription, maxSetVersion, maxElectionId ) { setName = setName || serverDescription.setName; if (setName !== serverDescription.setName) { serverDescriptions.delete(serverDescription.address); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } const electionIdOID = serverDescription.electionId ? serverDescription.electionId.$oid : null; const maxElectionIdOID = maxElectionId ? 
maxElectionId.$oid : null; if (serverDescription.setVersion != null && electionIdOID != null) { if (maxSetVersion != null && maxElectionIdOID != null) { if (maxSetVersion > serverDescription.setVersion || maxElectionIdOID > electionIdOID) { // this primary is stale, we must remove it serverDescriptions.set( serverDescription.address, new ServerDescription(serverDescription.address) ); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } } maxElectionId = serverDescription.electionId; } if ( serverDescription.setVersion != null && (maxSetVersion == null || serverDescription.setVersion > maxSetVersion) ) { maxSetVersion = serverDescription.setVersion; } // We've heard from the primary. Is it the same primary as before? for (const address of serverDescriptions.keys()) { const server = serverDescriptions.get(address); if (server.type === ServerType.RSPrimary && server.address !== serverDescription.address) { // Reset old primary's type to Unknown. serverDescriptions.set(address, new ServerDescription(server.address)); // There can only be one primary break; } } // Discover new hosts from this primary's response. serverDescription.allHosts.forEach(address => { if (!serverDescriptions.has(address)) { serverDescriptions.set(address, new ServerDescription(address)); } }); // Remove hosts not in the response. 
const currentAddresses = Array.from(serverDescriptions.keys()); const responseAddresses = serverDescription.allHosts; currentAddresses.filter(addr => responseAddresses.indexOf(addr) === -1).forEach(address => { serverDescriptions.delete(address); }); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } function updateRsWithPrimaryFromMember(serverDescriptions, setName, serverDescription) { if (setName == null) { throw new TypeError('setName is required'); } if ( setName !== serverDescription.setName || (serverDescription.me && serverDescription.address !== serverDescription.me) ) { serverDescriptions.delete(serverDescription.address); } return checkHasPrimary(serverDescriptions); } function updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription) { let topologyType = TopologyType.ReplicaSetNoPrimary; setName = setName || serverDescription.setName; if (setName !== serverDescription.setName) { serverDescriptions.delete(serverDescription.address); return [topologyType, setName]; } serverDescription.allHosts.forEach(address => { if (!serverDescriptions.has(address)) { serverDescriptions.set(address, new ServerDescription(address)); } }); if (serverDescription.me && serverDescription.address !== serverDescription.me) { serverDescriptions.delete(serverDescription.address); } return [topologyType, setName]; } function checkHasPrimary(serverDescriptions) { for (const addr of serverDescriptions.keys()) { if (serverDescriptions.get(addr).type === ServerType.RSPrimary) { return TopologyType.ReplicaSetWithPrimary; } } return TopologyType.ReplicaSetNoPrimary; } module.exports = { TopologyType, TopologyDescription }; package/lib/sdam/topology.js000644 0000102032 3560116604 013162 0ustar00000000 000000 'use strict'; const EventEmitter = require('events'); const ServerDescription = require('./server_description').ServerDescription; const ServerType = require('./server_description').ServerType; const TopologyDescription = 
require('./topology_description').TopologyDescription; const TopologyType = require('./topology_description').TopologyType; const monitoring = require('./monitoring'); const calculateDurationInMs = require('../utils').calculateDurationInMs; const MongoTimeoutError = require('../error').MongoTimeoutError; const Server = require('./server'); const relayEvents = require('../utils').relayEvents; const ReadPreference = require('../topologies/read_preference'); const readPreferenceServerSelector = require('./server_selectors').readPreferenceServerSelector; const writableServerSelector = require('./server_selectors').writableServerSelector; const isRetryableWritesSupported = require('../topologies/shared').isRetryableWritesSupported; const Cursor = require('../cursor'); const deprecate = require('util').deprecate; const BSON = require('../connection/utils').retrieveBSON(); const createCompressionInfo = require('../topologies/shared').createCompressionInfo; const isRetryableError = require('../error').isRetryableError; const MongoParseError = require('../error').MongoParseError; const ClientSession = require('../sessions').ClientSession; const createClientInfo = require('../topologies/shared').createClientInfo; const MongoError = require('../error').MongoError; const resolveClusterTime = require('../topologies/shared').resolveClusterTime; // Global state let globalTopologyCounter = 0; // Constants const TOPOLOGY_DEFAULTS = { localThresholdMS: 15, serverSelectionTimeoutMS: 10000, heartbeatFrequencyMS: 30000, minHeartbeatFrequencyMS: 500 }; // events that we relay to the `Topology` const SERVER_RELAY_EVENTS = [ 'serverHeartbeatStarted', 'serverHeartbeatSucceeded', 'serverHeartbeatFailed', 'commandStarted', 'commandSucceeded', 'commandFailed', // NOTE: Legacy events 'monitoring' ]; // all events we listen to from `Server` instances const LOCAL_SERVER_EVENTS = SERVER_RELAY_EVENTS.concat([ 'error', 'connect', 'descriptionReceived', 'close', 'ended' ]); /** * A container of 
server instances representing a connection to a MongoDB topology. * * @fires Topology#serverOpening * @fires Topology#serverClosed * @fires Topology#serverDescriptionChanged * @fires Topology#topologyOpening * @fires Topology#topologyClosed * @fires Topology#topologyDescriptionChanged * @fires Topology#serverHeartbeatStarted * @fires Topology#serverHeartbeatSucceeded * @fires Topology#serverHeartbeatFailed */ class Topology extends EventEmitter { /** * Create a topology * * @param {Array|String} [seedlist] a string list, or array of Server instances to connect to * @param {Object} [options] Optional settings * @param {Number} [options.localThresholdMS=15] The size of the latency window for selecting among multiple suitable servers * @param {Number} [options.serverSelectionTimeoutMS=30000] How long to block for server selection before throwing an error * @param {Number} [options.heartbeatFrequencyMS=10000] The frequency with which topology updates are scheduled */ constructor(seedlist, options) { super(); if (typeof options === 'undefined' && typeof seedlist !== 'string') { options = seedlist; seedlist = []; // this is for legacy single server constructor support if (options.host) { seedlist.push({ host: options.host, port: options.port }); } } seedlist = seedlist || []; if (typeof seedlist === 'string') { seedlist = parseStringSeedlist(seedlist); } options = Object.assign({}, TOPOLOGY_DEFAULTS, options); const topologyType = topologyTypeFromSeedlist(seedlist, options); const topologyId = globalTopologyCounter++; const serverDescriptions = seedlist.reduce((result, seed) => { if (seed.domain_socket) seed.host = seed.domain_socket; const address = seed.port ? 
`${seed.host}:${seed.port}` : `${seed.host}:27017`; result.set(address, new ServerDescription(address)); return result; }, new Map()); this.s = { // the id of this topology id: topologyId, // passed in options options, // initial seedlist of servers to connect to seedlist: seedlist, // the topology description description: new TopologyDescription( topologyType, serverDescriptions, options.replicaSet, null, null, null, options ), serverSelectionTimeoutMS: options.serverSelectionTimeoutMS, heartbeatFrequencyMS: options.heartbeatFrequencyMS, minHeartbeatIntervalMS: options.minHeartbeatIntervalMS, // allow users to override the cursor factory Cursor: options.cursorFactory || Cursor, // the bson parser bson: options.bson || new BSON(), // a map of server instances to normalized addresses servers: new Map(), // Server Session Pool sessionPool: null, // Active client sessions sessions: [], // Promise library promiseLibrary: options.promiseLibrary || Promise, credentials: options.credentials, clusterTime: null }; // amend options for server instance creation this.s.options.compression = { compressors: createCompressionInfo(options) }; // add client info this.s.clientInfo = createClientInfo(options); } /** * @return A `TopologyDescription` for this topology */ get description() { return this.s.description; } get parserType() { return BSON.native ? 
'c++' : 'js'; } /** * All raw connections * @method * @return {Connection[]} */ connections() { return Array.from(this.s.servers.values()).reduce((result, server) => { return result.concat(server.s.pool.allConnections()); }, []); } /** * Initiate server connect * * @param {Object} [options] Optional settings * @param {Array} [options.auth=null] Array of auth options to apply on connect * @param {function} [callback] An optional callback called once on the first connected server */ connect(options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // emit SDAM monitoring events this.emit('topologyOpening', new monitoring.TopologyOpeningEvent(this.s.id)); // emit an event for the topology change this.emit( 'topologyDescriptionChanged', new monitoring.TopologyDescriptionChangedEvent( this.s.id, new TopologyDescription(TopologyType.Unknown), // initial is always Unknown this.s.description ) ); connectServers(this, Array.from(this.s.description.servers.values())); this.s.connected = true; // otherwise, wait for a server to properly connect based on user provided read preference, // or primary. 
translateReadPreference(options); const readPreference = options.readPreference || ReadPreference.primary; this.selectServer(readPreferenceServerSelector(readPreference), options, (err, server) => { if (err) { if (typeof callback === 'function') { callback(err, null); } else { this.emit('error', err); } return; } const errorHandler = err => { server.removeListener('connect', connectHandler); if (typeof callback === 'function') callback(err, null); }; const connectHandler = (_, err) => { server.removeListener('error', errorHandler); this.emit('open', err, this); this.emit('connect', this); if (typeof callback === 'function') callback(err, this); }; const STATE_CONNECTING = 1; if (server.s.state === STATE_CONNECTING) { server.once('error', errorHandler); server.once('connect', connectHandler); return; } connectHandler(); }); } /** * Close this topology */ close(options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; if (this.s.sessionPool) { this.s.sessions.forEach(session => session.endSession()); this.s.sessionPool.endAllPooledSessions(); } const servers = this.s.servers; if (servers.size === 0) { this.s.connected = false; if (typeof callback === 'function') { callback(null, null); } return; } // destroy all child servers let destroyed = 0; servers.forEach(server => destroyServer(server, this, () => { destroyed++; if (destroyed === servers.size) { // emit an event for close this.emit('topologyClosed', new monitoring.TopologyClosedEvent(this.s.id)); this.s.connected = false; if (typeof callback === 'function') { callback(null, null); } } }) ); } /** * Selects a server according to the selection predicate provided * * @param {function} [selector] An optional selector to select servers by, defaults to a random selection within a latency window * @param {object} [options] Optional settings related to server selection * @param {number} [options.serverSelectionTimeoutMS] How long to block for server selection 
before throwing an error * @param {function} callback The callback used to indicate success or failure * @return {Server} An instance of a `Server` meeting the criteria of the predicate provided */ selectServer(selector, options, callback) { if (typeof options === 'function') { callback = options; if (typeof selector !== 'function') { options = selector; translateReadPreference(options); const readPreference = options.readPreference || ReadPreference.primary; selector = readPreferenceServerSelector(readPreference); } else { options = {}; } } options = Object.assign( {}, { serverSelectionTimeoutMS: this.s.serverSelectionTimeoutMS }, options ); const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; const transaction = session && session.transaction; if (isSharded && transaction && transaction.server) { callback(null, transaction.server); return; } selectServers( this, selector, options.serverSelectionTimeoutMS, process.hrtime(), (err, servers) => { if (err) return callback(err, null); const selectedServer = randomSelection(servers); if (isSharded && transaction && transaction.isActive) { transaction.pinServer(selectedServer); } callback(null, selectedServer); } ); } // Sessions related methods /** * @return Whether sessions are supported on the current topology */ hasSessionSupport() { return this.description.logicalSessionTimeoutMinutes != null; } /** * Start a logical session */ startSession(options, clientOptions) { const session = new ClientSession(this, this.s.sessionPool, options, clientOptions); session.once('ended', () => { this.s.sessions = this.s.sessions.filter(s => !s.equals(session)); }); this.s.sessions.push(session); return session; } /** * Send endSessions command(s) with the given session ids * * @param {Array} sessions The sessions to end * @param {function} [callback] */ endSessions(sessions, callback) { if (!Array.isArray(sessions)) { sessions = [sessions]; } this.command( 'admin.$cmd', { endSessions: 
sessions }, { readPreference: ReadPreference.primaryPreferred, noResponse: true }, () => { // intentionally ignored, per spec if (typeof callback === 'function') callback(); } ); } /** * Update the internal TopologyDescription with a ServerDescription * * @param {object} serverDescription The server to update in the internal list of server descriptions */ serverUpdateHandler(serverDescription) { if (!this.s.description.hasServer(serverDescription.address)) { return; } // these will be used for monitoring events later const previousTopologyDescription = this.s.description; const previousServerDescription = this.s.description.servers.get(serverDescription.address); // first update the TopologyDescription this.s.description = this.s.description.update(serverDescription); if (this.s.description.compatibilityError) { this.emit('error', new MongoError(this.s.description.compatibilityError)); return; } // emit monitoring events for this change this.emit( 'serverDescriptionChanged', new monitoring.ServerDescriptionChangedEvent( this.s.id, serverDescription.address, previousServerDescription, this.s.description.servers.get(serverDescription.address) ) ); // update server list from updated descriptions updateServers(this, serverDescription); // Driver Sessions Spec: "Whenever a driver receives a cluster time from // a server it MUST compare it to the current highest seen cluster time // for the deployment. If the new cluster time is higher than the // highest seen cluster time it MUST become the new highest seen cluster // time. Two cluster times are compared using only the BsonTimestamp // value of the clusterTime embedded field." 
const clusterTime = serverDescription.$clusterTime; if (clusterTime) { resolveClusterTime(this, clusterTime); } this.emit( 'topologyDescriptionChanged', new monitoring.TopologyDescriptionChangedEvent( this.s.id, previousTopologyDescription, this.s.description ) ); } auth(credentials, callback) { if (typeof credentials === 'function') (callback = credentials), (credentials = null); if (typeof callback === 'function') callback(null, true); } logout(callback) { if (typeof callback === 'function') callback(null, true); } // Basic operation support. Eventually this should be moved into command construction // during the command refactor. /** * Insert one or more documents * * @param {String} ns The full qualified namespace for this operation * @param {Array} ops An array of documents to insert * @param {Boolean} [options.ordered=true] Execute in order or out of order * @param {Object} [options.writeConcern] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields * @param {ClientSession} [options.session] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ insert(ns, ops, options, callback) { executeWriteOperation({ topology: this, op: 'insert', ns, ops }, options, callback); } /** * Perform one or more update operations * * @param {string} ns The fully qualified namespace for this operation * @param {array} ops An array of updates * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON 
serializer should ignore undefined fields * @param {ClientSession} [options.session] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ update(ns, ops, options, callback) { executeWriteOperation({ topology: this, op: 'update', ns, ops }, options, callback); } /** * Perform one or more remove operations * * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of removes * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ remove(ns, ops, options, callback) { executeWriteOperation({ topology: this, op: 'remove', ns, ops }, options, callback); } /** * Execute a command * * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Connection} [options.connection] Specify connection object to execute command against * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ command(ns, cmd, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } translateReadPreference(options); const readPreference = options.readPreference || ReadPreference.primary; this.selectServer(readPreferenceServerSelector(readPreference), options, (err, server) => { if (err) { callback(err, null); return; } const willRetryWrite = !options.retrying && !!options.retryWrites && options.session && isRetryableWritesSupported(this) && !options.session.inTransaction() && isWriteCommand(cmd); const cb = (err, result) => { if (!err) return callback(null, result); if (!isRetryableError(err)) { return callback(err); } if (willRetryWrite) { const newOptions = Object.assign({}, options, { retrying: true }); return this.command(ns, cmd, newOptions, callback); } return callback(err); }; // increment and assign txnNumber if (willRetryWrite) { options.session.incrementTransactionNumber(); options.willRetryWrite = willRetryWrite; } server.command(ns, cmd, options, cb); }); } /** * Create a new cursor * * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId * @param {object} [options] Options for the cursor * @param {object} [options.batchSize=0] Batchsize for the operation * @param {array} [options.documents=[]] Initial documents list for cursor * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {object} [options.topology] The internal topology of the created cursor * @returns {Cursor} */ cursor(ns, cmd, options) { options = options || {}; const topology = options.topology || this; const CursorClass = options.cursorFactory || this.s.Cursor; translateReadPreference(options); return new CursorClass(this.s.bson, ns, cmd, options, topology, this.s.options); } get clientInfo() { return this.s.clientInfo; } // Legacy methods for compat with old topology types isConnected() { // console.log('not implemented: `isConnected`'); return true; } isDestroyed() { // console.log('not implemented: `isDestroyed`'); return false; } unref() { console.log('not implemented: `unref`'); } // NOTE: There are many places in code where we explicitly check the last isMaster // to do feature support detection. This should be done any other way, but for // now we will just return the first isMaster seen, which should suffice. 
lastIsMaster() { const serverDescriptions = Array.from(this.description.servers.values()); if (serverDescriptions.length === 0) return {}; const sd = serverDescriptions.filter(sd => sd.type !== ServerType.Unknown)[0]; const result = sd || { maxWireVersion: this.description.commonWireVersion }; return result; } get logicalSessionTimeoutMinutes() { return this.description.logicalSessionTimeoutMinutes; } get bson() { return this.s.bson; } } Object.defineProperty(Topology.prototype, 'clusterTime', { enumerable: true, get: function() { return this.s.clusterTime; }, set: function(clusterTime) { this.s.clusterTime = clusterTime; } }); // legacy aliases Topology.prototype.destroy = deprecate( Topology.prototype.close, 'destroy() is deprecated, please use close() instead' ); const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; function isWriteCommand(command) { return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); } /** * Destroys a server, and removes all event listeners from the instance * * @param {Server} server */ function destroyServer(server, topology, callback) { LOCAL_SERVER_EVENTS.forEach(event => server.removeAllListeners(event)); server.destroy(() => { topology.emit( 'serverClosed', new monitoring.ServerClosedEvent(topology.s.id, server.description.address) ); if (typeof callback === 'function') callback(null, null); }); } /** * Parses a basic seedlist in string form * * @param {string} seedlist The seedlist to parse */ function parseStringSeedlist(seedlist) { return seedlist.split(',').map(seed => ({ host: seed.split(':')[0], port: seed.split(':')[1] || 27017 })); } function topologyTypeFromSeedlist(seedlist, options) { const replicaSet = options.replicaSet || options.setName || options.rs_name; if (seedlist.length === 1 && !replicaSet) return TopologyType.Single; if (replicaSet) return TopologyType.ReplicaSetNoPrimary; return TopologyType.Unknown; } function randomSelection(array) { return array[Math.floor(Math.random() * 
array.length)]; } /** * Selects servers using the provided selector * * @private * @param {Topology} topology The topology to select servers from * @param {function} selector The actual predicate used for selecting servers * @param {Number} timeout The max time we are willing wait for selection * @param {Number} start A high precision timestamp for the start of the selection process * @param {function} callback The callback used to convey errors or the resultant servers */ function selectServers(topology, selector, timeout, start, callback) { const duration = calculateDurationInMs(start); if (duration >= timeout) { return callback(new MongoTimeoutError(`Server selection timed out after ${timeout} ms`)); } // ensure we are connected if (!topology.s.connected) { topology.connect(); // we want to make sure we're still within the requested timeout window const failToConnectTimer = setTimeout(() => { topology.removeListener('connect', connectHandler); callback(new MongoTimeoutError('Server selection timed out waiting to connect')); }, timeout - duration); const connectHandler = () => { clearTimeout(failToConnectTimer); selectServers(topology, selector, timeout, process.hrtime(), callback); }; topology.once('connect', connectHandler); return; } // otherwise, attempt server selection const serverDescriptions = Array.from(topology.description.servers.values()); let descriptions; // support server selection by options with readPreference if (typeof selector === 'object') { const readPreference = selector.readPreference ? selector.readPreference : ReadPreference.primary; selector = readPreferenceServerSelector(readPreference); } try { descriptions = selector ? 
selector(topology.description, serverDescriptions) : serverDescriptions; } catch (e) { return callback(e, null); } if (descriptions.length) { const servers = descriptions.map(description => topology.s.servers.get(description.address)); return callback(null, servers); } const retrySelection = () => { // ensure all server monitors attempt monitoring soon topology.s.servers.forEach(server => { setTimeout( () => server.monitor({ heartbeatFrequencyMS: topology.description.heartbeatFrequencyMS }), TOPOLOGY_DEFAULTS.minHeartbeatFrequencyMS ); }); const descriptionChangedHandler = () => { // successful iteration, clear the check timer clearTimeout(iterationTimer); if (topology.description.error) { callback(topology.description.error, null); return; } // topology description has changed due to monitoring, reattempt server selection selectServers(topology, selector, timeout, start, callback); }; const iterationTimer = setTimeout(() => { topology.removeListener('topologyDescriptionChanged', descriptionChangedHandler); callback(new MongoTimeoutError(`Server selection timed out after ${timeout} ms`)); }, timeout - duration); topology.once('topologyDescriptionChanged', descriptionChangedHandler); }; retrySelection(); } function createAndConnectServer(topology, serverDescription) { topology.emit( 'serverOpening', new monitoring.ServerOpeningEvent(topology.s.id, serverDescription.address) ); const server = new Server(serverDescription, topology.s.options, topology); relayEvents(server, topology, SERVER_RELAY_EVENTS); server.once('connect', serverConnectEventHandler(server, topology)); server.on('descriptionReceived', topology.serverUpdateHandler.bind(topology)); server.on('error', serverErrorEventHandler(server, topology)); server.on('close', () => topology.emit('close', server)); server.connect(); return server; } /** * Create `Server` instances for all initially known servers, connect them, and assign * them to the passed in `Topology`. 
* * @param {Topology} topology The topology responsible for the servers * @param {ServerDescription[]} serverDescriptions A list of server descriptions to connect */ function connectServers(topology, serverDescriptions) { topology.s.servers = serverDescriptions.reduce((servers, serverDescription) => { const server = createAndConnectServer(topology, serverDescription); servers.set(serverDescription.address, server); return servers; }, new Map()); } function updateServers(topology, incomingServerDescription) { // update the internal server's description if (topology.s.servers.has(incomingServerDescription.address)) { const server = topology.s.servers.get(incomingServerDescription.address); server.s.description = incomingServerDescription; } // add new servers for all descriptions we currently don't know about locally for (const serverDescription of topology.description.servers.values()) { if (!topology.s.servers.has(serverDescription.address)) { const server = createAndConnectServer(topology, serverDescription); topology.s.servers.set(serverDescription.address, server); } } // for all servers no longer known, remove their descriptions and destroy their instances for (const entry of topology.s.servers) { const serverAddress = entry[0]; if (topology.description.hasServer(serverAddress)) { continue; } const server = topology.s.servers.get(serverAddress); topology.s.servers.delete(serverAddress); // prepare server for garbage collection destroyServer(server, topology); } } function serverConnectEventHandler(server, topology) { return function(/* isMaster, err */) { server.monitor({ initial: true, heartbeatFrequencyMS: topology.description.heartbeatFrequencyMS }); }; } function serverErrorEventHandler(server, topology) { return function(err) { topology.emit( 'serverClosed', new monitoring.ServerClosedEvent(topology.s.id, server.description.address) ); if (err instanceof MongoParseError) { resetServerState(server, err, { clearPool: true }); return; } 
resetServerState(server, err); }; } function executeWriteOperation(args, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // TODO: once we drop Node 4, use destructuring either here or in arguments. const topology = args.topology; const op = args.op; const ns = args.ns; const ops = args.ops; const willRetryWrite = !args.retrying && !!options.retryWrites && options.session && isRetryableWritesSupported(topology) && !options.session.inTransaction(); topology.selectServer(writableServerSelector(), options, (err, server) => { if (err) { callback(err, null); return; } const handler = (err, result) => { if (!err) return callback(null, result); if (!isRetryableError(err)) { return callback(err); } if (willRetryWrite) { const newArgs = Object.assign({}, args, { retrying: true }); return executeWriteOperation(newArgs, options, callback); } return callback(err); }; if (callback.operationId) { handler.operationId = callback.operationId; } // increment and assign txnNumber if (willRetryWrite) { options.session.incrementTransactionNumber(); options.willRetryWrite = willRetryWrite; } // execute the write operation server[op](ns, ops, options, handler); }); } /** * Resets the internal state of this server to `Unknown` by simulating an empty ismaster * * @private * @param {Server} server * @param {MongoError} error The error that caused the state reset * @param {object} [options] Optional settings * @param {boolean} [options.clearPool=false] Pool should be cleared out on state reset */ function resetServerState(server, error, options) { options = Object.assign({}, { clearPool: false }, options); function resetState() { server.emit( 'descriptionReceived', new ServerDescription(server.description.address, null, { error }) ); } if (options.clearPool && server.pool) { server.pool.reset(() => resetState()); return; } resetState(); } function translateReadPreference(options) { if (options.readPreference == null) { 
return; } let r = options.readPreference; if (typeof r === 'string') { options.readPreference = new ReadPreference(r); } else if (r && !(r instanceof ReadPreference) && typeof r === 'object') { const mode = r.mode || r.preference; if (mode && typeof mode === 'string') { options.readPreference = new ReadPreference(mode, r.tags, { maxStalenessSeconds: r.maxStalenessSeconds }); } } else if (!(r instanceof ReadPreference)) { throw new TypeError('Invalid read preference: ' + r); } return options; } /** * A server opening SDAM monitoring event * * @event Topology#serverOpening * @type {ServerOpeningEvent} */ /** * A server closed SDAM monitoring event * * @event Topology#serverClosed * @type {ServerClosedEvent} */ /** * A server description SDAM change monitoring event * * @event Topology#serverDescriptionChanged * @type {ServerDescriptionChangedEvent} */ /** * A topology open SDAM event * * @event Topology#topologyOpening * @type {TopologyOpeningEvent} */ /** * A topology closed SDAM event * * @event Topology#topologyClosed * @type {TopologyClosedEvent} */ /** * A topology structure SDAM change event * * @event Topology#topologyDescriptionChanged * @type {TopologyDescriptionChangedEvent} */ /** * A topology serverHeartbeatStarted SDAM event * * @event Topology#serverHeartbeatStarted * @type {ServerHeartbeatStartedEvent} */ /** * A topology serverHeartbeatFailed SDAM event * * @event Topology#serverHeartbeatFailed * @type {ServerHearbeatFailedEvent} */ /** * A topology serverHeartbeatSucceeded SDAM change event * * @event Topology#serverHeartbeatSucceeded * @type {ServerHeartbeatSucceededEvent} */ /** * An event emitted indicating a command was started, if command monitoring is enabled * * @event Topology#commandStarted * @type {object} */ /** * An event emitted indicating a command succeeded, if command monitoring is enabled * * @event Topology#commandSucceeded * @type {object} */ /** * An event emitted indicating a command failed, if command monitoring is enabled * * 
@event Topology#commandFailed * @type {object} */ module.exports = Topology; package/lib/sessions.js000644 0000051622 3560116604 012240 0ustar00000000 000000 'use strict'; const retrieveBSON = require('./connection/utils').retrieveBSON; const EventEmitter = require('events'); const BSON = retrieveBSON(); const Binary = BSON.Binary; const uuidV4 = require('./utils').uuidV4; const MongoError = require('./error').MongoError; const isRetryableError = require('././error').isRetryableError; const MongoNetworkError = require('./error').MongoNetworkError; const MongoWriteConcernError = require('./error').MongoWriteConcernError; const Transaction = require('./transactions').Transaction; const TxnState = require('./transactions').TxnState; const isPromiseLike = require('./utils').isPromiseLike; const ReadPreference = require('./topologies/read_preference'); const isTransactionCommand = require('./transactions').isTransactionCommand; const resolveClusterTime = require('./topologies/shared').resolveClusterTime; function assertAlive(session, callback) { if (session.serverSession == null) { const error = new MongoError('Cannot use a session that has ended'); if (typeof callback === 'function') { callback(error, null); return false; } throw error; } return true; } /** * Options to pass when creating a Client Session * @typedef {Object} SessionOptions * @property {boolean} [causalConsistency=true] Whether causal consistency should be enabled on this session * @property {TransactionOptions} [defaultTransactionOptions] The default TransactionOptions to use for transactions started on this session. */ /** * A BSON document reflecting the lsid of a {@link ClientSession} * @typedef {Object} SessionId */ /** * A class representing a client session on the server * WARNING: not meant to be instantiated directly. * @class * @hideconstructor */ class ClientSession extends EventEmitter { /** * Create a client session. 
* WARNING: not meant to be instantiated directly * * @param {Topology} topology The current client's topology (Internal Class) * @param {ServerSessionPool} sessionPool The server session pool (Internal Class) * @param {SessionOptions} [options] Optional settings * @param {Object} [clientOptions] Optional settings provided when creating a client in the porcelain driver */ constructor(topology, sessionPool, options, clientOptions) { super(); if (topology == null) { throw new Error('ClientSession requires a topology'); } if (sessionPool == null || !(sessionPool instanceof ServerSessionPool)) { throw new Error('ClientSession requires a ServerSessionPool'); } options = options || {}; this.topology = topology; this.sessionPool = sessionPool; this.hasEnded = false; this.serverSession = sessionPool.acquire(); this.clientOptions = clientOptions; this.supports = { causalConsistency: typeof options.causalConsistency !== 'undefined' ? options.causalConsistency : true }; options = options || {}; if (typeof options.initialClusterTime !== 'undefined') { this.clusterTime = options.initialClusterTime; } else { this.clusterTime = null; } this.operationTime = null; this.explicit = !!options.explicit; this.owner = options.owner; this.defaultTransactionOptions = Object.assign({}, options.defaultTransactionOptions); this.transaction = new Transaction(); } /** * The server id associated with this session * @type {SessionId} */ get id() { return this.serverSession.id; } /** * Ends this session on the server * * @param {Object} [options] Optional settings. Currently reserved for future use * @param {Function} [callback] Optional callback for completion of this operation */ endSession(options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; if (this.hasEnded) { if (typeof callback === 'function') callback(null, null); return; } if (this.serverSession && this.inTransaction()) { this.abortTransaction(); // pass in callback? 
} // mark the session as ended, and emit a signal this.hasEnded = true; this.emit('ended', this); // release the server session back to the pool this.sessionPool.release(this.serverSession); this.serverSession = null; // spec indicates that we should ignore all errors for `endSessions` if (typeof callback === 'function') callback(null, null); } /** * Advances the operationTime for a ClientSession. * * @param {Timestamp} operationTime the `BSON.Timestamp` of the operation type it is desired to advance to */ advanceOperationTime(operationTime) { if (this.operationTime == null) { this.operationTime = operationTime; return; } if (operationTime.greaterThan(this.operationTime)) { this.operationTime = operationTime; } } /** * Used to determine if this session equals another * @param {ClientSession} session * @return {boolean} true if the sessions are equal */ equals(session) { if (!(session instanceof ClientSession)) { return false; } return this.id.id.buffer.equals(session.id.id.buffer); } /** * Increment the transaction number on the internal ServerSession */ incrementTransactionNumber() { this.serverSession.txnNumber++; } /** * @returns {boolean} whether this session is currently in a transaction or not */ inTransaction() { return this.transaction.isActive; } /** * Starts a new transaction with the given options. * * @param {TransactionOptions} options Options for the transaction */ startTransaction(options) { assertAlive(this); if (this.inTransaction()) { throw new MongoError('Transaction already in progress'); } // increment txnNumber this.incrementTransactionNumber(); // create transaction state this.transaction = new Transaction( Object.assign({}, this.clientOptions, options || this.defaultTransactionOptions) ); this.transaction.transition(TxnState.STARTING_TRANSACTION); } /** * Commits the currently active transaction in this session. 
* * @param {Function} [callback] optional callback for completion of this operation * @return {Promise} A promise is returned if no callback is provided */ commitTransaction(callback) { if (typeof callback === 'function') { endTransaction(this, 'commitTransaction', callback); return; } return new Promise((resolve, reject) => { endTransaction( this, 'commitTransaction', (err, reply) => (err ? reject(err) : resolve(reply)) ); }); } /** * Aborts the currently active transaction in this session. * * @param {Function} [callback] optional callback for completion of this operation * @return {Promise} A promise is returned if no callback is provided */ abortTransaction(callback) { if (typeof callback === 'function') { endTransaction(this, 'abortTransaction', callback); return; } return new Promise((resolve, reject) => { endTransaction( this, 'abortTransaction', (err, reply) => (err ? reject(err) : resolve(reply)) ); }); } /** * This is here to ensure that ClientSession is never serialized to BSON. * @ignore */ toBSON() { throw new Error('ClientSession cannot be serialized to BSON.'); } /** * A user provided function to be run within a transaction * * @callback WithTransactionCallback * @param {ClientSession} session The parent session of the transaction running the operation. This should be passed into each operation within the lambda. * @returns {Promise} The resulting Promise of operations run within this transaction */ /** * Runs a provided lambda within a transaction, retrying either the commit operation * or entire transaction as needed (and when the error permits) to better ensure that * the transaction can complete successfully. * * IMPORTANT: This method requires the user to return a Promise, all lambdas that do not * return a Promise will result in undefined behavior. 
* * @param {WithTransactionCallback} fn * @param {TransactionOptions} [options] Optional settings for the transaction */ withTransaction(fn, options) { const startTime = Date.now(); return attemptTransaction(this, startTime, fn, options); } } const MAX_WITH_TRANSACTION_TIMEOUT = 120000; const UNSATISFIABLE_WRITE_CONCERN_CODE = 100; const UNKNOWN_REPL_WRITE_CONCERN_CODE = 79; const NON_DETERMINISTIC_WRITE_CONCERN_ERRORS = new Set([ 'CannotSatisfyWriteConcern', 'UnknownReplWriteConcern', 'UnsatisfiableWriteConcern' ]); function hasNotTimedOut(startTime, max) { return Date.now() - startTime < max; } function isUnknownTransactionCommitResult(err) { return ( !NON_DETERMINISTIC_WRITE_CONCERN_ERRORS.has(err.codeName) && err.code !== UNSATISFIABLE_WRITE_CONCERN_CODE && err.code !== UNKNOWN_REPL_WRITE_CONCERN_CODE ); } function attemptTransactionCommit(session, startTime, fn, options) { return session.commitTransaction().catch(err => { if (err instanceof MongoError && hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT)) { if (err.hasErrorLabel('UnknownTransactionCommitResult')) { return attemptTransactionCommit(session, startTime, fn, options); } if (err.hasErrorLabel('TransientTransactionError')) { return attemptTransaction(session, startTime, fn, options); } } throw err; }); } const USER_EXPLICIT_TXN_END_STATES = new Set([ TxnState.NO_TRANSACTION, TxnState.TRANSACTION_COMMITTED, TxnState.TRANSACTION_ABORTED ]); function userExplicitlyEndedTransaction(session) { return USER_EXPLICIT_TXN_END_STATES.has(session.transaction.state); } function attemptTransaction(session, startTime, fn, options) { session.startTransaction(options); let promise; try { promise = fn(session); } catch (err) { promise = Promise.reject(err); } if (!isPromiseLike(promise)) { session.abortTransaction(); throw new TypeError('Function provided to `withTransaction` must return a Promise'); } return promise .then(() => { if (userExplicitlyEndedTransaction(session)) { return; } return 
attemptTransactionCommit(session, startTime, fn, options); }) .catch(err => { function maybeRetryOrThrow(err) { if ( err instanceof MongoError && err.hasErrorLabel('TransientTransactionError') && hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT) ) { return attemptTransaction(session, startTime, fn, options); } throw err; } if (session.transaction.isActive) { return session.abortTransaction().then(() => maybeRetryOrThrow(err)); } return maybeRetryOrThrow(err); }); } function endTransaction(session, commandName, callback) { if (!assertAlive(session, callback)) { // checking result in case callback was called return; } // handle any initial problematic cases let txnState = session.transaction.state; if (txnState === TxnState.NO_TRANSACTION) { callback(new MongoError('No transaction started')); return; } if (commandName === 'commitTransaction') { if ( txnState === TxnState.STARTING_TRANSACTION || txnState === TxnState.TRANSACTION_COMMITTED_EMPTY ) { // the transaction was never started, we can safely exit here session.transaction.transition(TxnState.TRANSACTION_COMMITTED_EMPTY); callback(null, null); return; } if (txnState === TxnState.TRANSACTION_ABORTED) { callback(new MongoError('Cannot call commitTransaction after calling abortTransaction')); return; } } else { if (txnState === TxnState.STARTING_TRANSACTION) { // the transaction was never started, we can safely exit here session.transaction.transition(TxnState.TRANSACTION_ABORTED); callback(null, null); return; } if (txnState === TxnState.TRANSACTION_ABORTED) { callback(new MongoError('Cannot call abortTransaction twice')); return; } if ( txnState === TxnState.TRANSACTION_COMMITTED || txnState === TxnState.TRANSACTION_COMMITTED_EMPTY ) { callback(new MongoError('Cannot call abortTransaction after calling commitTransaction')); return; } } // construct and send the command const command = { [commandName]: 1 }; // apply a writeConcern if specified let writeConcern; if (session.transaction.options.writeConcern) { 
writeConcern = Object.assign({}, session.transaction.options.writeConcern); } else if (session.clientOptions && session.clientOptions.w) { writeConcern = { w: session.clientOptions.w }; } if (txnState === TxnState.TRANSACTION_COMMITTED) { writeConcern = Object.assign({ wtimeout: 10000 }, writeConcern, { w: 'majority' }); } if (writeConcern) { Object.assign(command, { writeConcern }); } function commandHandler(e, r) { if (commandName === 'commitTransaction') { session.transaction.transition(TxnState.TRANSACTION_COMMITTED); if ( e && (e instanceof MongoNetworkError || e instanceof MongoWriteConcernError || isRetryableError(e)) ) { if (e.errorLabels) { const idx = e.errorLabels.indexOf('TransientTransactionError'); if (idx !== -1) { e.errorLabels.splice(idx, 1); } } else { e.errorLabels = []; } if (isUnknownTransactionCommitResult(e)) { e.errorLabels.push('UnknownTransactionCommitResult'); // per txns spec, must unpin session in this case session.transaction.unpinServer(); } } } else { session.transaction.transition(TxnState.TRANSACTION_ABORTED); } callback(e, r); } // The spec indicates that we should ignore all errors on `abortTransaction` function transactionError(err) { return commandName === 'commitTransaction' ? 
err : null; } if ( // Assumption here that commandName is "commitTransaction" or "abortTransaction" session.transaction.recoveryToken && supportsRecoveryToken(session) ) { command.recoveryToken = session.transaction.recoveryToken; } // send the command session.topology.command('admin.$cmd', command, { session }, (err, reply) => { if (err && isRetryableError(err)) { // SPEC-1185: apply majority write concern when retrying commitTransaction if (command.commitTransaction) { // per txns spec, must unpin session in this case session.transaction.unpinServer(); command.writeConcern = Object.assign({ wtimeout: 10000 }, command.writeConcern, { w: 'majority' }); } return session.topology.command('admin.$cmd', command, { session }, (_err, _reply) => commandHandler(transactionError(_err), _reply) ); } commandHandler(transactionError(err), reply); }); } function supportsRecoveryToken(session) { const topology = session.topology; return !!topology.s.options.useRecoveryToken; } /** * Reflects the existence of a session on the server. Can be reused by the session pool. * WARNING: not meant to be instantiated directly. For internal use only. * @ignore */ class ServerSession { constructor() { this.id = { id: new Binary(uuidV4(), Binary.SUBTYPE_UUID) }; this.lastUse = Date.now(); this.txnNumber = 0; } /** * Determines if the server session has timed out. * @ignore * @param {Date} sessionTimeoutMinutes The server's "logicalSessionTimeoutMinutes" * @return {boolean} true if the session has timed out. */ hasTimedOut(sessionTimeoutMinutes) { // Take the difference of the lastUse timestamp and now, which will result in a value in // milliseconds, and then convert milliseconds to minutes to compare to `sessionTimeoutMinutes` const idleTimeMinutes = Math.round( (((Date.now() - this.lastUse) % 86400000) % 3600000) / 60000 ); return idleTimeMinutes > sessionTimeoutMinutes - 1; } } /** * Maintains a pool of Server Sessions. 
* For internal use only * @ignore */ class ServerSessionPool { constructor(topology) { if (topology == null) { throw new Error('ServerSessionPool requires a topology'); } this.topology = topology; this.sessions = []; } /** * Ends all sessions in the session pool. * @ignore */ endAllPooledSessions() { if (this.sessions.length) { this.topology.endSessions(this.sessions.map(session => session.id)); this.sessions = []; } } /** * Acquire a Server Session from the pool. * Iterates through each session in the pool, removing any stale sessions * along the way. The first non-stale session found is removed from the * pool and returned. If no non-stale session is found, a new ServerSession * is created. * @ignore * @returns {ServerSession} */ acquire() { const sessionTimeoutMinutes = this.topology.logicalSessionTimeoutMinutes; while (this.sessions.length) { const session = this.sessions.shift(); if (!session.hasTimedOut(sessionTimeoutMinutes)) { return session; } } return new ServerSession(); } /** * Release a session to the session pool * Adds the session back to the session pool if the session has not timed out yet. * This method also removes any stale sessions from the pool. 
* @ignore * @param {ServerSession} session The session to release to the pool */ release(session) { const sessionTimeoutMinutes = this.topology.logicalSessionTimeoutMinutes; while (this.sessions.length) { const session = this.sessions[this.sessions.length - 1]; if (session.hasTimedOut(sessionTimeoutMinutes)) { this.sessions.pop(); } else { break; } } if (!session.hasTimedOut(sessionTimeoutMinutes)) { this.sessions.unshift(session); } } } /** * Optionally decorate a command with sessions specific keys * * @param {ClientSession} session the session tracking transaction state * @param {Object} command the command to decorate * @param {Object} topology the topology for tracking the cluster time * @param {Object} [options] Optional settings passed to calling operation * @return {MongoError|null} An error, if some error condition was met */ function applySession(session, command, options) { const serverSession = session.serverSession; if (serverSession == null) { // TODO: merge this with `assertAlive`, did not want to throw a try/catch here return new MongoError('Cannot use a session that has ended'); } // mark the last use of this session, and apply the `lsid` serverSession.lastUse = Date.now(); command.lsid = serverSession.id; // first apply non-transaction-specific sessions data const inTransaction = session.inTransaction() || isTransactionCommand(command); const isRetryableWrite = options.willRetryWrite; if (serverSession.txnNumber && (isRetryableWrite || inTransaction)) { command.txnNumber = BSON.Long.fromNumber(serverSession.txnNumber); } // now attempt to apply transaction-specific sessions data if (!inTransaction) { if (session.transaction.state !== TxnState.NO_TRANSACTION) { session.transaction.transition(TxnState.NO_TRANSACTION); } // TODO: the following should only be applied to read operation per spec. 
// for causal consistency if (session.supports.causalConsistency && session.operationTime) { command.readConcern = command.readConcern || {}; Object.assign(command.readConcern, { afterClusterTime: session.operationTime }); } return; } if (options.readPreference && !options.readPreference.equals(ReadPreference.primary)) { return new MongoError( `Read preference in a transaction must be primary, not: ${options.readPreference.mode}` ); } // `autocommit` must always be false to differentiate from retryable writes command.autocommit = false; if (session.transaction.state === TxnState.STARTING_TRANSACTION) { session.transaction.transition(TxnState.TRANSACTION_IN_PROGRESS); command.startTransaction = true; const readConcern = session.transaction.options.readConcern || session.clientOptions.readConcern; if (readConcern) { command.readConcern = readConcern; } if (session.supports.causalConsistency && session.operationTime) { command.readConcern = command.readConcern || {}; Object.assign(command.readConcern, { afterClusterTime: session.operationTime }); } } } function updateSessionFromResponse(session, document) { if (document.$clusterTime) { resolveClusterTime(session, document.$clusterTime); } if (document.operationTime && session && session.supports.causalConsistency) { session.advanceOperationTime(document.operationTime); } if (document.recoveryToken && session && session.inTransaction()) { session.transaction._recoveryToken = document.recoveryToken; } } module.exports = { ClientSession, ServerSession, ServerSessionPool, TxnState, applySession, updateSessionFromResponse }; package/lib/tools/smoke_plugin.js000644 0000003160 3560116604 014220 0ustar00000000 000000 'use strict'; var fs = require('fs'); /* Note: because this plugin uses process.on('uncaughtException'), only one * of these can exist at any given time. This plugin and anything else that * uses process.on('uncaughtException') will conflict. 
*/ exports.attachToRunner = function(runner, outputFile) { var smokeOutput = { results: [] }; var runningTests = {}; var integraPlugin = { beforeTest: function(test, callback) { test.startTime = Date.now(); runningTests[test.name] = test; callback(); }, afterTest: function(test, callback) { smokeOutput.results.push({ status: test.status, start: test.startTime, end: Date.now(), test_file: test.name, exit_code: 0, url: '' }); delete runningTests[test.name]; callback(); }, beforeExit: function(obj, callback) { fs.writeFile(outputFile, JSON.stringify(smokeOutput), function() { callback(); }); } }; // In case of exception, make sure we write file process.on('uncaughtException', function(err) { // Mark all currently running tests as failed for (var testName in runningTests) { smokeOutput.results.push({ status: 'fail', start: runningTests[testName].startTime, end: Date.now(), test_file: testName, exit_code: 0, url: '' }); } // write file fs.writeFileSync(outputFile, JSON.stringify(smokeOutput)); // Standard NodeJS uncaught exception handler console.error(err.stack); process.exit(1); }); runner.plugin(integraPlugin); return integraPlugin; }; package/lib/topologies/mongos.js000644 0000121230 3560116604 014051 0ustar00000000 000000 'use strict'; const inherits = require('util').inherits; const f = require('util').format; const EventEmitter = require('events').EventEmitter; const BasicCursor = require('../cursor'); const Logger = require('../connection/logger'); const retrieveBSON = require('../connection/utils').retrieveBSON; const MongoError = require('../error').MongoError; const Server = require('./server'); const clone = require('./shared').clone; const diff = require('./shared').diff; const cloneOptions = require('./shared').cloneOptions; const createClientInfo = require('./shared').createClientInfo; const SessionMixins = require('./shared').SessionMixins; const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; const relayEvents = 
require('../utils').relayEvents; const isRetryableError = require('../error').isRetryableError; const BSON = retrieveBSON(); /** * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is * used to construct connections. * * @example * var Mongos = require('mongodb-core').Mongos * , ReadPreference = require('mongodb-core').ReadPreference * , assert = require('assert'); * * var server = new Mongos([{host: 'localhost', port: 30000}]); * // Wait for the connection event * server.on('connect', function(server) { * server.destroy(); * }); * * // Start connecting * server.connect(); */ // // States var DISCONNECTED = 'disconnected'; var CONNECTING = 'connecting'; var CONNECTED = 'connected'; var UNREFERENCED = 'unreferenced'; var DESTROYED = 'destroyed'; function stateTransition(self, newState) { var legalTransitions = { disconnected: [CONNECTING, DESTROYED, DISCONNECTED], connecting: [CONNECTING, DESTROYED, CONNECTED, DISCONNECTED], connected: [CONNECTED, DISCONNECTED, DESTROYED, UNREFERENCED], unreferenced: [UNREFERENCED, DESTROYED], destroyed: [DESTROYED] }; // Get current state var legalStates = legalTransitions[self.state]; if (legalStates && legalStates.indexOf(newState) !== -1) { self.state = newState; } else { self.logger.error( f( 'Pool with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]', self.id, self.state, newState, legalStates ) ); } } // // ReplSet instance id var id = 1; var handlers = ['connect', 'close', 'error', 'timeout', 'parseError']; /** * Creates a new Mongos instance * @class * @param {array} seedlist A list of seeds for the replicaset * @param {number} [options.haInterval=5000] The High availability period for replicaset inquiry * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors * @param {number} [options.size=5] Server connection pool size * @param {boolean} [options.keepAlive=true] TCP Connection keep 
alive enabled * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled * @param {number} [options.localThresholdMS=15] Cutoff latency point in MS for MongoS proxy selection * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {number} [options.connectionTimeout=1000] TCP Connection timeout setting * @param {number} [options.socketTimeout=0] TCP Socket timeout setting * @param {boolean} [options.ssl=false] Use SSL for connection * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. * @param {Buffer} [options.ca] SSL Certificate store binary buffer * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer * @param {Buffer} [options.cert] SSL Certificate binary buffer * @param {Buffer} [options.key] SSL Key file binary buffer * @param {string} [options.passphrase] SSL Certificate pass phrase * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. 
* @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology * @return {Mongos} A cursor instance * @fires Mongos#connect * @fires Mongos#reconnect * @fires Mongos#joined * @fires Mongos#left * @fires Mongos#failed * @fires Mongos#fullsetup * @fires Mongos#all * @fires Mongos#serverHeartbeatStarted * @fires Mongos#serverHeartbeatSucceeded * @fires Mongos#serverHeartbeatFailed * @fires Mongos#topologyOpening * @fires Mongos#topologyClosed * @fires Mongos#topologyDescriptionChanged * @property {string} type the topology type. * @property {string} parserType the parser type used (c++ or js). */ var Mongos = function(seedlist, options) { options = options || {}; // Get replSet Id this.id = id++; // Internal state this.s = { options: Object.assign({}, options), // BSON instance bson: options.bson || new BSON([ BSON.Binary, BSON.Code, BSON.DBRef, BSON.Decimal128, BSON.Double, BSON.Int32, BSON.Long, BSON.Map, BSON.MaxKey, BSON.MinKey, BSON.ObjectId, BSON.BSONRegExp, BSON.Symbol, BSON.Timestamp ]), // Factory overrides Cursor: options.cursorFactory || BasicCursor, // Logger instance logger: Logger('Mongos', options), // Seedlist seedlist: seedlist, // Ha interval haInterval: options.haInterval ? options.haInterval : 10000, // Disconnect handler disconnectHandler: options.disconnectHandler, // Server selection index index: 0, // Connect function options passed in connectOptions: {}, // Are we running in debug mode debug: typeof options.debug === 'boolean' ? options.debug : false, // localThresholdMS localThresholdMS: options.localThresholdMS || 15, // Client info clientInfo: createClientInfo(options) }; // Set the client info this.s.options.clientInfo = createClientInfo(options); // Log info warning if the socketTimeout < haInterval as it will cause // a lot of recycled connections to happen. 
if ( this.s.logger.isWarn() && this.s.options.socketTimeout !== 0 && this.s.options.socketTimeout < this.s.haInterval ) { this.s.logger.warn( f( 'warning socketTimeout %s is less than haInterval %s. This might cause unnecessary server reconnections due to socket timeouts', this.s.options.socketTimeout, this.s.haInterval ) ); } // Disconnected state this.state = DISCONNECTED; // Current proxies we are connecting to this.connectingProxies = []; // Currently connected proxies this.connectedProxies = []; // Disconnected proxies this.disconnectedProxies = []; // Index of proxy to run operations against this.index = 0; // High availability timeout id this.haTimeoutId = null; // Last ismaster this.ismaster = null; // Description of the Replicaset this.topologyDescription = { topologyType: 'Unknown', servers: [] }; // Highest clusterTime seen in responses from the current deployment this.clusterTime = null; // Add event listener EventEmitter.call(this); }; inherits(Mongos, EventEmitter); Object.assign(Mongos.prototype, SessionMixins); Object.defineProperty(Mongos.prototype, 'type', { enumerable: true, get: function() { return 'mongos'; } }); Object.defineProperty(Mongos.prototype, 'parserType', { enumerable: true, get: function() { return BSON.native ? 
'c++' : 'js'; } }); Object.defineProperty(Mongos.prototype, 'logicalSessionTimeoutMinutes', { enumerable: true, get: function() { if (!this.ismaster) return null; return this.ismaster.logicalSessionTimeoutMinutes || null; } }); /** * Emit event if it exists * @method */ function emitSDAMEvent(self, event, description) { if (self.listeners(event).length > 0) { self.emit(event, description); } } const SERVER_EVENTS = ['serverDescriptionChanged', 'error', 'close', 'timeout', 'parseError']; function destroyServer(server, options, callback) { options = options || {}; SERVER_EVENTS.forEach(event => server.removeAllListeners(event)); server.destroy(options, callback); } /** * Initiate server connect */ Mongos.prototype.connect = function(options) { var self = this; // Add any connect level options to the internal state this.s.connectOptions = options || {}; // Set connecting state stateTransition(this, CONNECTING); // Create server instances var servers = this.s.seedlist.map(function(x) { const server = new Server( Object.assign({}, self.s.options, x, options, { reconnect: false, monitoring: false, parent: self, clientInfo: clone(self.s.clientInfo) }) ); relayEvents(server, self, ['serverDescriptionChanged']); return server; }); // Emit the topology opening event emitSDAMEvent(this, 'topologyOpening', { topologyId: this.id }); // Start all server connections connectProxies(self, servers); }; /** * Authenticate the topology. 
* @method
 * @param {MongoCredentials} credentials The credentials for authentication we are using
 * @param {authResultCallback} callback A callback function
 */
Mongos.prototype.auth = function(credentials, callback) {
  // Authentication is handled per-connection elsewhere in this driver; this
  // method is a no-op kept for topology interface compatibility and reports
  // immediate success.
  if (typeof callback === 'function') callback(null, null);
};

// Returns the listener installed once a proxy has reached the stable
// (connected) state: any error/close/timeout/parseError event demotes the
// proxy to the disconnected list and announces the departure.
function handleEvent(self) {
  return function() {
    // Ignore events arriving after the topology has been destroyed
    if (self.state === DESTROYED) return;
    // Move to list of disconnectedProxies
    moveServerFrom(self.connectedProxies, self.disconnectedProxies, this);
    // Emit the initial topology
    emitTopologyDescriptionChanged(self);
    // Emit the left signal
    self.emit('left', 'mongos', this);
    // Emit the sdam event
    self.emit('serverClosed', {
      topologyId: self.id,
      address: this.name
    });
  };
}

// Returns the one-shot listener used while a proxy is still in the initial
// connecting phase; `event` is the event name the listener was bound to
// ('connect' means the handshake succeeded, anything else is a failure).
function handleInitialConnectEvent(self, event) {
  return function() {
    var _this = this;

    // Topology was destroyed mid-handshake: discard this server instance
    if (self.state === DESTROYED) {
      // Emit the initial topology
      emitTopologyDescriptionChanged(self);
      // Move from connectingProxies
      moveServerFrom(self.connectingProxies, self.disconnectedProxies, this);
      return this.destroy();
    }

    // Check the type of server
    if (event === 'connect') {
      // Get last known ismaster
      self.ismaster = _this.lastIsMaster();

      // Only servers whose ismaster reports msg 'isdbgrid' are mongos proxies
      if (self.ismaster.msg === 'isdbgrid') {
        // A proxy with the same name is already connected: treat this one as a
        // duplicate, drop it and signal 'failed'
        for (let i = 0; i < self.connectedProxies.length; i++) {
          if (self.connectedProxies[i].name === _this.name) {
            // Move from connectingProxies
            moveServerFrom(self.connectingProxies, self.disconnectedProxies, _this);
            // Emit the initial topology
            emitTopologyDescriptionChanged(self);
            _this.destroy();
            return self.emit('failed', _this);
          }
        }

        // Remove the temporary connect-phase handlers
        for (let i = 0; i < handlers.length; i++) {
          _this.removeAllListeners(handlers[i]);
        }

        // Add stable state handlers
        _this.on('error', handleEvent(self, 'error'));
        _this.on('close', handleEvent(self, 'close'));
        _this.on('timeout', handleEvent(self, 'timeout'));
        _this.on('parseError', handleEvent(self, 'parseError'));

        // Move from connecting proxies to connected
        moveServerFrom(self.connectingProxies, self.connectedProxies, _this);
        // Emit the joined event
        self.emit('joined', 'mongos', _this);
      } else {
        // Print warning if we did not find a mongos proxy
        if (self.s.logger.isWarn()) {
          var message = 'expected mongos proxy, but found replicaset member mongod for server %s';
          // We have a standalone server
          if (!self.ismaster.hosts) {
            message = 'expected mongos proxy, but found standalone mongod for server %s';
          }

          self.s.logger.warn(f(message, _this.name));
        }

        // This is not a mongos proxy, remove it completely
        removeProxyFrom(self.connectingProxies, _this);
        // Emit the left event
        self.emit('left', 'server', _this);
        // Emit failed event
        self.emit('failed', _this);
      }
    } else {
      // Initial connect failed (close/timeout/error/parseError)
      moveServerFrom(self.connectingProxies, self.disconnectedProxies, this);
      // Emit the left event
      self.emit('left', 'mongos', this);
      // Emit failed event
      self.emit('failed', this);
    }

    // Emit the initial topology
    emitTopologyDescriptionChanged(self);

    // Once every seed has resolved one way or the other, settle the topology
    if (self.connectingProxies.length === 0) {
      // Emit connected if we are connected
      if (self.connectedProxies.length > 0 && self.state === CONNECTING) {
        // Set the state to connected
        stateTransition(self, CONNECTED);
        // Emit the connect event
        self.emit('connect', self);
        self.emit('fullsetup', self);
        self.emit('all', self);
      } else if (self.disconnectedProxies.length === 0) {
        // Print warning if we did not find a mongos proxy
        if (self.s.logger.isWarn()) {
          self.s.logger.warn(
            f('no mongos proxies found in seed list, did you mean to connect to a replicaset')
          );
        }

        // Emit the error that no proxies were found
        return self.emit('error', new MongoError('no mongos proxies found in seed list'));
      }

      // Topology monitor
      topologyMonitor(self, { firstConnect: true });
    }
  };
}

// Kick off staggered connection attempts against every seed server
function connectProxies(self, servers) {
  // Update connectingProxies
  self.connectingProxies = self.connectingProxies.concat(servers);

  // Index used to interleave the server connects, avoiding
  // runtime issues on io constrained vm's
  var
timeoutInterval = 0; function connect(server, timeoutInterval) { setTimeout(function() { // Emit opening server event self.emit('serverOpening', { topologyId: self.id, address: server.name }); // Emit the initial topology emitTopologyDescriptionChanged(self); // Add event handlers server.once('close', handleInitialConnectEvent(self, 'close')); server.once('timeout', handleInitialConnectEvent(self, 'timeout')); server.once('parseError', handleInitialConnectEvent(self, 'parseError')); server.once('error', handleInitialConnectEvent(self, 'error')); server.once('connect', handleInitialConnectEvent(self, 'connect')); // Command Monitoring events relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); // Start connection server.connect(self.s.connectOptions); }, timeoutInterval); } // Start all the servers while (servers.length > 0) { connect(servers.shift(), timeoutInterval++); } } function pickProxy(self, session) { // TODO: Destructure :) const transaction = session && session.transaction; if (transaction && transaction.server) { if (transaction.server.isConnected()) { return transaction.server; } else { transaction.unpinServer(); } } // Get the currently connected Proxies var connectedProxies = self.connectedProxies.slice(0); // Set lower bound var lowerBoundLatency = Number.MAX_VALUE; // Determine the lower bound for the Proxies for (var i = 0; i < connectedProxies.length; i++) { if (connectedProxies[i].lastIsMasterMS < lowerBoundLatency) { lowerBoundLatency = connectedProxies[i].lastIsMasterMS; } } // Filter out the possible servers connectedProxies = connectedProxies.filter(function(server) { if ( server.lastIsMasterMS <= lowerBoundLatency + self.s.localThresholdMS && server.isConnected() ) { return true; } }); let proxy; // We have no connectedProxies pick first of the connected ones if (connectedProxies.length === 0) { proxy = self.connectedProxies[0]; } else { // Get proxy proxy = connectedProxies[self.index % 
connectedProxies.length]; // Update the index self.index = (self.index + 1) % connectedProxies.length; } if (transaction && transaction.isActive && proxy && proxy.isConnected()) { transaction.pinServer(proxy); } // Return the proxy return proxy; } function moveServerFrom(from, to, proxy) { for (var i = 0; i < from.length; i++) { if (from[i].name === proxy.name) { from.splice(i, 1); } } for (i = 0; i < to.length; i++) { if (to[i].name === proxy.name) { to.splice(i, 1); } } to.push(proxy); } function removeProxyFrom(from, proxy) { for (var i = 0; i < from.length; i++) { if (from[i].name === proxy.name) { from.splice(i, 1); } } } function reconnectProxies(self, proxies, callback) { // Count lefts var count = proxies.length; // Handle events var _handleEvent = function(self, event) { return function() { var _self = this; count = count - 1; // Destroyed if (self.state === DESTROYED || self.state === UNREFERENCED) { moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); return this.destroy(); } if (event === 'connect') { // Destroyed if (self.state === DESTROYED || self.state === UNREFERENCED) { moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); return _self.destroy(); } // Remove the handlers for (var i = 0; i < handlers.length; i++) { _self.removeAllListeners(handlers[i]); } // Add stable state handlers _self.on('error', handleEvent(self, 'error')); _self.on('close', handleEvent(self, 'close')); _self.on('timeout', handleEvent(self, 'timeout')); _self.on('parseError', handleEvent(self, 'parseError')); // Move to the connected servers moveServerFrom(self.connectingProxies, self.connectedProxies, _self); // Emit topology Change emitTopologyDescriptionChanged(self); // Emit joined event self.emit('joined', 'mongos', _self); } else { // Move from connectingProxies moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); this.destroy(); } // Are we done finish up callback if (count === 0) { callback(); } }; }; // No 
new servers if (count === 0) { return callback(); } // Execute method function execute(_server, i) { setTimeout(function() { // Destroyed if (self.state === DESTROYED || self.state === UNREFERENCED) { return; } // Create a new server instance var server = new Server( Object.assign({}, self.s.options, { host: _server.name.split(':')[0], port: parseInt(_server.name.split(':')[1], 10), reconnect: false, monitoring: false, parent: self, clientInfo: clone(self.s.clientInfo) }) ); destroyServer(_server); removeProxyFrom(self.disconnectedProxies, _server); // Relay the server description change relayEvents(server, self, ['serverDescriptionChanged']); // Emit opening server event self.emit('serverOpening', { topologyId: server.s.topologyId !== -1 ? server.s.topologyId : self.id, address: server.name }); // Add temp handlers server.once('connect', _handleEvent(self, 'connect')); server.once('close', _handleEvent(self, 'close')); server.once('timeout', _handleEvent(self, 'timeout')); server.once('error', _handleEvent(self, 'error')); server.once('parseError', _handleEvent(self, 'parseError')); // Command Monitoring events relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); // Connect to proxy self.connectingProxies.push(server); server.connect(self.s.connectOptions); }, i); } // Create new instances for (var i = 0; i < proxies.length; i++) { execute(proxies[i], i); } } function topologyMonitor(self, options) { options = options || {}; // Set momitoring timeout self.haTimeoutId = setTimeout(function() { if (self.state === DESTROYED || self.state === UNREFERENCED) return; // If we have a primary and a disconnect handler, execute // buffered operations if (self.isConnected() && self.s.disconnectHandler) { self.s.disconnectHandler.execute(); } // Get the connectingServers var proxies = self.connectedProxies.slice(0); // Get the count var count = proxies.length; // If the count is zero schedule a new fast function pingServer(_self, _server, cb) { 
// Measure running time var start = new Date().getTime(); // Emit the server heartbeat start emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: _server.name }); // Execute ismaster _server.command( 'admin.$cmd', { ismaster: true }, { monitoring: true, socketTimeout: self.s.options.connectionTimeout || 2000 }, function(err, r) { if (self.state === DESTROYED || self.state === UNREFERENCED) { // Move from connectingProxies moveServerFrom(self.connectedProxies, self.disconnectedProxies, _server); _server.destroy(); return cb(err, r); } // Calculate latency var latencyMS = new Date().getTime() - start; // We had an error, remove it from the state if (err) { // Emit the server heartbeat failure emitSDAMEvent(self, 'serverHeartbeatFailed', { durationMS: latencyMS, failure: err, connectionId: _server.name }); // Move from connected proxies to disconnected proxies moveServerFrom(self.connectedProxies, self.disconnectedProxies, _server); } else { // Update the server ismaster _server.ismaster = r.result; _server.lastIsMasterMS = latencyMS; // Server heart beat event emitSDAMEvent(self, 'serverHeartbeatSucceeded', { durationMS: latencyMS, reply: r.result, connectionId: _server.name }); } cb(err, r); } ); } // No proxies initiate monitor again if (proxies.length === 0) { // Emit close event if any listeners registered if (self.listeners('close').length > 0 && self.state === CONNECTING) { self.emit('error', new MongoError('no mongos proxy available')); } else { self.emit('close', self); } // Attempt to connect to any unknown servers return reconnectProxies(self, self.disconnectedProxies, function() { if (self.state === DESTROYED || self.state === UNREFERENCED) return; // Are we connected ? 
emit connect event if (self.state === CONNECTING && options.firstConnect) { self.emit('connect', self); self.emit('fullsetup', self); self.emit('all', self); } else if (self.isConnected()) { self.emit('reconnect', self); } else if (!self.isConnected() && self.listeners('close').length > 0) { self.emit('close', self); } // Perform topology monitor topologyMonitor(self); }); } // Ping all servers for (var i = 0; i < proxies.length; i++) { pingServer(self, proxies[i], function() { count = count - 1; if (count === 0) { if (self.state === DESTROYED || self.state === UNREFERENCED) return; // Attempt to connect to any unknown servers reconnectProxies(self, self.disconnectedProxies, function() { if (self.state === DESTROYED || self.state === UNREFERENCED) return; // Perform topology monitor topologyMonitor(self); }); } }); } }, self.s.haInterval); } /** * Returns the last known ismaster document for this server * @method * @return {object} */ Mongos.prototype.lastIsMaster = function() { return this.ismaster; }; /** * Unref all connections belong to this server * @method */ Mongos.prototype.unref = function() { // Transition state stateTransition(this, UNREFERENCED); // Get all proxies var proxies = this.connectedProxies.concat(this.connectingProxies); proxies.forEach(function(x) { x.unref(); }); clearTimeout(this.haTimeoutId); }; /** * Destroy the server connection * @param {boolean} [options.force=false] Force destroy the pool * @method */ Mongos.prototype.destroy = function(options, callback) { if (this.haTimeoutId) { clearTimeout(this.haTimeoutId); } const proxies = this.connectedProxies.concat(this.connectingProxies); let serverCount = proxies.length; const serverDestroyed = () => { serverCount--; if (serverCount > 0) { return; } emitTopologyDescriptionChanged(this); emitSDAMEvent(this, 'topologyClosed', { topologyId: this.id }); stateTransition(this, DESTROYED); if (typeof callback === 'function') { callback(null, null); } }; if (serverCount === 0) { 
serverDestroyed(); return; } // Destroy all connecting servers proxies.forEach(server => { // Emit the sdam event this.emit('serverClosed', { topologyId: this.id, address: server.name }); destroyServer(server, options, serverDestroyed); moveServerFrom(this.connectedProxies, this.disconnectedProxies, server); }); }; /** * Figure out if the server is connected * @method * @return {boolean} */ Mongos.prototype.isConnected = function() { return this.connectedProxies.length > 0; }; /** * Figure out if the server instance was destroyed by calling destroy * @method * @return {boolean} */ Mongos.prototype.isDestroyed = function() { return this.state === DESTROYED; }; // // Operations // function executeWriteOperation(args, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // TODO: once we drop Node 4, use destructuring either here or in arguments. const self = args.self; const op = args.op; const ns = args.ns; const ops = args.ops; // Pick a server let server = pickProxy(self, options.session); // No server found error out if (!server) return callback(new MongoError('no mongos proxy available')); const willRetryWrite = !args.retrying && !!options.retryWrites && options.session && isRetryableWritesSupported(self) && !options.session.inTransaction(); const handler = (err, result) => { if (!err) return callback(null, result); if (!isRetryableError(err) || !willRetryWrite) { return callback(err); } // Pick another server server = pickProxy(self, options.session); // No server found error out with original error if (!server) { return callback(err); } const newArgs = Object.assign({}, args, { retrying: true }); return executeWriteOperation(newArgs, options, callback); }; if (callback.operationId) { handler.operationId = callback.operationId; } // increment and assign txnNumber if (willRetryWrite) { options.session.incrementTransactionNumber(); options.willRetryWrite = willRetryWrite; } // rerun the operation 
server[op](ns, ops, options, handler); } /** * Insert one or more documents * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of documents to insert * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ Mongos.prototype.insert = function(ns, ops, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); // Not connected but we have a disconnecthandler if (!this.isConnected() && this.s.disconnectHandler != null) { return this.s.disconnectHandler.add('insert', ns, ops, options, callback); } // No mongos proxy available if (!this.isConnected()) { return callback(new MongoError('no mongos proxy available')); } // Execute write operation executeWriteOperation({ self: this, op: 'insert', ns, ops }, options, callback); }; /** * Perform one or more update operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of updates * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. 
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ Mongos.prototype.update = function(ns, ops, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); // Not connected but we have a disconnecthandler if (!this.isConnected() && this.s.disconnectHandler != null) { return this.s.disconnectHandler.add('update', ns, ops, options, callback); } // No mongos proxy available if (!this.isConnected()) { return callback(new MongoError('no mongos proxy available')); } // Execute write operation executeWriteOperation({ self: this, op: 'update', ns, ops }, options, callback); }; /** * Perform one or more remove operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of removes * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ Mongos.prototype.remove = function(ns, ops, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); // Not connected but we have a disconnecthandler if (!this.isConnected() && this.s.disconnectHandler != null) { return this.s.disconnectHandler.add('remove', ns, ops, options, callback); } // No mongos proxy available if (!this.isConnected()) { return callback(new MongoError('no mongos proxy available')); } // Execute write operation executeWriteOperation({ self: this, op: 'remove', ns, ops }, options, callback); }; const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; function isWriteCommand(command) { return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); } /** * Execute a command * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Connection} [options.connection] Specify connection object to execute command against * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ Mongos.prototype.command = function(ns, cmd, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); var self = this; // Pick a proxy var server = pickProxy(self, options.session); // Topology is not connected, save the call in the provided store to be // Executed at some point when the handler deems it's reconnected if ((server == null || !server.isConnected()) && this.s.disconnectHandler != null) { return this.s.disconnectHandler.add('command', ns, cmd, options, callback); } // No server returned we had an error if (server == null) { return callback(new MongoError('no mongos proxy available')); } // Cloned options var clonedOptions = cloneOptions(options); clonedOptions.topology = self; const willRetryWrite = !options.retrying && options.retryWrites && options.session && isRetryableWritesSupported(self) && !options.session.inTransaction() && isWriteCommand(cmd); const cb = (err, result) => { if (!err) return callback(null, result); if (!isRetryableError(err)) { return callback(err); } if (willRetryWrite) { const newOptions = Object.assign({}, clonedOptions, { retrying: true }); return this.command(ns, cmd, newOptions, callback); } return callback(err); }; // increment and assign txnNumber if (willRetryWrite) { options.session.incrementTransactionNumber(); options.willRetryWrite = willRetryWrite; } // Execute the command server.command(ns, cmd, clonedOptions, cb); }; /** * Get a new cursor * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId * @param {object} [options] Options for the cursor * @param {object} [options.batchSize=0] Batchsize for the 
operation * @param {array} [options.documents=[]] Initial documents list for cursor * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {object} [options.topology] The internal topology of the created cursor * @returns {Cursor} */ Mongos.prototype.cursor = function(ns, cmd, options) { options = options || {}; const topology = options.topology || this; // Set up final cursor type var FinalCursor = options.cursorFactory || this.s.Cursor; // Return the cursor return new FinalCursor(this.s.bson, ns, cmd, options, topology, this.s.options); }; /** * Selects a server * * @method * @param {function} selector Unused * @param {ReadPreference} [options.readPreference] Unused * @param {ClientSession} [options.session] Specify a session if it is being used * @param {function} callback */ Mongos.prototype.selectServer = function(selector, options, callback) { if (typeof selector === 'function' && typeof callback === 'undefined') (callback = selector), (selector = undefined), (options = {}); if (typeof options === 'function') (callback = options), (options = selector), (selector = undefined); options = options || {}; const server = pickProxy(this, options.session); if (this.s.debug) this.emit('pickedServer', null, server); callback(null, server); }; /** * All raw connections * @method * @return {Connection[]} */ Mongos.prototype.connections = function() { var connections = []; for (var i = 0; i < this.connectedProxies.length; i++) { connections = connections.concat(this.connectedProxies[i].connections()); } return connections; }; function emitTopologyDescriptionChanged(self) { if 
(self.listeners('topologyDescriptionChanged').length > 0) { var topology = 'Unknown'; if (self.connectedProxies.length > 0) { topology = 'Sharded'; } // Generate description var description = { topologyType: topology, servers: [] }; // All proxies var proxies = self.disconnectedProxies.concat(self.connectingProxies); // Add all the disconnected proxies description.servers = description.servers.concat( proxies.map(function(x) { var description = x.getDescription(); description.type = 'Unknown'; return description; }) ); // Add all the connected proxies description.servers = description.servers.concat( self.connectedProxies.map(function(x) { var description = x.getDescription(); description.type = 'Mongos'; return description; }) ); // Get the diff var diffResult = diff(self.topologyDescription, description); // Create the result var result = { topologyId: self.id, previousDescription: self.topologyDescription, newDescription: description, diff: diffResult }; // Emit the topologyDescription change if (diffResult.servers.length > 0) { self.emit('topologyDescriptionChanged', result); } // Set the new description self.topologyDescription = description; } } /** * A mongos connect event, used to verify that the connection is up and running * * @event Mongos#connect * @type {Mongos} */ /** * A mongos reconnect event, used to verify that the mongos topology has reconnected * * @event Mongos#reconnect * @type {Mongos} */ /** * A mongos fullsetup event, used to signal that all topology members have been contacted. * * @event Mongos#fullsetup * @type {Mongos} */ /** * A mongos all event, used to signal that all topology members have been contacted. 
* * @event Mongos#all * @type {Mongos} */ /** * A server member left the mongos list * * @event Mongos#left * @type {Mongos} * @param {string} type The type of member that left (mongos) * @param {Server} server The server object that left */ /** * A server member joined the mongos list * * @event Mongos#joined * @type {Mongos} * @param {string} type The type of member that left (mongos) * @param {Server} server The server object that joined */ /** * A server opening SDAM monitoring event * * @event Mongos#serverOpening * @type {object} */ /** * A server closed SDAM monitoring event * * @event Mongos#serverClosed * @type {object} */ /** * A server description SDAM change monitoring event * * @event Mongos#serverDescriptionChanged * @type {object} */ /** * A topology open SDAM event * * @event Mongos#topologyOpening * @type {object} */ /** * A topology closed SDAM event * * @event Mongos#topologyClosed * @type {object} */ /** * A topology structure SDAM change event * * @event Mongos#topologyDescriptionChanged * @type {object} */ /** * A topology serverHeartbeatStarted SDAM event * * @event Mongos#serverHeartbeatStarted * @type {object} */ /** * A topology serverHeartbeatFailed SDAM event * * @event Mongos#serverHeartbeatFailed * @type {object} */ /** * A topology serverHeartbeatSucceeded SDAM change event * * @event Mongos#serverHeartbeatSucceeded * @type {object} */ /** * An event emitted indicating a command was started, if command monitoring is enabled * * @event Mongos#commandStarted * @type {object} */ /** * An event emitted indicating a command succeeded, if command monitoring is enabled * * @event Mongos#commandSucceeded * @type {object} */ /** * An event emitted indicating a command failed, if command monitoring is enabled * * @event Mongos#commandFailed * @type {object} */ module.exports = Mongos; package/lib/topologies/read_preference.js000644 0000013070 3560116604 015662 0ustar00000000 000000 'use strict'; /** * The **ReadPreference** class is a class 
that represents a MongoDB ReadPreference and is
 * used to construct connections.
 * @class
 * @param {string} mode A string describing the read preference mode (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
 * @param {array} tags The tags object
 * @param {object} [options] Additional read preference options
 * @param {number} [options.maxStalenessSeconds] Max secondary read staleness in seconds, Minimum value is 90 seconds.
 * @return {ReadPreference}
 * @example
 * const ReplSet = require('mongodb-core').ReplSet,
 *   ReadPreference = require('mongodb-core').ReadPreference,
 *   assert = require('assert');
 *
 * const server = new ReplSet([{host: 'localhost', port: 30000}], {setName: 'rs'});
 * // Wait for the connection event
 * server.on('connect', function(server) {
 *   const cursor = server.cursor(
 *     'db.test',
 *     { find: 'db.test', query: {} },
 *     { readPreference: new ReadPreference('secondary') }
 *   );
 *
 *   cursor.next(function(err, doc) {
 *     server.destroy();
 *   });
 * });
 *
 * // Start connecting
 * server.connect();
 * @see https://docs.mongodb.com/manual/core/read-preference/
 */
const ReadPreference = function(mode, tags, options) {
  // TODO(major): tags MUST be an array of tagsets
  if (tags && !Array.isArray(tags)) {
    // Legacy callers passed a single tagset object (or even the options
    // object) in the tags position; warn and normalize.
    console.warn(
      'ReadPreference tags must be an array, this will change in the next major version'
    );

    if (typeof tags.maxStalenessSeconds !== 'undefined') {
      // this is likely an options object
      options = tags;
      tags = undefined;
    } else {
      tags = [tags];
    }
  }

  this.mode = mode;
  this.tags = tags;

  options = options || {};
  if (options.maxStalenessSeconds != null) {
    if (options.maxStalenessSeconds <= 0) {
      throw new TypeError('maxStalenessSeconds must be a positive integer');
    }

    this.maxStalenessSeconds = options.maxStalenessSeconds;

    // NOTE: The minimum required wire version is 5 for this read preference. If the existing
    // topology has a lower value then a MongoError will be thrown during server selection.
    this.minWireVersion = 5;
  }

  if (this.mode === ReadPreference.PRIMARY || this.mode === true) {
    if (this.tags && Array.isArray(this.tags) && this.tags.length > 0) {
      throw new TypeError('Primary read preference cannot be combined with tags');
    }

    if (this.maxStalenessSeconds) {
      throw new TypeError('Primary read preference cannot be combined with maxStalenessSeconds');
    }
  }
};

// Support the deprecated `preference` property introduced in the porcelain layer
Object.defineProperty(ReadPreference.prototype, 'preference', {
  enumerable: true,
  get: function() {
    return this.mode;
  }
});

/*
 * Read preference mode constants
 */
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest';

// Every legal value for `mode`, including the legacy boolean/null forms
const VALID_MODES = [
  ReadPreference.PRIMARY,
  ReadPreference.PRIMARY_PREFERRED,
  ReadPreference.SECONDARY,
  ReadPreference.SECONDARY_PREFERRED,
  ReadPreference.NEAREST,
  true,
  false,
  null
];

/**
 * Validate if a mode is legal
 *
 * @method
 * @param {string} mode The string representing the read preference mode.
 * @return {boolean} True if a mode is valid
 */
ReadPreference.isValid = function(mode) {
  return VALID_MODES.indexOf(mode) !== -1;
};

/**
 * Validate if a mode is legal
 *
 * @method
 * @param {string} [mode] The string representing the read preference mode; defaults to this instance's mode.
 * @return {boolean} True if a mode is valid
 */
ReadPreference.prototype.isValid = function(mode) {
  return ReadPreference.isValid(typeof mode === 'string' ?
mode : this.mode); }; const needSlaveOk = ['primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest']; /** * Indicates that this readPreference needs the "slaveOk" bit when sent over the wire * @method * @return {boolean} * @see https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#op-query */ ReadPreference.prototype.slaveOk = function() { return needSlaveOk.indexOf(this.mode) !== -1; }; /** * Are the two read preference equal * @method * @param {ReadPreference} readPreference The read preference with which to check equality * @return {boolean} True if the two ReadPreferences are equivalent */ ReadPreference.prototype.equals = function(readPreference) { return readPreference.mode === this.mode; }; /** * Return JSON representation * @method * @return {Object} A JSON representation of the ReadPreference */ ReadPreference.prototype.toJSON = function() { const readPreference = { mode: this.mode }; if (Array.isArray(this.tags)) readPreference.tags = this.tags; if (this.maxStalenessSeconds) readPreference.maxStalenessSeconds = this.maxStalenessSeconds; return readPreference; }; /** * Primary read preference * @member * @type {ReadPreference} */ ReadPreference.primary = new ReadPreference('primary'); /** * Primary Preferred read preference * @member * @type {ReadPreference} */ ReadPreference.primaryPreferred = new ReadPreference('primaryPreferred'); /** * Secondary read preference * @member * @type {ReadPreference} */ ReadPreference.secondary = new ReadPreference('secondary'); /** * Secondary Preferred read preference * @member * @type {ReadPreference} */ ReadPreference.secondaryPreferred = new ReadPreference('secondaryPreferred'); /** * Nearest read preference * @member * @type {ReadPreference} */ ReadPreference.nearest = new ReadPreference('nearest'); module.exports = ReadPreference; package/lib/topologies/replset_state.js000644 0000076301 3560116604 015435 0ustar00000000 000000 'use strict'; var inherits = require('util').inherits, f = 
require('util').format, diff = require('./shared').diff, EventEmitter = require('events').EventEmitter, Logger = require('../connection/logger'), ReadPreference = require('./read_preference'), MongoError = require('../error').MongoError, Buffer = require('safe-buffer').Buffer; var TopologyType = { Single: 'Single', ReplicaSetNoPrimary: 'ReplicaSetNoPrimary', ReplicaSetWithPrimary: 'ReplicaSetWithPrimary', Sharded: 'Sharded', Unknown: 'Unknown' }; var ServerType = { Standalone: 'Standalone', Mongos: 'Mongos', PossiblePrimary: 'PossiblePrimary', RSPrimary: 'RSPrimary', RSSecondary: 'RSSecondary', RSArbiter: 'RSArbiter', RSOther: 'RSOther', RSGhost: 'RSGhost', Unknown: 'Unknown' }; var ReplSetState = function(options) { options = options || {}; // Add event listener EventEmitter.call(this); // Topology state this.topologyType = TopologyType.ReplicaSetNoPrimary; this.setName = options.setName; // Server set this.set = {}; // Unpacked options this.id = options.id; this.setName = options.setName; // Replicaset logger this.logger = options.logger || Logger('ReplSet', options); // Server selection index this.index = 0; // Acceptable latency this.acceptableLatency = options.acceptableLatency || 15; // heartbeatFrequencyMS this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; // Server side this.primary = null; this.secondaries = []; this.arbiters = []; this.passives = []; this.ghosts = []; // Current unknown hosts this.unknownServers = []; // In set status this.set = {}; // Status this.maxElectionId = null; this.maxSetVersion = 0; // Description of the Replicaset this.replicasetDescription = { topologyType: 'Unknown', servers: [] }; this.logicalSessionTimeoutMinutes = undefined; }; inherits(ReplSetState, EventEmitter); ReplSetState.prototype.hasPrimaryAndSecondary = function() { return this.primary != null && this.secondaries.length > 0; }; ReplSetState.prototype.hasPrimaryOrSecondary = function() { return this.hasPrimary() || this.hasSecondary(); }; 
ReplSetState.prototype.hasPrimary = function() { return this.primary != null; }; ReplSetState.prototype.hasSecondary = function() { return this.secondaries.length > 0; }; ReplSetState.prototype.get = function(host) { var servers = this.allServers(); for (var i = 0; i < servers.length; i++) { if (servers[i].name.toLowerCase() === host.toLowerCase()) { return servers[i]; } } return null; }; ReplSetState.prototype.allServers = function(options) { options = options || {}; var servers = this.primary ? [this.primary] : []; servers = servers.concat(this.secondaries); if (!options.ignoreArbiters) servers = servers.concat(this.arbiters); servers = servers.concat(this.passives); return servers; }; ReplSetState.prototype.destroy = function(options, callback) { const serversToDestroy = this.secondaries .concat(this.arbiters) .concat(this.passives) .concat(this.ghosts); if (this.primary) serversToDestroy.push(this.primary); let serverCount = serversToDestroy.length; const serverDestroyed = () => { serverCount--; if (serverCount > 0) { return; } // Clear out the complete state this.secondaries = []; this.arbiters = []; this.passives = []; this.ghosts = []; this.unknownServers = []; this.set = {}; this.primary = null; // Emit the topology changed emitTopologyDescriptionChanged(this); if (typeof callback === 'function') { callback(null, null); } }; if (serverCount === 0) { serverDestroyed(); return; } serversToDestroy.forEach(server => server.destroy(options, serverDestroyed)); }; ReplSetState.prototype.remove = function(server, options) { options = options || {}; // Get the server name and lowerCase it var serverName = server.name.toLowerCase(); // Only remove if the current server is not connected var servers = this.primary ? 
[this.primary] : []; servers = servers.concat(this.secondaries); servers = servers.concat(this.arbiters); servers = servers.concat(this.passives); // Check if it's active and this is just a failed connection attempt for (var i = 0; i < servers.length; i++) { if ( !options.force && servers[i].equals(server) && servers[i].isConnected && servers[i].isConnected() ) { return; } } // If we have it in the set remove it if (this.set[serverName]) { this.set[serverName].type = ServerType.Unknown; this.set[serverName].electionId = null; this.set[serverName].setName = null; this.set[serverName].setVersion = null; } // Remove type var removeType = null; // Remove from any lists if (this.primary && this.primary.equals(server)) { this.primary = null; this.topologyType = TopologyType.ReplicaSetNoPrimary; removeType = 'primary'; } // Remove from any other server lists removeType = removeFrom(server, this.secondaries) ? 'secondary' : removeType; removeType = removeFrom(server, this.arbiters) ? 'arbiter' : removeType; removeType = removeFrom(server, this.passives) ? 'secondary' : removeType; removeFrom(server, this.ghosts); removeFrom(server, this.unknownServers); // Push to unknownServers this.unknownServers.push(serverName); // Do we have a removeType if (removeType) { this.emit('left', removeType, server); } }; const isArbiter = ismaster => ismaster.arbiterOnly && ismaster.setName; ReplSetState.prototype.update = function(server) { var self = this; // Get the current ismaster var ismaster = server.lastIsMaster(); // Get the server name and lowerCase it var serverName = server.name.toLowerCase(); // // Add any hosts // if (ismaster) { // Join all the possible new hosts var hosts = Array.isArray(ismaster.hosts) ? ismaster.hosts : []; hosts = hosts.concat(Array.isArray(ismaster.arbiters) ? ismaster.arbiters : []); hosts = hosts.concat(Array.isArray(ismaster.passives) ? 
ismaster.passives : []); hosts = hosts.map(function(s) { return s.toLowerCase(); }); // Add all hosts as unknownServers for (var i = 0; i < hosts.length; i++) { // Add to the list of unknown server if ( this.unknownServers.indexOf(hosts[i]) === -1 && (!this.set[hosts[i]] || this.set[hosts[i]].type === ServerType.Unknown) ) { this.unknownServers.push(hosts[i].toLowerCase()); } if (!this.set[hosts[i]]) { this.set[hosts[i]] = { type: ServerType.Unknown, electionId: null, setName: null, setVersion: null }; } } } // // Unknown server // if (!ismaster && !inList(ismaster, server, this.unknownServers)) { self.set[serverName] = { type: ServerType.Unknown, setVersion: null, electionId: null, setName: null }; // Update set information about the server instance self.set[serverName].type = ServerType.Unknown; self.set[serverName].electionId = ismaster ? ismaster.electionId : ismaster; self.set[serverName].setName = ismaster ? ismaster.setName : ismaster; self.set[serverName].setVersion = ismaster ? 
ismaster.setVersion : ismaster; if (self.unknownServers.indexOf(server.name) === -1) { self.unknownServers.push(serverName); } // Set the topology return false; } // Update logicalSessionTimeoutMinutes if (ismaster.logicalSessionTimeoutMinutes !== undefined && !isArbiter(ismaster)) { if ( self.logicalSessionTimeoutMinutes === undefined || ismaster.logicalSessionTimeoutMinutes === null ) { self.logicalSessionTimeoutMinutes = ismaster.logicalSessionTimeoutMinutes; } else { self.logicalSessionTimeoutMinutes = Math.min( self.logicalSessionTimeoutMinutes, ismaster.logicalSessionTimeoutMinutes ); } } // // Is this a mongos // if (ismaster && ismaster.msg === 'isdbgrid') { if (this.primary && this.primary.name === serverName) { this.primary = null; this.topologyType = TopologyType.ReplicaSetNoPrimary; } return false; } // A RSGhost instance if (ismaster.isreplicaset) { self.set[serverName] = { type: ServerType.RSGhost, setVersion: null, electionId: null, setName: ismaster.setName }; if (this.primary && this.primary.name === serverName) { this.primary = null; } // Set the topology this.topologyType = this.primary ? TopologyType.ReplicaSetWithPrimary : TopologyType.ReplicaSetNoPrimary; if (ismaster.setName) this.setName = ismaster.setName; // Set the topology return false; } // A RSOther instance if ( (ismaster.setName && ismaster.hidden) || (ismaster.setName && !ismaster.ismaster && !ismaster.secondary && !ismaster.arbiterOnly && !ismaster.passive) ) { self.set[serverName] = { type: ServerType.RSOther, setVersion: null, electionId: null, setName: ismaster.setName }; // Set the topology this.topologyType = this.primary ? TopologyType.ReplicaSetWithPrimary : TopologyType.ReplicaSetNoPrimary; if (ismaster.setName) this.setName = ismaster.setName; return false; } // // Standalone server, destroy and return // if (ismaster && ismaster.ismaster && !ismaster.setName) { this.topologyType = this.primary ? 
TopologyType.ReplicaSetWithPrimary : TopologyType.Unknown; this.remove(server, { force: true }); return false; } // // Server in maintanance mode // if (ismaster && !ismaster.ismaster && !ismaster.secondary && !ismaster.arbiterOnly) { this.remove(server, { force: true }); return false; } // // If the .me field does not match the passed in server // if (ismaster.me && ismaster.me.toLowerCase() !== serverName) { if (this.logger.isWarn()) { this.logger.warn( f( 'the seedlist server was removed due to its address %s not matching its ismaster.me address %s', server.name, ismaster.me ) ); } // Delete from the set delete this.set[serverName]; // Delete unknown servers removeFrom(server, self.unknownServers); // Destroy the instance server.destroy(); // Set the type of topology we have if (this.primary && !this.primary.equals(server)) { this.topologyType = TopologyType.ReplicaSetWithPrimary; } else { this.topologyType = TopologyType.ReplicaSetNoPrimary; } // // We have a potential primary // if (!this.primary && ismaster.primary) { this.set[ismaster.primary.toLowerCase()] = { type: ServerType.PossiblePrimary, setName: null, electionId: null, setVersion: null }; } return false; } // // Primary handling // if (!this.primary && ismaster.ismaster && ismaster.setName) { var ismasterElectionId = server.lastIsMaster().electionId; if (this.setName && this.setName !== ismaster.setName) { this.topologyType = TopologyType.ReplicaSetNoPrimary; return new MongoError( f( 'setName from ismaster does not match provided connection setName [%s] != [%s]', ismaster.setName, this.setName ) ); } if (!this.maxElectionId && ismasterElectionId) { this.maxElectionId = ismasterElectionId; } else if (this.maxElectionId && ismasterElectionId) { var result = compareObjectIds(this.maxElectionId, ismasterElectionId); // Get the electionIds var ismasterSetVersion = server.lastIsMaster().setVersion; if (result === 1) { this.topologyType = TopologyType.ReplicaSetNoPrimary; return false; } else if (result 
=== 0 && ismasterSetVersion) { if (ismasterSetVersion < this.maxSetVersion) { this.topologyType = TopologyType.ReplicaSetNoPrimary; return false; } } this.maxSetVersion = ismasterSetVersion; this.maxElectionId = ismasterElectionId; } // Hande normalization of server names var normalizedHosts = ismaster.hosts.map(function(x) { return x.toLowerCase(); }); var locationIndex = normalizedHosts.indexOf(serverName); // Validate that the server exists in the host list if (locationIndex !== -1) { self.primary = server; self.set[serverName] = { type: ServerType.RSPrimary, setVersion: ismaster.setVersion, electionId: ismaster.electionId, setName: ismaster.setName }; // Set the topology this.topologyType = TopologyType.ReplicaSetWithPrimary; if (ismaster.setName) this.setName = ismaster.setName; removeFrom(server, self.unknownServers); removeFrom(server, self.secondaries); removeFrom(server, self.passives); self.emit('joined', 'primary', server); } else { this.topologyType = TopologyType.ReplicaSetNoPrimary; } emitTopologyDescriptionChanged(self); return true; } else if (ismaster.ismaster && ismaster.setName) { // Get the electionIds var currentElectionId = self.set[self.primary.name.toLowerCase()].electionId; var currentSetVersion = self.set[self.primary.name.toLowerCase()].setVersion; var currentSetName = self.set[self.primary.name.toLowerCase()].setName; ismasterElectionId = server.lastIsMaster().electionId; ismasterSetVersion = server.lastIsMaster().setVersion; var ismasterSetName = server.lastIsMaster().setName; // Is it the same server instance if (this.primary.equals(server) && currentSetName === ismasterSetName) { return false; } // If we do not have the same rs name if (currentSetName && currentSetName !== ismasterSetName) { if (!this.primary.equals(server)) { this.topologyType = TopologyType.ReplicaSetWithPrimary; } else { this.topologyType = TopologyType.ReplicaSetNoPrimary; } return false; } // Check if we need to replace the server if (currentElectionId && 
ismasterElectionId) { result = compareObjectIds(currentElectionId, ismasterElectionId); if (result === 1) { return false; } else if (result === 0 && currentSetVersion > ismasterSetVersion) { return false; } } else if (!currentElectionId && ismasterElectionId && ismasterSetVersion) { if (ismasterSetVersion < this.maxSetVersion) { return false; } } if (!this.maxElectionId && ismasterElectionId) { this.maxElectionId = ismasterElectionId; } else if (this.maxElectionId && ismasterElectionId) { result = compareObjectIds(this.maxElectionId, ismasterElectionId); if (result === 1) { return false; } else if (result === 0 && currentSetVersion && ismasterSetVersion) { if (ismasterSetVersion < this.maxSetVersion) { return false; } } else { if (ismasterSetVersion < this.maxSetVersion) { return false; } } this.maxElectionId = ismasterElectionId; this.maxSetVersion = ismasterSetVersion; } else { this.maxSetVersion = ismasterSetVersion; } // Modify the entry to unknown self.set[self.primary.name.toLowerCase()] = { type: ServerType.Unknown, setVersion: null, electionId: null, setName: null }; // Signal primary left self.emit('left', 'primary', this.primary); // Destroy the instance self.primary.destroy(); // Set the new instance self.primary = server; // Set the set information self.set[serverName] = { type: ServerType.RSPrimary, setVersion: ismaster.setVersion, electionId: ismaster.electionId, setName: ismaster.setName }; // Set the topology this.topologyType = TopologyType.ReplicaSetWithPrimary; if (ismaster.setName) this.setName = ismaster.setName; removeFrom(server, self.unknownServers); removeFrom(server, self.secondaries); removeFrom(server, self.passives); self.emit('joined', 'primary', server); emitTopologyDescriptionChanged(self); return true; } // A possible instance if (!this.primary && ismaster.primary) { self.set[ismaster.primary.toLowerCase()] = { type: ServerType.PossiblePrimary, setVersion: null, electionId: null, setName: null }; } // // Secondary handling // if ( 
ismaster.secondary && ismaster.setName && !inList(ismaster, server, this.secondaries) && this.setName && this.setName === ismaster.setName ) { addToList(self, ServerType.RSSecondary, ismaster, server, this.secondaries); // Set the topology this.topologyType = this.primary ? TopologyType.ReplicaSetWithPrimary : TopologyType.ReplicaSetNoPrimary; if (ismaster.setName) this.setName = ismaster.setName; removeFrom(server, self.unknownServers); // Remove primary if (this.primary && this.primary.name.toLowerCase() === serverName) { server.destroy(); this.primary = null; self.emit('left', 'primary', server); } // Emit secondary joined replicaset self.emit('joined', 'secondary', server); emitTopologyDescriptionChanged(self); return true; } // // Arbiter handling // if ( isArbiter(ismaster) && !inList(ismaster, server, this.arbiters) && this.setName && this.setName === ismaster.setName ) { addToList(self, ServerType.RSArbiter, ismaster, server, this.arbiters); // Set the topology this.topologyType = this.primary ? TopologyType.ReplicaSetWithPrimary : TopologyType.ReplicaSetNoPrimary; if (ismaster.setName) this.setName = ismaster.setName; removeFrom(server, self.unknownServers); self.emit('joined', 'arbiter', server); emitTopologyDescriptionChanged(self); return true; } // // Passive handling // if ( ismaster.passive && ismaster.setName && !inList(ismaster, server, this.passives) && this.setName && this.setName === ismaster.setName ) { addToList(self, ServerType.RSSecondary, ismaster, server, this.passives); // Set the topology this.topologyType = this.primary ? 
TopologyType.ReplicaSetWithPrimary : TopologyType.ReplicaSetNoPrimary; if (ismaster.setName) this.setName = ismaster.setName; removeFrom(server, self.unknownServers); // Remove primary if (this.primary && this.primary.name.toLowerCase() === serverName) { server.destroy(); this.primary = null; self.emit('left', 'primary', server); } self.emit('joined', 'secondary', server); emitTopologyDescriptionChanged(self); return true; } // // Remove the primary // if (this.set[serverName] && this.set[serverName].type === ServerType.RSPrimary) { self.emit('left', 'primary', this.primary); this.primary.destroy(); this.primary = null; this.topologyType = TopologyType.ReplicaSetNoPrimary; return false; } this.topologyType = this.primary ? TopologyType.ReplicaSetWithPrimary : TopologyType.ReplicaSetNoPrimary; return false; }; /** * Recalculate single server max staleness * @method */ ReplSetState.prototype.updateServerMaxStaleness = function(server, haInterval) { // Locate the max secondary lastwrite var max = 0; // Go over all secondaries for (var i = 0; i < this.secondaries.length; i++) { max = Math.max(max, this.secondaries[i].lastWriteDate); } // Perform this servers staleness calculation if (server.ismaster.maxWireVersion >= 5 && server.ismaster.secondary && this.hasPrimary()) { server.staleness = server.lastUpdateTime - server.lastWriteDate - (this.primary.lastUpdateTime - this.primary.lastWriteDate) + haInterval; } else if (server.ismaster.maxWireVersion >= 5 && server.ismaster.secondary) { server.staleness = max - server.lastWriteDate + haInterval; } }; /** * Recalculate all the staleness values for secodaries * @method */ ReplSetState.prototype.updateSecondariesMaxStaleness = function(haInterval) { for (var i = 0; i < this.secondaries.length; i++) { this.updateServerMaxStaleness(this.secondaries[i], haInterval); } }; /** * Pick a server by the passed in ReadPreference * @method * @param {ReadPreference} readPreference The ReadPreference instance to use */ 
ReplSetState.prototype.pickServer = function(readPreference) { // If no read Preference set to primary by default readPreference = readPreference || ReadPreference.primary; // maxStalenessSeconds is not allowed with a primary read if (readPreference.preference === 'primary' && readPreference.maxStalenessSeconds != null) { return new MongoError('primary readPreference incompatible with maxStalenessSeconds'); } // Check if we have any non compatible servers for maxStalenessSeconds var allservers = this.primary ? [this.primary] : []; allservers = allservers.concat(this.secondaries); // Does any of the servers not support the right wire protocol version // for maxStalenessSeconds when maxStalenessSeconds specified on readPreference. Then error out if (readPreference.maxStalenessSeconds != null) { for (var i = 0; i < allservers.length; i++) { if (allservers[i].ismaster.maxWireVersion < 5) { return new MongoError( 'maxStalenessSeconds not supported by at least one of the replicaset members' ); } } } // Do we have the nearest readPreference if (readPreference.preference === 'nearest' && readPreference.maxStalenessSeconds == null) { return pickNearest(this, readPreference); } else if ( readPreference.preference === 'nearest' && readPreference.maxStalenessSeconds != null ) { return pickNearestMaxStalenessSeconds(this, readPreference); } // Get all the secondaries var secondaries = this.secondaries; // Check if we can satisfy and of the basic read Preferences if (readPreference.equals(ReadPreference.secondary) && secondaries.length === 0) { return new MongoError('no secondary server available'); } if ( readPreference.equals(ReadPreference.secondaryPreferred) && secondaries.length === 0 && this.primary == null ) { return new MongoError('no secondary or primary server available'); } if (readPreference.equals(ReadPreference.primary) && this.primary == null) { return new MongoError('no primary server available'); } // Secondary preferred or just secondaries if ( 
readPreference.equals(ReadPreference.secondaryPreferred) || readPreference.equals(ReadPreference.secondary) ) { if (secondaries.length > 0 && readPreference.maxStalenessSeconds == null) { // Pick nearest of any other servers available var server = pickNearest(this, readPreference); // No server in the window return primary if (server) { return server; } } else if (secondaries.length > 0 && readPreference.maxStalenessSeconds != null) { // Pick nearest of any other servers available server = pickNearestMaxStalenessSeconds(this, readPreference); // No server in the window return primary if (server) { return server; } } if (readPreference.equals(ReadPreference.secondaryPreferred)) { return this.primary; } return null; } // Primary preferred if (readPreference.equals(ReadPreference.primaryPreferred)) { server = null; // We prefer the primary if it's available if (this.primary) { return this.primary; } // Pick a secondary if (secondaries.length > 0 && readPreference.maxStalenessSeconds == null) { server = pickNearest(this, readPreference); } else if (secondaries.length > 0 && readPreference.maxStalenessSeconds != null) { server = pickNearestMaxStalenessSeconds(this, readPreference); } // Did we find a server if (server) return server; } // Return the primary return this.primary; }; // // Filter serves by tags var filterByTags = function(readPreference, servers) { if (readPreference.tags == null) return servers; var filteredServers = []; var tagsArray = Array.isArray(readPreference.tags) ? 
readPreference.tags : [readPreference.tags]; // Iterate over the tags for (var j = 0; j < tagsArray.length; j++) { var tags = tagsArray[j]; // Iterate over all the servers for (var i = 0; i < servers.length; i++) { var serverTag = servers[i].lastIsMaster().tags || {}; // Did we find the a matching server var found = true; // Check if the server is valid for (var name in tags) { if (serverTag[name] !== tags[name]) { found = false; } } // Add to candidate list if (found) { filteredServers.push(servers[i]); } } } // Returned filtered servers return filteredServers; }; function pickNearestMaxStalenessSeconds(self, readPreference) { // Only get primary and secondaries as seeds var servers = []; // Get the maxStalenessMS var maxStalenessMS = readPreference.maxStalenessSeconds * 1000; // Check if the maxStalenessMS > 90 seconds if (maxStalenessMS < 90 * 1000) { return new MongoError('maxStalenessSeconds must be set to at least 90 seconds'); } // Add primary to list if not a secondary read preference if ( self.primary && readPreference.preference !== 'secondary' && readPreference.preference !== 'secondaryPreferred' ) { servers.push(self.primary); } // Add all the secondaries for (var i = 0; i < self.secondaries.length; i++) { servers.push(self.secondaries[i]); } // If we have a secondaryPreferred readPreference and no server add the primary if (self.primary && servers.length === 0 && readPreference.preference !== 'secondaryPreferred') { servers.push(self.primary); } // Filter by tags servers = filterByTags(readPreference, servers); // Filter by latency servers = servers.filter(function(s) { return s.staleness <= maxStalenessMS; }); // Sort by time servers.sort(function(a, b) { return a.lastIsMasterMS - b.lastIsMasterMS; }); // No servers, default to primary if (servers.length === 0) { return null; } // Ensure index does not overflow the number of available servers self.index = self.index % servers.length; // Get the server var server = servers[self.index]; // Add to the 
index self.index = self.index + 1; // Return the first server of the sorted and filtered list return server; } function pickNearest(self, readPreference) { // Only get primary and secondaries as seeds var servers = []; // Add primary to list if not a secondary read preference if ( self.primary && readPreference.preference !== 'secondary' && readPreference.preference !== 'secondaryPreferred' ) { servers.push(self.primary); } // Add all the secondaries for (var i = 0; i < self.secondaries.length; i++) { servers.push(self.secondaries[i]); } // If we have a secondaryPreferred readPreference and no server add the primary if (servers.length === 0 && self.primary && readPreference.preference !== 'secondaryPreferred') { servers.push(self.primary); } // Filter by tags servers = filterByTags(readPreference, servers); // Sort by time servers.sort(function(a, b) { return a.lastIsMasterMS - b.lastIsMasterMS; }); // Locate lowest time (picked servers are lowest time + acceptable Latency margin) var lowest = servers.length > 0 ? servers[0].lastIsMasterMS : 0; // Filter by latency servers = servers.filter(function(s) { return s.lastIsMasterMS <= lowest + self.acceptableLatency; }); // No servers, default to primary if (servers.length === 0) { return null; } // Ensure index does not overflow the number of available servers self.index = self.index % servers.length; // Get the server var server = servers[self.index]; // Add to the index self.index = self.index + 1; // Return the first server of the sorted and filtered list return server; } function inList(ismaster, server, list) { for (var i = 0; i < list.length; i++) { if (list[i] && list[i].name && list[i].name.toLowerCase() === server.name.toLowerCase()) return true; } return false; } function addToList(self, type, ismaster, server, list) { var serverName = server.name.toLowerCase(); // Update set information about the server instance self.set[serverName].type = type; self.set[serverName].electionId = ismaster ? 
ismaster.electionId : ismaster; self.set[serverName].setName = ismaster ? ismaster.setName : ismaster; self.set[serverName].setVersion = ismaster ? ismaster.setVersion : ismaster; // Add to the list list.push(server); } function compareObjectIds(id1, id2) { var a = Buffer.from(id1.toHexString(), 'hex'); var b = Buffer.from(id2.toHexString(), 'hex'); if (a === b) { return 0; } if (typeof Buffer.compare === 'function') { return Buffer.compare(a, b); } var x = a.length; var y = b.length; var len = Math.min(x, y); for (var i = 0; i < len; i++) { if (a[i] !== b[i]) { break; } } if (i !== len) { x = a[i]; y = b[i]; } return x < y ? -1 : y < x ? 1 : 0; } function removeFrom(server, list) { for (var i = 0; i < list.length; i++) { if (list[i].equals && list[i].equals(server)) { list.splice(i, 1); return true; } else if (typeof list[i] === 'string' && list[i].toLowerCase() === server.name.toLowerCase()) { list.splice(i, 1); return true; } } return false; } function emitTopologyDescriptionChanged(self) { if (self.listeners('topologyDescriptionChanged').length > 0) { var topology = 'Unknown'; var setName = self.setName; if (self.hasPrimaryAndSecondary()) { topology = 'ReplicaSetWithPrimary'; } else if (!self.hasPrimary() && self.hasSecondary()) { topology = 'ReplicaSetNoPrimary'; } // Generate description var description = { topologyType: topology, setName: setName, servers: [] }; // Add the primary to the list if (self.hasPrimary()) { var desc = self.primary.getDescription(); desc.type = 'RSPrimary'; description.servers.push(desc); } // Add all the secondaries description.servers = description.servers.concat( self.secondaries.map(function(x) { var description = x.getDescription(); description.type = 'RSSecondary'; return description; }) ); // Add all the arbiters description.servers = description.servers.concat( self.arbiters.map(function(x) { var description = x.getDescription(); description.type = 'RSArbiter'; return description; }) ); // Add all the passives 
description.servers = description.servers.concat( self.passives.map(function(x) { var description = x.getDescription(); description.type = 'RSSecondary'; return description; }) ); // Get the diff var diffResult = diff(self.replicasetDescription, description); // Create the result var result = { topologyId: self.id, previousDescription: self.replicasetDescription, newDescription: description, diff: diffResult }; // Emit the topologyDescription change // if(diffResult.servers.length > 0) { self.emit('topologyDescriptionChanged', result); // } // Set the new description self.replicasetDescription = description; } } module.exports = ReplSetState; package/lib/topologies/replset.js000644 0000134772 3560116604 014244 0ustar00000000 000000 'use strict'; const inherits = require('util').inherits; const f = require('util').format; const EventEmitter = require('events').EventEmitter; const ReadPreference = require('./read_preference'); const BasicCursor = require('../cursor'); const retrieveBSON = require('../connection/utils').retrieveBSON; const Logger = require('../connection/logger'); const MongoError = require('../error').MongoError; const Server = require('./server'); const ReplSetState = require('./replset_state'); const clone = require('./shared').clone; const Timeout = require('./shared').Timeout; const Interval = require('./shared').Interval; const createClientInfo = require('./shared').createClientInfo; const SessionMixins = require('./shared').SessionMixins; const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; const relayEvents = require('../utils').relayEvents; const isRetryableError = require('../error').isRetryableError; const BSON = retrieveBSON(); // // States var DISCONNECTED = 'disconnected'; var CONNECTING = 'connecting'; var CONNECTED = 'connected'; var UNREFERENCED = 'unreferenced'; var DESTROYED = 'destroyed'; function stateTransition(self, newState) { var legalTransitions = { disconnected: [CONNECTING, DESTROYED, 
DISCONNECTED], connecting: [CONNECTING, DESTROYED, CONNECTED, DISCONNECTED], connected: [CONNECTED, DISCONNECTED, DESTROYED, UNREFERENCED], unreferenced: [UNREFERENCED, DESTROYED], destroyed: [DESTROYED] }; // Get current state var legalStates = legalTransitions[self.state]; if (legalStates && legalStates.indexOf(newState) !== -1) { self.state = newState; } else { self.s.logger.error( f( 'Pool with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]', self.id, self.state, newState, legalStates ) ); } } // // ReplSet instance id var id = 1; var handlers = ['connect', 'close', 'error', 'timeout', 'parseError']; /** * Creates a new Replset instance * @class * @param {array} seedlist A list of seeds for the replicaset * @param {boolean} options.setName The Replicaset set name * @param {boolean} [options.secondaryOnlyConnectionAllowed=false] Allow connection to a secondary only replicaset * @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry * @param {boolean} [options.emitError=false] Server will emit errors events * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors * @param {number} [options.size=5] Server connection pool size * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {number} [options.connectionTimeout=10000] TCP Connection timeout setting * @param {number} [options.socketTimeout=0] TCP Socket timeout setting * @param {boolean} [options.ssl=false] Use SSL for connection * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. 
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
 * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer
 * @param {Buffer} [options.cert] SSL Certificate binary buffer
 * @param {Buffer} [options.key] SSL Key file binary buffer
 * @param {string} [options.passphrase] SSL Certificate pass phrase
 * @param {string} [options.servername=null] String containing the server name requested via TLS SNI.
 * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates
 * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
 * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
 * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
 * @param {number} [options.pingInterval=5000] Ping interval to check the response time to the different servers
 * @param {number} [options.localThresholdMS=15] Cutoff latency point in MS for Replicaset member selection
 * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit.
 * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology
 * @return {ReplSet} A ReplSet instance
 * @fires ReplSet#connect
 * @fires ReplSet#ha
 * @fires ReplSet#joined
 * @fires ReplSet#left
 * @fires ReplSet#failed
 * @fires ReplSet#fullsetup
 * @fires ReplSet#all
 * @fires ReplSet#error
 * @fires ReplSet#serverHeartbeatStarted
 * @fires ReplSet#serverHeartbeatSucceeded
 * @fires ReplSet#serverHeartbeatFailed
 * @fires ReplSet#topologyOpening
 * @fires ReplSet#topologyClosed
 * @fires ReplSet#topologyDescriptionChanged
 * @property {string} type the topology type.
 * @property {string} parserType the parser type used (c++ or js).
*/ var ReplSet = function(seedlist, options) { var self = this; options = options || {}; // Validate seedlist if (!Array.isArray(seedlist)) throw new MongoError('seedlist must be an array'); // Validate list if (seedlist.length === 0) throw new MongoError('seedlist must contain at least one entry'); // Validate entries seedlist.forEach(function(e) { if (typeof e.host !== 'string' || typeof e.port !== 'number') throw new MongoError('seedlist entry must contain a host and port'); }); // Add event listener EventEmitter.call(this); // Get replSet Id this.id = id++; // Get the localThresholdMS var localThresholdMS = options.localThresholdMS || 15; // Backward compatibility if (options.acceptableLatency) localThresholdMS = options.acceptableLatency; // Create a logger var logger = Logger('ReplSet', options); // Internal state this.s = { options: Object.assign({}, options), // BSON instance bson: options.bson || new BSON([ BSON.Binary, BSON.Code, BSON.DBRef, BSON.Decimal128, BSON.Double, BSON.Int32, BSON.Long, BSON.Map, BSON.MaxKey, BSON.MinKey, BSON.ObjectId, BSON.BSONRegExp, BSON.Symbol, BSON.Timestamp ]), // Factory overrides Cursor: options.cursorFactory || BasicCursor, // Logger instance logger: logger, // Seedlist seedlist: seedlist, // Replicaset state replicaSetState: new ReplSetState({ id: this.id, setName: options.setName, acceptableLatency: localThresholdMS, heartbeatFrequencyMS: options.haInterval ? options.haInterval : 10000, logger: logger }), // Current servers we are connecting to connectingServers: [], // Ha interval haInterval: options.haInterval ? options.haInterval : 10000, // Minimum heartbeat frequency used if we detect a server close minHeartbeatFrequencyMS: 500, // Disconnect handler disconnectHandler: options.disconnectHandler, // Server selection index index: 0, // Connect function options passed in connectOptions: {}, // Are we running in debug mode debug: typeof options.debug === 'boolean' ? 
options.debug : false, // Client info clientInfo: createClientInfo(options) }; // Add handler for topology change this.s.replicaSetState.on('topologyDescriptionChanged', function(r) { self.emit('topologyDescriptionChanged', r); }); // Log info warning if the socketTimeout < haInterval as it will cause // a lot of recycled connections to happen. if ( this.s.logger.isWarn() && this.s.options.socketTimeout !== 0 && this.s.options.socketTimeout < this.s.haInterval ) { this.s.logger.warn( f( 'warning socketTimeout %s is less than haInterval %s. This might cause unnecessary server reconnections due to socket timeouts', this.s.options.socketTimeout, this.s.haInterval ) ); } // Add forwarding of events from state handler var types = ['joined', 'left']; types.forEach(function(x) { self.s.replicaSetState.on(x, function(t, s) { self.emit(x, t, s); }); }); // Connect stat this.initialConnectState = { connect: false, fullsetup: false, all: false }; // Disconnected state this.state = DISCONNECTED; this.haTimeoutId = null; // Last ismaster this.ismaster = null; // Contains the intervalId this.intervalIds = []; // Highest clusterTime seen in responses from the current deployment this.clusterTime = null; }; inherits(ReplSet, EventEmitter); Object.assign(ReplSet.prototype, SessionMixins); Object.defineProperty(ReplSet.prototype, 'type', { enumerable: true, get: function() { return 'replset'; } }); Object.defineProperty(ReplSet.prototype, 'parserType', { enumerable: true, get: function() { return BSON.native ? 
'c++' : 'js'; } }); Object.defineProperty(ReplSet.prototype, 'logicalSessionTimeoutMinutes', { enumerable: true, get: function() { return this.s.replicaSetState.logicalSessionTimeoutMinutes || null; } }); function rexecuteOperations(self) { // If we have a primary and a disconnect handler, execute // buffered operations if (self.s.replicaSetState.hasPrimaryAndSecondary() && self.s.disconnectHandler) { self.s.disconnectHandler.execute(); } else if (self.s.replicaSetState.hasPrimary() && self.s.disconnectHandler) { self.s.disconnectHandler.execute({ executePrimary: true }); } else if (self.s.replicaSetState.hasSecondary() && self.s.disconnectHandler) { self.s.disconnectHandler.execute({ executeSecondary: true }); } } function connectNewServers(self, servers, callback) { // Count lefts var count = servers.length; var error = null; // Handle events var _handleEvent = function(self, event) { return function(err) { var _self = this; count = count - 1; // Destroyed if (self.state === DESTROYED || self.state === UNREFERENCED) { return this.destroy({ force: true }); } if (event === 'connect') { // Destroyed if (self.state === DESTROYED || self.state === UNREFERENCED) { return _self.destroy({ force: true }); } // Update the state var result = self.s.replicaSetState.update(_self); // Update the state with the new server if (result) { // Primary lastIsMaster store it if (_self.lastIsMaster() && _self.lastIsMaster().ismaster) { self.ismaster = _self.lastIsMaster(); } // Remove the handlers for (let i = 0; i < handlers.length; i++) { _self.removeAllListeners(handlers[i]); } // Add stable state handlers _self.on('error', handleEvent(self, 'error')); _self.on('close', handleEvent(self, 'close')); _self.on('timeout', handleEvent(self, 'timeout')); _self.on('parseError', handleEvent(self, 'parseError')); // Enalbe the monitoring of the new server monitorServer(_self.lastIsMaster().me, self, {}); // Rexecute any stalled operation rexecuteOperations(self); } else { _self.destroy({ 
force: true }); } } else if (event === 'error') { error = err; } // Rexecute any stalled operation rexecuteOperations(self); // Are we done finish up callback if (count === 0) { callback(error); } }; }; // No new servers if (count === 0) return callback(); // Execute method function execute(_server, i) { setTimeout(function() { // Destroyed if (self.state === DESTROYED || self.state === UNREFERENCED) { return; } // Create a new server instance var server = new Server( Object.assign({}, self.s.options, { host: _server.split(':')[0], port: parseInt(_server.split(':')[1], 10), reconnect: false, monitoring: false, parent: self, clientInfo: clone(self.s.clientInfo) }) ); // Add temp handlers server.once('connect', _handleEvent(self, 'connect')); server.once('close', _handleEvent(self, 'close')); server.once('timeout', _handleEvent(self, 'timeout')); server.once('error', _handleEvent(self, 'error')); server.once('parseError', _handleEvent(self, 'parseError')); // SDAM Monitoring events server.on('serverOpening', e => self.emit('serverOpening', e)); server.on('serverDescriptionChanged', e => self.emit('serverDescriptionChanged', e)); server.on('serverClosed', e => self.emit('serverClosed', e)); // Command Monitoring events relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); server.connect(self.s.connectOptions); }, i); } // Create new instances for (var i = 0; i < servers.length; i++) { execute(servers[i], i); } } // Ping the server var pingServer = function(self, server, cb) { // Measure running time var start = new Date().getTime(); // Emit the server heartbeat start emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: server.name }); // Execute ismaster // Set the socketTimeout for a monitoring message to a low number // Ensuring ismaster calls are timed out quickly server.command( 'admin.$cmd', { ismaster: true }, { monitoring: true, socketTimeout: self.s.options.connectionTimeout || 2000 }, function(err, r) { if (self.state 
=== DESTROYED || self.state === UNREFERENCED) { server.destroy({ force: true }); return cb(err, r); } // Calculate latency var latencyMS = new Date().getTime() - start; // Set the last updatedTime var hrTime = process.hrtime(); // Calculate the last update time server.lastUpdateTime = hrTime[0] * 1000 + Math.round(hrTime[1] / 1000); // We had an error, remove it from the state if (err) { // Emit the server heartbeat failure emitSDAMEvent(self, 'serverHeartbeatFailed', { durationMS: latencyMS, failure: err, connectionId: server.name }); // Remove server from the state self.s.replicaSetState.remove(server); } else { // Update the server ismaster server.ismaster = r.result; // Check if we have a lastWriteDate convert it to MS // and store on the server instance for later use if (server.ismaster.lastWrite && server.ismaster.lastWrite.lastWriteDate) { server.lastWriteDate = server.ismaster.lastWrite.lastWriteDate.getTime(); } // Do we have a brand new server if (server.lastIsMasterMS === -1) { server.lastIsMasterMS = latencyMS; } else if (server.lastIsMasterMS) { // After the first measurement, average RTT MUST be computed using an // exponentially-weighted moving average formula, with a weighting factor (alpha) of 0.2. 
// If the prior average is denoted old_rtt, then the new average (new_rtt) is // computed from a new RTT measurement (x) using the following formula: // alpha = 0.2 // new_rtt = alpha * x + (1 - alpha) * old_rtt server.lastIsMasterMS = 0.2 * latencyMS + (1 - 0.2) * server.lastIsMasterMS; } if (self.s.replicaSetState.update(server)) { // Primary lastIsMaster store it if (server.lastIsMaster() && server.lastIsMaster().ismaster) { self.ismaster = server.lastIsMaster(); } } // Server heart beat event emitSDAMEvent(self, 'serverHeartbeatSucceeded', { durationMS: latencyMS, reply: r.result, connectionId: server.name }); } // Calculate the staleness for this server self.s.replicaSetState.updateServerMaxStaleness(server, self.s.haInterval); // Callback cb(err, r); } ); }; // Each server is monitored in parallel in their own timeout loop var monitorServer = function(host, self, options) { // If this is not the initial scan // Is this server already being monitoried, then skip monitoring if (!options.haInterval) { for (var i = 0; i < self.intervalIds.length; i++) { if (self.intervalIds[i].__host === host) { return; } } } // Get the haInterval var _process = options.haInterval ? Timeout : Interval; var _haInterval = options.haInterval ? options.haInterval : self.s.haInterval; // Create the interval var intervalId = new _process(function() { if (self.state === DESTROYED || self.state === UNREFERENCED) { // clearInterval(intervalId); intervalId.stop(); return; } // Do we already have server connection available for this host var _server = self.s.replicaSetState.get(host); // Check if we have a known server connection and reuse if (_server) { // Ping the server return pingServer(self, _server, function(err) { if (err) { // NOTE: should something happen here? 
return; } if (self.state === DESTROYED || self.state === UNREFERENCED) { intervalId.stop(); return; } // Filter out all called intervaliIds self.intervalIds = self.intervalIds.filter(function(intervalId) { return intervalId.isRunning(); }); // Initial sweep if (_process === Timeout) { if ( self.state === CONNECTING && ((self.s.replicaSetState.hasSecondary() && self.s.options.secondaryOnlyConnectionAllowed) || self.s.replicaSetState.hasPrimary()) ) { self.state = CONNECTED; // Emit connected sign process.nextTick(function() { self.emit('connect', self); }); // Start topology interval check topologyMonitor(self, {}); } } else { if ( self.state === DISCONNECTED && ((self.s.replicaSetState.hasSecondary() && self.s.options.secondaryOnlyConnectionAllowed) || self.s.replicaSetState.hasPrimary()) ) { self.state = CONNECTED; // Rexecute any stalled operation rexecuteOperations(self); // Emit connected sign process.nextTick(function() { self.emit('reconnect', self); }); } } if ( self.initialConnectState.connect && !self.initialConnectState.fullsetup && self.s.replicaSetState.hasPrimaryAndSecondary() ) { // Set initial connect state self.initialConnectState.fullsetup = true; self.initialConnectState.all = true; process.nextTick(function() { self.emit('fullsetup', self); self.emit('all', self); }); } }); } }, _haInterval); // Start the interval intervalId.start(); // Add the intervalId host name intervalId.__host = host; // Add the intervalId to our list of intervalIds self.intervalIds.push(intervalId); }; function topologyMonitor(self, options) { if (self.state === DESTROYED || self.state === UNREFERENCED) return; options = options || {}; // Get the servers var servers = Object.keys(self.s.replicaSetState.set); // Get the haInterval var _process = options.haInterval ? Timeout : Interval; var _haInterval = options.haInterval ? 
options.haInterval : self.s.haInterval; if (_process === Timeout) { return connectNewServers(self, self.s.replicaSetState.unknownServers, function(err) { // Don't emit errors if the connection was already if (self.state === DESTROYED || self.state === UNREFERENCED) { return; } if (!self.s.replicaSetState.hasPrimary() && !self.s.options.secondaryOnlyConnectionAllowed) { if (err) { return self.emit('error', err); } self.emit( 'error', new MongoError('no primary found in replicaset or invalid replica set name') ); return self.destroy({ force: true }); } else if ( !self.s.replicaSetState.hasSecondary() && self.s.options.secondaryOnlyConnectionAllowed ) { if (err) { return self.emit('error', err); } self.emit( 'error', new MongoError('no secondary found in replicaset or invalid replica set name') ); return self.destroy({ force: true }); } for (var i = 0; i < servers.length; i++) { monitorServer(servers[i], self, options); } }); } else { for (var i = 0; i < servers.length; i++) { monitorServer(servers[i], self, options); } } // Run the reconnect process function executeReconnect(self) { return function() { if (self.state === DESTROYED || self.state === UNREFERENCED) { return; } connectNewServers(self, self.s.replicaSetState.unknownServers, function() { var monitoringFrequencey = self.s.replicaSetState.hasPrimary() ? _haInterval : self.s.minHeartbeatFrequencyMS; // Create a timeout self.intervalIds.push(new Timeout(executeReconnect(self), monitoringFrequencey).start()); }); }; } // Decide what kind of interval to use var intervalTime = !self.s.replicaSetState.hasPrimary() ? 
self.s.minHeartbeatFrequencyMS : _haInterval; self.intervalIds.push(new Timeout(executeReconnect(self), intervalTime).start()); } function addServerToList(list, server) { for (var i = 0; i < list.length; i++) { if (list[i].name.toLowerCase() === server.name.toLowerCase()) return true; } list.push(server); } function handleEvent(self, event) { return function() { if (self.state === DESTROYED || self.state === UNREFERENCED) return; // Debug log if (self.s.logger.isDebug()) { self.s.logger.debug( f('handleEvent %s from server %s in replset with id %s', event, this.name, self.id) ); } // Remove from the replicaset state self.s.replicaSetState.remove(this); // Are we in a destroyed state return if (self.state === DESTROYED || self.state === UNREFERENCED) return; // If no primary and secondary available if ( !self.s.replicaSetState.hasPrimary() && !self.s.replicaSetState.hasSecondary() && self.s.options.secondaryOnlyConnectionAllowed ) { stateTransition(self, DISCONNECTED); } else if (!self.s.replicaSetState.hasPrimary()) { stateTransition(self, DISCONNECTED); } addServerToList(self.s.connectingServers, this); }; } function shouldTriggerConnect(self) { const isConnecting = self.state === CONNECTING; const hasPrimary = self.s.replicaSetState.hasPrimary(); const hasSecondary = self.s.replicaSetState.hasSecondary(); const secondaryOnlyConnectionAllowed = self.s.options.secondaryOnlyConnectionAllowed; const readPreferenceSecondary = self.s.connectOptions.readPreference && self.s.connectOptions.readPreference.equals(ReadPreference.secondary); return ( (isConnecting && ((readPreferenceSecondary && hasSecondary) || (!readPreferenceSecondary && hasPrimary))) || (hasSecondary && secondaryOnlyConnectionAllowed) ); } function handleInitialConnectEvent(self, event) { return function() { var _this = this; // Debug log if (self.s.logger.isDebug()) { self.s.logger.debug( f( 'handleInitialConnectEvent %s from server %s in replset with id %s', event, this.name, self.id ) ); } // Destroy 
the instance if (self.state === DESTROYED || self.state === UNREFERENCED) { return this.destroy({ force: true }); } // Check the type of server if (event === 'connect') { // Update the state var result = self.s.replicaSetState.update(_this); if (result === true) { // Primary lastIsMaster store it if (_this.lastIsMaster() && _this.lastIsMaster().ismaster) { self.ismaster = _this.lastIsMaster(); } // Debug log if (self.s.logger.isDebug()) { self.s.logger.debug( f( 'handleInitialConnectEvent %s from server %s in replset with id %s has state [%s]', event, _this.name, self.id, JSON.stringify(self.s.replicaSetState.set) ) ); } // Remove the handlers for (let i = 0; i < handlers.length; i++) { _this.removeAllListeners(handlers[i]); } // Add stable state handlers _this.on('error', handleEvent(self, 'error')); _this.on('close', handleEvent(self, 'close')); _this.on('timeout', handleEvent(self, 'timeout')); _this.on('parseError', handleEvent(self, 'parseError')); // Do we have a primary or primaryAndSecondary if (shouldTriggerConnect(self)) { // We are connected self.state = CONNECTED; // Set initial connect state self.initialConnectState.connect = true; // Emit connect event process.nextTick(function() { self.emit('connect', self); }); topologyMonitor(self, {}); } } else if (result instanceof MongoError) { _this.destroy({ force: true }); self.destroy({ force: true }); return self.emit('error', result); } else { _this.destroy({ force: true }); } } else { // Emit failure to connect self.emit('failed', this); addServerToList(self.s.connectingServers, this); // Remove from the state self.s.replicaSetState.remove(this); } if ( self.initialConnectState.connect && !self.initialConnectState.fullsetup && self.s.replicaSetState.hasPrimaryAndSecondary() ) { // Set initial connect state self.initialConnectState.fullsetup = true; self.initialConnectState.all = true; process.nextTick(function() { self.emit('fullsetup', self); self.emit('all', self); }); } // Remove from the list from 
connectingServers for (var i = 0; i < self.s.connectingServers.length; i++) { if (self.s.connectingServers[i].equals(this)) { self.s.connectingServers.splice(i, 1); } } // Trigger topologyMonitor if (self.s.connectingServers.length === 0 && self.state === CONNECTING) { topologyMonitor(self, { haInterval: 1 }); } }; } function connectServers(self, servers) { // Update connectingServers self.s.connectingServers = self.s.connectingServers.concat(servers); // Index used to interleaf the server connects, avoiding // runtime issues on io constrained vm's var timeoutInterval = 0; function connect(server, timeoutInterval) { setTimeout(function() { // Add the server to the state if (self.s.replicaSetState.update(server)) { // Primary lastIsMaster store it if (server.lastIsMaster() && server.lastIsMaster().ismaster) { self.ismaster = server.lastIsMaster(); } } // Add event handlers server.once('close', handleInitialConnectEvent(self, 'close')); server.once('timeout', handleInitialConnectEvent(self, 'timeout')); server.once('parseError', handleInitialConnectEvent(self, 'parseError')); server.once('error', handleInitialConnectEvent(self, 'error')); server.once('connect', handleInitialConnectEvent(self, 'connect')); // SDAM Monitoring events server.on('serverOpening', e => self.emit('serverOpening', e)); server.on('serverDescriptionChanged', e => self.emit('serverDescriptionChanged', e)); server.on('serverClosed', e => self.emit('serverClosed', e)); // Command Monitoring events relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); // Start connection server.connect(self.s.connectOptions); }, timeoutInterval); } // Start all the servers while (servers.length > 0) { connect(servers.shift(), timeoutInterval++); } } /** * Emit event if it exists * @method */ function emitSDAMEvent(self, event, description) { if (self.listeners(event).length > 0) { self.emit(event, description); } } /** * Initiate server connect */ ReplSet.prototype.connect = 
function(options) { var self = this; // Add any connect level options to the internal state this.s.connectOptions = options || {}; // Set connecting state stateTransition(this, CONNECTING); // Create server instances var servers = this.s.seedlist.map(function(x) { return new Server( Object.assign({}, self.s.options, x, options, { reconnect: false, monitoring: false, parent: self, clientInfo: clone(self.s.clientInfo) }) ); }); // Error out as high availbility interval must be < than socketTimeout if ( this.s.options.socketTimeout > 0 && this.s.options.socketTimeout <= this.s.options.haInterval ) { return self.emit( 'error', new MongoError( f( 'haInterval [%s] MS must be set to less than socketTimeout [%s] MS', this.s.options.haInterval, this.s.options.socketTimeout ) ) ); } // Emit the topology opening event emitSDAMEvent(this, 'topologyOpening', { topologyId: this.id }); // Start all server connections connectServers(self, servers); }; /** * Authenticate the topology. * @method * @param {MongoCredentials} credentials The credentials for authentication we are using * @param {authResultCallback} callback A callback function */ ReplSet.prototype.auth = function(credentials, callback) { if (typeof callback === 'function') callback(null, null); }; /** * Destroy the server connection * @param {boolean} [options.force=false] Force destroy the pool * @method */ ReplSet.prototype.destroy = function(options, callback) { options = options || {}; let destroyCount = this.s.connectingServers.length + 1; // +1 for the callback from `replicaSetState.destroy` const serverDestroyed = () => { destroyCount--; if (destroyCount > 0) { return; } // Emit toplogy closing event emitSDAMEvent(this, 'topologyClosed', { topologyId: this.id }); // Transition state stateTransition(this, DESTROYED); if (typeof callback === 'function') { callback(null, null); } }; // Clear out any monitoring process if (this.haTimeoutId) clearTimeout(this.haTimeoutId); // Clear out all monitoring for (var i = 0; i 
< this.intervalIds.length; i++) { this.intervalIds[i].stop(); } // Reset list of intervalIds this.intervalIds = []; if (destroyCount === 0) { serverDestroyed(); return; } // Destroy the replicaset this.s.replicaSetState.destroy(options, serverDestroyed); // Destroy all connecting servers this.s.connectingServers.forEach(function(x) { x.destroy(options, serverDestroyed); }); }; /** * Unref all connections belong to this server * @method */ ReplSet.prototype.unref = function() { // Transition state stateTransition(this, UNREFERENCED); this.s.replicaSetState.allServers().forEach(function(x) { x.unref(); }); clearTimeout(this.haTimeoutId); }; /** * Returns the last known ismaster document for this server * @method * @return {object} */ ReplSet.prototype.lastIsMaster = function() { // If secondaryOnlyConnectionAllowed and no primary but secondary // return the secondaries ismaster result. if ( this.s.options.secondaryOnlyConnectionAllowed && !this.s.replicaSetState.hasPrimary() && this.s.replicaSetState.hasSecondary() ) { return this.s.replicaSetState.secondaries[0].lastIsMaster(); } return this.s.replicaSetState.primary ? 
this.s.replicaSetState.primary.lastIsMaster() : this.ismaster; }; /** * All raw connections * @method * @return {Connection[]} */ ReplSet.prototype.connections = function() { var servers = this.s.replicaSetState.allServers(); var connections = []; for (var i = 0; i < servers.length; i++) { connections = connections.concat(servers[i].connections()); } return connections; }; /** * Figure out if the server is connected * @method * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @return {boolean} */ ReplSet.prototype.isConnected = function(options) { options = options || {}; // If we specified a read preference check if we are connected to something // than can satisfy this if (options.readPreference && options.readPreference.equals(ReadPreference.secondary)) { return this.s.replicaSetState.hasSecondary(); } if (options.readPreference && options.readPreference.equals(ReadPreference.primary)) { return this.s.replicaSetState.hasPrimary(); } if (options.readPreference && options.readPreference.equals(ReadPreference.primaryPreferred)) { return this.s.replicaSetState.hasSecondary() || this.s.replicaSetState.hasPrimary(); } if (options.readPreference && options.readPreference.equals(ReadPreference.secondaryPreferred)) { return this.s.replicaSetState.hasSecondary() || this.s.replicaSetState.hasPrimary(); } if (this.s.options.secondaryOnlyConnectionAllowed && this.s.replicaSetState.hasSecondary()) { return true; } return this.s.replicaSetState.hasPrimary(); }; /** * Figure out if the replicaset instance was destroyed by calling destroy * @method * @return {boolean} */ ReplSet.prototype.isDestroyed = function() { return this.state === DESTROYED; }; /** * Selects a server * * @method * @param {function} selector Unused * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {ClientSession} [options.session] Unused * @param {function} callback */ ReplSet.prototype.selectServer = 
function(selector, options, callback) { if (typeof selector === 'function' && typeof callback === 'undefined') (callback = selector), (selector = undefined), (options = {}); if (typeof options === 'function') (callback = options), (options = selector), (selector = undefined); options = options || {}; const server = this.s.replicaSetState.pickServer(options.readPreference); if (this.s.debug) this.emit('pickedServer', options.readPreference, server); callback(null, server); }; /** * Get all connected servers * @method * @return {Server[]} */ ReplSet.prototype.getServers = function() { return this.s.replicaSetState.allServers(); }; // // Execute write operation function executeWriteOperation(args, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // TODO: once we drop Node 4, use destructuring either here or in arguments. const self = args.self; const op = args.op; const ns = args.ns; const ops = args.ops; if (self.state === DESTROYED) { return callback(new MongoError(f('topology was destroyed'))); } const willRetryWrite = !args.retrying && !!options.retryWrites && options.session && isRetryableWritesSupported(self) && !options.session.inTransaction(); if (!self.s.replicaSetState.hasPrimary()) { if (self.s.disconnectHandler) { // Not connected but we have a disconnecthandler return self.s.disconnectHandler.add(op, ns, ops, options, callback); } else if (!willRetryWrite) { // No server returned we had an error return callback(new MongoError('no primary server found')); } } const handler = (err, result) => { if (!err) return callback(null, result); if (!isRetryableError(err)) { return callback(err); } if (willRetryWrite) { const newArgs = Object.assign({}, args, { retrying: true }); return executeWriteOperation(newArgs, options, callback); } // Per SDAM, remove primary from replicaset if (self.s.replicaSetState.primary) { self.s.replicaSetState.remove(self.s.replicaSetState.primary, { force: true }); } 
return callback(err); }; if (callback.operationId) { handler.operationId = callback.operationId; } // increment and assign txnNumber if (willRetryWrite) { options.session.incrementTransactionNumber(); options.willRetryWrite = willRetryWrite; } self.s.replicaSetState.primary[op](ns, ops, options, handler); } /** * Insert one or more documents * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of documents to insert * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ ReplSet.prototype.insert = function(ns, ops, options, callback) { // Execute write operation executeWriteOperation({ self: this, op: 'insert', ns, ops }, options, callback); }; /** * Perform one or more update operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of updates * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ ReplSet.prototype.update = function(ns, ops, options, callback) { // Execute write operation executeWriteOperation({ self: this, op: 'update', ns, ops }, options, callback); }; /** * Perform one or more remove operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of removes * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {boolean} [options.retryWrites] Enable retryable writes for this operation * @param {opResultCallback} callback A callback function */ ReplSet.prototype.remove = function(ns, ops, options, callback) { // Execute write operation executeWriteOperation({ self: this, op: 'remove', ns, ops }, options, callback); }; const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; function isWriteCommand(command) { return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); } /** * Execute a command * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Connection} [options.connection] Specify connection object to execute command against * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. 
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ ReplSet.prototype.command = function(ns, cmd, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); var self = this; // Establish readPreference var readPreference = options.readPreference ? options.readPreference : ReadPreference.primary; // If the readPreference is primary and we have no primary, store it if ( readPreference.preference === 'primary' && !this.s.replicaSetState.hasPrimary() && this.s.disconnectHandler != null ) { return this.s.disconnectHandler.add('command', ns, cmd, options, callback); } else if ( readPreference.preference === 'secondary' && !this.s.replicaSetState.hasSecondary() && this.s.disconnectHandler != null ) { return this.s.disconnectHandler.add('command', ns, cmd, options, callback); } else if ( readPreference.preference !== 'primary' && !this.s.replicaSetState.hasSecondary() && !this.s.replicaSetState.hasPrimary() && this.s.disconnectHandler != null ) { return this.s.disconnectHandler.add('command', ns, cmd, options, callback); } // Pick a server var server = this.s.replicaSetState.pickServer(readPreference); // We received an error, return it if (!(server instanceof Server)) return callback(server); // Emit debug event if (self.s.debug) self.emit('pickedServer', ReadPreference.primary, server); // No server returned we had an error if (server == null) { return callback( new MongoError( f('no server found that matches the provided readPreference %s', readPreference) ) ); } const willRetryWrite = !options.retrying && !!options.retryWrites && options.session && isRetryableWritesSupported(self) && 
!options.session.inTransaction() && isWriteCommand(cmd); const cb = (err, result) => { if (!err) return callback(null, result); if (!isRetryableError(err)) { return callback(err); } if (willRetryWrite) { const newOptions = Object.assign({}, options, { retrying: true }); return this.command(ns, cmd, newOptions, callback); } // Per SDAM, remove primary from replicaset if (this.s.replicaSetState.primary) { this.s.replicaSetState.remove(this.s.replicaSetState.primary, { force: true }); } return callback(err); }; // increment and assign txnNumber if (willRetryWrite) { options.session.incrementTransactionNumber(); options.willRetryWrite = willRetryWrite; } // Execute the command server.command(ns, cmd, options, cb); }; /** * Get a new cursor * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId * @param {object} [options] Options for the cursor * @param {object} [options.batchSize=0] Batchsize for the operation * @param {array} [options.documents=[]] Initial documents list for cursor * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {object} [options.topology] The internal topology of the created cursor * @returns {Cursor} */ ReplSet.prototype.cursor = function(ns, cmd, options) { options = options || {}; const topology = options.topology || this; // Set up final cursor type var FinalCursor = options.cursorFactory || this.s.Cursor; // Return the cursor return new FinalCursor(this.s.bson, ns, cmd, options, topology, this.s.options); }; /** * A replset connect event, used to verify that the connection is up and running * * @event ReplSet#connect * @type {ReplSet} */ /** * A replset reconnect event, used to verify that the topology reconnected * * @event ReplSet#reconnect * @type {ReplSet} */ /** * A replset fullsetup event, used to signal that all topology members have been contacted. * * @event ReplSet#fullsetup * @type {ReplSet} */ /** * A replset all event, used to signal that all topology members have been contacted. * * @event ReplSet#all * @type {ReplSet} */ /** * A replset failed event, used to signal that initial replset connection failed. 
* * @event ReplSet#failed * @type {ReplSet} */ /** * A server member left the replicaset * * @event ReplSet#left * @type {function} * @param {string} type The type of member that left (primary|secondary|arbiter) * @param {Server} server The server object that left */ /** * A server member joined the replicaset * * @event ReplSet#joined * @type {function} * @param {string} type The type of member that joined (primary|secondary|arbiter) * @param {Server} server The server object that joined */ /** * A server opening SDAM monitoring event * * @event ReplSet#serverOpening * @type {object} */ /** * A server closed SDAM monitoring event * * @event ReplSet#serverClosed * @type {object} */ /** * A server description SDAM change monitoring event * * @event ReplSet#serverDescriptionChanged * @type {object} */ /** * A topology open SDAM event * * @event ReplSet#topologyOpening * @type {object} */ /** * A topology closed SDAM event * * @event ReplSet#topologyClosed * @type {object} */ /** * A topology structure SDAM change event * * @event ReplSet#topologyDescriptionChanged * @type {object} */ /** * A topology serverHeartbeatStarted SDAM event * * @event ReplSet#serverHeartbeatStarted * @type {object} */ /** * A topology serverHeartbeatFailed SDAM event * * @event ReplSet#serverHeartbeatFailed * @type {object} */ /** * A topology serverHeartbeatSucceeded SDAM change event * * @event ReplSet#serverHeartbeatSucceeded * @type {object} */ /** * An event emitted indicating a command was started, if command monitoring is enabled * * @event ReplSet#commandStarted * @type {object} */ /** * An event emitted indicating a command succeeded, if command monitoring is enabled * * @event ReplSet#commandSucceeded * @type {object} */ /** * An event emitted indicating a command failed, if command monitoring is enabled * * @event ReplSet#commandFailed * @type {object} */ module.exports = ReplSet; package/lib/topologies/server.js000644 0000072250 3560116604 014064 0ustar00000000 000000 'use 
strict'; var inherits = require('util').inherits, f = require('util').format, EventEmitter = require('events').EventEmitter, ReadPreference = require('./read_preference'), Logger = require('../connection/logger'), debugOptions = require('../connection/utils').debugOptions, retrieveBSON = require('../connection/utils').retrieveBSON, Pool = require('../connection/pool'), MongoError = require('../error').MongoError, MongoNetworkError = require('../error').MongoNetworkError, wireProtocol = require('../wireprotocol'), BasicCursor = require('../cursor'), sdam = require('./shared'), createClientInfo = require('./shared').createClientInfo, createCompressionInfo = require('./shared').createCompressionInfo, resolveClusterTime = require('./shared').resolveClusterTime, SessionMixins = require('./shared').SessionMixins, relayEvents = require('../utils').relayEvents; const collationNotSupported = require('../utils').collationNotSupported; // Used for filtering out fields for loggin var debugFields = [ 'reconnect', 'reconnectTries', 'reconnectInterval', 'emitError', 'cursorFactory', 'host', 'port', 'size', 'keepAlive', 'keepAliveInitialDelay', 'noDelay', 'connectionTimeout', 'checkServerIdentity', 'socketTimeout', 'ssl', 'ca', 'crl', 'cert', 'key', 'rejectUnauthorized', 'promoteLongs', 'promoteValues', 'promoteBuffers', 'servername' ]; // Server instance id var id = 0; var serverAccounting = false; var servers = {}; var BSON = retrieveBSON(); /** * Creates a new Server instance * @class * @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {number} [options.monitoring=true] Enable the server state monitoring (calling ismaster at monitoringInterval) * @param {number} [options.monitoringInterval=5000] The interval of calling ismaster when monitoring is 
enabled. * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors * @param {string} options.host The server host * @param {number} options.port The server port * @param {number} [options.size=5] Server connection pool size * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting * @param {boolean} [options.ssl=false] Use SSL for connection * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. * @param {Buffer} [options.ca] SSL Certificate store binary buffer * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer * @param {Buffer} [options.cert] SSL Certificate binary buffer * @param {Buffer} [options.key] SSL Key file binary buffer * @param {string} [options.passphrase] SSL Certificate pass phrase * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. 
* @param {string} [options.appname=null] Application name, passed in on ismaster call and logged in mongod server logs. Maximum size 128 bytes. * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology * @return {Server} A cursor instance * @fires Server#connect * @fires Server#close * @fires Server#error * @fires Server#timeout * @fires Server#parseError * @fires Server#reconnect * @fires Server#reconnectFailed * @fires Server#serverHeartbeatStarted * @fires Server#serverHeartbeatSucceeded * @fires Server#serverHeartbeatFailed * @fires Server#topologyOpening * @fires Server#topologyClosed * @fires Server#topologyDescriptionChanged * @property {string} type the topology type. * @property {string} parserType the parser type used (c++ or js). */ var Server = function(options) { options = options || {}; // Add event listener EventEmitter.call(this); // Server instance id this.id = id++; // Internal state this.s = { // Options options: options, // Logger logger: Logger('Server', options), // Factory overrides Cursor: options.cursorFactory || BasicCursor, // BSON instance bson: options.bson || new BSON([ BSON.Binary, BSON.Code, BSON.DBRef, BSON.Decimal128, BSON.Double, BSON.Int32, BSON.Long, BSON.Map, BSON.MaxKey, BSON.MinKey, BSON.ObjectId, BSON.BSONRegExp, BSON.Symbol, BSON.Timestamp ]), // Pool pool: null, // Disconnect handler disconnectHandler: options.disconnectHandler, // Monitor thread (keeps the connection alive) monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : true, // Is the server in a topology inTopology: !!options.parent, // Monitoring timeout monitoringInterval: typeof options.monitoringInterval === 'number' ? 
options.monitoringInterval : 5000, // Topology id topologyId: -1, compression: { compressors: createCompressionInfo(options) }, // Optional parent topology parent: options.parent }; // If this is a single deployment we need to track the clusterTime here if (!this.s.parent) { this.s.clusterTime = null; } // Curent ismaster this.ismaster = null; // Current ping time this.lastIsMasterMS = -1; // The monitoringProcessId this.monitoringProcessId = null; // Initial connection this.initialConnect = true; // Default type this._type = 'server'; // Set the client info this.clientInfo = createClientInfo(options); // Max Stalleness values // last time we updated the ismaster state this.lastUpdateTime = 0; // Last write time this.lastWriteDate = 0; // Stalleness this.staleness = 0; }; inherits(Server, EventEmitter); Object.assign(Server.prototype, SessionMixins); Object.defineProperty(Server.prototype, 'type', { enumerable: true, get: function() { return this._type; } }); Object.defineProperty(Server.prototype, 'parserType', { enumerable: true, get: function() { return BSON.native ? 'c++' : 'js'; } }); Object.defineProperty(Server.prototype, 'logicalSessionTimeoutMinutes', { enumerable: true, get: function() { if (!this.ismaster) return null; return this.ismaster.logicalSessionTimeoutMinutes || null; } }); // In single server deployments we track the clusterTime directly on the topology, however // in Mongos and ReplSet deployments we instead need to delegate the clusterTime up to the // tracking objects so we can ensure we are gossiping the maximum time received from the // server. Object.defineProperty(Server.prototype, 'clusterTime', { enumerable: true, set: function(clusterTime) { const settings = this.s.parent ? this.s.parent : this.s; resolveClusterTime(settings, clusterTime); }, get: function() { const settings = this.s.parent ? 
this.s.parent : this.s; return settings.clusterTime || null; } }); Server.enableServerAccounting = function() { serverAccounting = true; servers = {}; }; Server.disableServerAccounting = function() { serverAccounting = false; }; Server.servers = function() { return servers; }; Object.defineProperty(Server.prototype, 'name', { enumerable: true, get: function() { return this.s.options.host + ':' + this.s.options.port; } }); function disconnectHandler(self, type, ns, cmd, options, callback) { // Topology is not connected, save the call in the provided store to be // Executed at some point when the handler deems it's reconnected if ( !self.s.pool.isConnected() && self.s.options.reconnect && self.s.disconnectHandler != null && !options.monitoring ) { self.s.disconnectHandler.add(type, ns, cmd, options, callback); return true; } // If we have no connection error if (!self.s.pool.isConnected()) { callback(new MongoError(f('no connection available to server %s', self.name))); return true; } } function monitoringProcess(self) { return function() { // Pool was destroyed do not continue process if (self.s.pool.isDestroyed()) return; // Emit monitoring Process event self.emit('monitoring', self); // Perform ismaster call // Get start time var start = new Date().getTime(); // Execute the ismaster query self.command( 'admin.$cmd', { ismaster: true }, { socketTimeout: typeof self.s.options.connectionTimeout !== 'number' ? 
2000 : self.s.options.connectionTimeout, monitoring: true }, (err, result) => { // Set initial lastIsMasterMS self.lastIsMasterMS = new Date().getTime() - start; if (self.s.pool.isDestroyed()) return; // Update the ismaster view if we have a result if (result) { self.ismaster = result.result; } // Re-schedule the monitoring process self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval); } ); }; } var eventHandler = function(self, event) { return function(err, conn) { // Log information of received information if in info mode if (self.s.logger.isInfo()) { var object = err instanceof MongoError ? JSON.stringify(err) : {}; self.s.logger.info( f('server %s fired event %s out with message %s', self.name, event, object) ); } // Handle connect event if (event === 'connect') { self.initialConnect = false; self.ismaster = conn.ismaster; self.lastIsMasterMS = conn.lastIsMasterMS; if (conn.agreedCompressor) { self.s.pool.options.agreedCompressor = conn.agreedCompressor; } if (conn.zlibCompressionLevel) { self.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel; } if (conn.ismaster.$clusterTime) { const $clusterTime = conn.ismaster.$clusterTime; self.clusterTime = $clusterTime; } // It's a proxy change the type so // the wireprotocol will send $readPreference if (self.ismaster.msg === 'isdbgrid') { self._type = 'mongos'; } // Have we defined self monitoring if (self.s.monitoring) { self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval); } // Emit server description changed if something listening sdam.emitServerDescriptionChanged(self, { address: self.name, arbiters: [], hosts: [], passives: [], type: sdam.getTopologyType(self) }); if (!self.s.inTopology) { // Emit topology description changed if something listening sdam.emitTopologyDescriptionChanged(self, { topologyType: 'Single', servers: [ { address: self.name, arbiters: [], hosts: [], passives: [], type: sdam.getTopologyType(self) } ] }); } 
// Log the ismaster if available if (self.s.logger.isInfo()) { self.s.logger.info( f('server %s connected with ismaster [%s]', self.name, JSON.stringify(self.ismaster)) ); } // Emit connect self.emit('connect', self); } else if ( event === 'error' || event === 'parseError' || event === 'close' || event === 'timeout' || event === 'reconnect' || event === 'attemptReconnect' || 'reconnectFailed' ) { // Remove server instance from accounting if ( serverAccounting && ['close', 'timeout', 'error', 'parseError', 'reconnectFailed'].indexOf(event) !== -1 ) { // Emit toplogy opening event if not in topology if (!self.s.inTopology) { self.emit('topologyOpening', { topologyId: self.id }); } delete servers[self.id]; } if (event === 'close') { // Closing emits a server description changed event going to unknown. sdam.emitServerDescriptionChanged(self, { address: self.name, arbiters: [], hosts: [], passives: [], type: 'Unknown' }); } // Reconnect failed return error if (event === 'reconnectFailed') { self.emit('reconnectFailed', err); // Emit error if any listeners if (self.listeners('error').length > 0) { self.emit('error', err); } // Terminate return; } // On first connect fail if ( ['disconnected', 'connecting'].indexOf(self.s.pool.state) !== -1 && self.initialConnect && ['close', 'timeout', 'error', 'parseError'].indexOf(event) !== -1 ) { self.initialConnect = false; return self.emit( 'error', new MongoNetworkError( f('failed to connect to server [%s] on first connect [%s]', self.name, err) ) ); } // Reconnect event, emit the server if (event === 'reconnect') { // Reconnecting emits a server description changed event going from unknown to the // current server type. 
sdam.emitServerDescriptionChanged(self, { address: self.name, arbiters: [], hosts: [], passives: [], type: sdam.getTopologyType(self) }); return self.emit(event, self); } // Emit the event self.emit(event, err); } }; }; /** * Initiate server connect */ Server.prototype.connect = function(options) { var self = this; options = options || {}; // Set the connections if (serverAccounting) servers[this.id] = this; // Do not allow connect to be called on anything that's not disconnected if (self.s.pool && !self.s.pool.isDisconnected() && !self.s.pool.isDestroyed()) { throw new MongoError(f('server instance in invalid state %s', self.s.pool.state)); } // Create a pool self.s.pool = new Pool(this, Object.assign(self.s.options, options, { bson: this.s.bson })); // Set up listeners self.s.pool.on('close', eventHandler(self, 'close')); self.s.pool.on('error', eventHandler(self, 'error')); self.s.pool.on('timeout', eventHandler(self, 'timeout')); self.s.pool.on('parseError', eventHandler(self, 'parseError')); self.s.pool.on('connect', eventHandler(self, 'connect')); self.s.pool.on('reconnect', eventHandler(self, 'reconnect')); self.s.pool.on('reconnectFailed', eventHandler(self, 'reconnectFailed')); // Set up listeners for command monitoring relayEvents(self.s.pool, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); // Emit toplogy opening event if not in topology if (!self.s.inTopology) { this.emit('topologyOpening', { topologyId: self.id }); } // Emit opening server event self.emit('serverOpening', { topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id, address: self.name }); self.s.pool.connect(); }; /** * Authenticate the topology. 
* @method * @param {MongoCredentials} credentials The credentials for authentication we are using * @param {authResultCallback} callback A callback function */ Server.prototype.auth = function(credentials, callback) { if (typeof callback === 'function') callback(null, null); }; /** * Get the server description * @method * @return {object} */ Server.prototype.getDescription = function() { var ismaster = this.ismaster || {}; var description = { type: sdam.getTopologyType(this), address: this.name }; // Add fields if available if (ismaster.hosts) description.hosts = ismaster.hosts; if (ismaster.arbiters) description.arbiters = ismaster.arbiters; if (ismaster.passives) description.passives = ismaster.passives; if (ismaster.setName) description.setName = ismaster.setName; return description; }; /** * Returns the last known ismaster document for this server * @method * @return {object} */ Server.prototype.lastIsMaster = function() { return this.ismaster; }; /** * Unref all connections belong to this server * @method */ Server.prototype.unref = function() { this.s.pool.unref(); }; /** * Figure out if the server is connected * @method * @return {boolean} */ Server.prototype.isConnected = function() { if (!this.s.pool) return false; return this.s.pool.isConnected(); }; /** * Figure out if the server instance was destroyed by calling destroy * @method * @return {boolean} */ Server.prototype.isDestroyed = function() { if (!this.s.pool) return false; return this.s.pool.isDestroyed(); }; function basicWriteValidations(self) { if (!self.s.pool) return new MongoError('server instance is not connected'); if (self.s.pool.isDestroyed()) return new MongoError('server instance pool was destroyed'); } function basicReadValidations(self, options) { basicWriteValidations(self, options); if (options.readPreference && !(options.readPreference instanceof ReadPreference)) { throw new Error('readPreference must be an instance of ReadPreference'); } } /** * Execute a command * @method * @param 
{string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object} cmd The command hash * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ Server.prototype.command = function(ns, cmd, options, callback) { var self = this; if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } var result = basicReadValidations(self, options); if (result) return callback(result); // Clone the options options = Object.assign({}, options, { wireProtocolCommand: false }); // Debug log if (self.s.logger.isDebug()) self.s.logger.debug( f( 'executing command [%s] against %s', JSON.stringify({ ns: ns, cmd: cmd, options: debugOptions(debugFields, options) }), self.name ) ); // If we are not connected or have a disconnectHandler specified if (disconnectHandler(self, 'command', ns, cmd, options, callback)) return; // error if collation not supported if (collationNotSupported(this, cmd)) { return callback(new MongoError(`server ${this.name} does not support collation`)); } wireProtocol.command(self, ns, cmd, options, callback); }; /** * Insert one or more documents * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of documents to insert * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} 
[options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. * @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ Server.prototype.insert = function(ns, ops, options, callback) { var self = this; if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } var result = basicWriteValidations(self, options); if (result) return callback(result); // If we are not connected or have a disconnectHandler specified if (disconnectHandler(self, 'insert', ns, ops, options, callback)) return; // Setup the docs as an array ops = Array.isArray(ops) ? ops : [ops]; // Execute write return wireProtocol.insert(self, ns, ops, options, callback); }; /** * Perform one or more update operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of updates * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ Server.prototype.update = function(ns, ops, options, callback) { var self = this; if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } var result = basicWriteValidations(self, options); if (result) return callback(result); // If we are not connected or have a disconnectHandler specified if (disconnectHandler(self, 'update', ns, ops, options, callback)) return; // error if collation not supported if (collationNotSupported(this, options)) { return callback(new MongoError(`server ${this.name} does not support collation`)); } // Setup the docs as an array ops = Array.isArray(ops) ? ops : [ops]; // Execute write return wireProtocol.update(self, ns, ops, options, callback); }; /** * Perform one or more remove operations * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {array} ops An array of removes * @param {boolean} [options.ordered=true] Execute in order or out of order * @param {object} [options.writeConcern={}] Write concern for the operation * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {opResultCallback} callback A callback function */ Server.prototype.remove = function(ns, ops, options, callback) { var self = this; if (typeof options === 'function') { (callback = options), (options = {}), (options = options || {}); } var result = basicWriteValidations(self, options); if (result) return callback(result); // If we are not connected or have a disconnectHandler specified if (disconnectHandler(self, 'remove', ns, ops, options, callback)) return; // error if collation not supported if (collationNotSupported(this, options)) { return callback(new MongoError(`server ${this.name} does not support collation`)); } // Setup the docs as an array ops = Array.isArray(ops) ? ops : [ops]; // Execute write return wireProtocol.remove(self, ns, ops, options, callback); }; /** * Get a new cursor * @method * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId * @param {object} [options] Options for the cursor * @param {object} [options.batchSize=0] Batchsize for the operation * @param {array} [options.documents=[]] Initial documents list for cursor * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
* @param {ClientSession} [options.session=null] Session to use for the operation * @param {object} [options.topology] The internal topology of the created cursor * @returns {Cursor} */ Server.prototype.cursor = function(ns, cmd, options) { options = options || {}; const topology = options.topology || this; // Set up final cursor type var FinalCursor = options.cursorFactory || this.s.Cursor; // Return the cursor return new FinalCursor(this.s.bson, ns, cmd, options, topology, this.s.options); }; /** * Compare two server instances * @method * @param {Server} server Server to compare equality against * @return {boolean} */ Server.prototype.equals = function(server) { if (typeof server === 'string') return this.name.toLowerCase() === server.toLowerCase(); if (server.name) return this.name.toLowerCase() === server.name.toLowerCase(); return false; }; /** * All raw connections * @method * @return {Connection[]} */ Server.prototype.connections = function() { return this.s.pool.allConnections(); }; /** * Selects a server * @method * @param {function} selector Unused * @param {ReadPreference} [options.readPreference] Unused * @param {ClientSession} [options.session] Unused * @return {Server} */ Server.prototype.selectServer = function(selector, options, callback) { if (typeof selector === 'function' && typeof callback === 'undefined') (callback = selector), (selector = undefined), (options = {}); if (typeof options === 'function') (callback = options), (options = selector), (selector = undefined); callback(null, this); }; var listeners = ['close', 'error', 'timeout', 'parseError', 'connect']; /** * Destroy the server connection * @method * @param {boolean} [options.emitClose=false] Emit close event on destroy * @param {boolean} [options.emitDestroy=false] Emit destroy event on destroy * @param {boolean} [options.force=false] Force destroy the pool */ Server.prototype.destroy = function(options, callback) { if (this._destroyed) { if (typeof callback === 'function') 
callback(null, null);
    return;
  }
  options = options || {};
  var self = this;

  // Remove this server from the module-level accounting table, if enabled
  if (serverAccounting) delete servers[this.id];

  // Destroy the monitoring process if any
  if (this.monitoringProcessId) {
    clearTimeout(this.monitoringProcessId);
  }

  // No pool, return
  if (!self.s.pool) {
    this._destroyed = true;
    if (typeof callback === 'function') callback(null, null);
    return;
  }

  // Emit close event
  if (options.emitClose) {
    self.emit('close', self);
  }

  // Emit destroy event
  if (options.emitDestroy) {
    self.emit('destroy', self);
  }

  // Remove all pool listeners so late pool events do not re-enter this server
  listeners.forEach(function(event) {
    self.s.pool.removeAllListeners(event);
  });

  // Emit serverClosed SDAM event if anything is listening
  if (self.listeners('serverClosed').length > 0)
    self.emit('serverClosed', {
      topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id,
      address: self.name
    });

  // Emit topologyClosed SDAM event if not part of a larger topology
  if (self.listeners('topologyClosed').length > 0 && !self.s.inTopology) {
    self.emit('topologyClosed', { topologyId: self.id });
  }

  if (self.s.logger.isDebug()) {
    self.s.logger.debug(f('destroy called on server %s', self.name));
  }

  // Destroy the pool
  this.s.pool.destroy(options.force, callback);
  this._destroyed = true;
};

/**
 * A server connect event, used to verify that the connection is up and running
 *
 * @event Server#connect
 * @type {Server}
 */

/**
 * A server reconnect event, used to verify that the server topology has reconnected
 *
 * @event Server#reconnect
 * @type {Server}
 */

/**
 * A server opening SDAM monitoring event
 *
 * @event Server#serverOpening
 * @type {object}
 */

/**
 * A server closed SDAM monitoring event
 *
 * @event Server#serverClosed
 * @type {object}
 */

/**
 * A server description SDAM change monitoring event
 *
 * @event Server#serverDescriptionChanged
 * @type {object}
 */

/**
 * A topology open SDAM event
 *
 * @event Server#topologyOpening
 * @type {object}
 */

/**
 * A topology closed SDAM event
 *
 * @event Server#topologyClosed
 * @type {object}
 */

/**
 * A topology structure SDAM change
event
 *
 * @event Server#topologyDescriptionChanged
 * @type {object}
 */

/**
 * Server reconnect failed
 *
 * @event Server#reconnectFailed
 * @type {Error}
 */

/**
 * Server connection pool closed
 *
 * @event Server#close
 * @type {object}
 */

/**
 * Server connection pool caused an error
 *
 * @event Server#error
 * @type {Error}
 */

/**
 * Server destroyed was called
 *
 * @event Server#destroy
 * @type {Server}
 */

module.exports = Server;
package/lib/topologies/shared.js000644 0000003503 3560116604 014025 0ustar00000000 000000
'use strict';

const os = require('os');
const f = require('util').format;
const ReadPreference = require('./read_preference');
const Buffer = require('safe-buffer').Buffer;
const TopologyType = require('../sdam/topology_description').TopologyType;

/**
 * Emit event if it exists
 * @method
 */
function emitSDAMEvent(self, event, description) {
  if (self.listeners(event).length > 0) {
    self.emit(event, description);
  }
}

// Get package.json variable
var driverVersion = require('../../package.json').version;
var nodejsversion = f('Node.js %s, %s', process.version, os.endianness());
var type = os.type();
var name = process.platform;
var architecture = process.arch;
var release = os.release();

// Builds the client metadata document sent in the initial handshake
function createClientInfo(options) {
  // Build default client information
  var clientInfo = options.clientInfo
    ? clone(options.clientInfo)
    : {
        driver: {
          name: 'nodejs-core',
          version: driverVersion
        },
        os: {
          type: type,
          name: name,
          architecture: architecture,
          version: release
        }
      };

  // Is platform specified
  if (clientInfo.platform && clientInfo.platform.indexOf('mongodb-core') === -1) {
    clientInfo.platform = f('%s, mongodb-core: %s', clientInfo.platform, driverVersion);
  } else if (!clientInfo.platform) {
    clientInfo.platform = nodejsversion;
  }

  // Do we have an application specific string
  if (options.appname) {
    // Cut at 128 bytes
    var buffer = Buffer.from(options.appname);
    // Return the truncated appname
    var appname = buffer.length > 128 ?
buffer.slice(0, 128).toString('utf8') : options.appname; // Add to the clientInfo clientInfo.application = { name: appname }; } return clientInfo; } function createCompressionInfo(options) { if (!options.compression || !options.compression.compressors) { return []; } // Check that all supplied compressors are valid options.compression.compressors.forEach(function(compressor) { if (compressor !== 'snappy' && compressor !== 'zlib') { throw new Error('compressors must be at least one of snappy or zlib'); } }); return options.compression.compressors; } function clone(object) { return JSON.parse(JSON.stringify(object)); } var getPreviousDescription = function(self) { if (!self.s.serverDescription) { self.s.serverDescription = { address: self.name, arbiters: [], hosts: [], passives: [], type: 'Unknown' }; } return self.s.serverDescription; }; var emitServerDescriptionChanged = function(self, description) { if (self.listeners('serverDescriptionChanged').length > 0) { // Emit the server description changed events self.emit('serverDescriptionChanged', { topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id, address: self.name, previousDescription: getPreviousDescription(self), newDescription: description }); self.s.serverDescription = description; } }; var getPreviousTopologyDescription = function(self) { if (!self.s.topologyDescription) { self.s.topologyDescription = { topologyType: 'Unknown', servers: [ { address: self.name, arbiters: [], hosts: [], passives: [], type: 'Unknown' } ] }; } return self.s.topologyDescription; }; var emitTopologyDescriptionChanged = function(self, description) { if (self.listeners('topologyDescriptionChanged').length > 0) { // Emit the server description changed events self.emit('topologyDescriptionChanged', { topologyId: self.s.topologyId !== -1 ? 
self.s.topologyId : self.id, address: self.name, previousDescription: getPreviousTopologyDescription(self), newDescription: description }); self.s.serverDescription = description; } }; var changedIsMaster = function(self, currentIsmaster, ismaster) { var currentType = getTopologyType(self, currentIsmaster); var newType = getTopologyType(self, ismaster); if (newType !== currentType) return true; return false; }; var getTopologyType = function(self, ismaster) { if (!ismaster) { ismaster = self.ismaster; } if (!ismaster) return 'Unknown'; if (ismaster.ismaster && ismaster.msg === 'isdbgrid') return 'Mongos'; if (ismaster.ismaster && !ismaster.hosts) return 'Standalone'; if (ismaster.ismaster) return 'RSPrimary'; if (ismaster.secondary) return 'RSSecondary'; if (ismaster.arbiterOnly) return 'RSArbiter'; return 'Unknown'; }; var inquireServerState = function(self) { return function(callback) { if (self.s.state === 'destroyed') return; // Record response time var start = new Date().getTime(); // emitSDAMEvent emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: self.name }); // Attempt to execute ismaster command self.command('admin.$cmd', { ismaster: true }, { monitoring: true }, function(err, r) { if (!err) { // Legacy event sender self.emit('ismaster', r, self); // Calculate latencyMS var latencyMS = new Date().getTime() - start; // Server heart beat event emitSDAMEvent(self, 'serverHeartbeatSucceeded', { durationMS: latencyMS, reply: r.result, connectionId: self.name }); // Did the server change if (changedIsMaster(self, self.s.ismaster, r.result)) { // Emit server description changed if something listening emitServerDescriptionChanged(self, { address: self.name, arbiters: [], hosts: [], passives: [], type: !self.s.inTopology ? 
'Standalone' : getTopologyType(self) }); } // Updat ismaster view self.s.ismaster = r.result; // Set server response time self.s.isMasterLatencyMS = latencyMS; } else { emitSDAMEvent(self, 'serverHeartbeatFailed', { durationMS: latencyMS, failure: err, connectionId: self.name }); } // Peforming an ismaster monitoring callback operation if (typeof callback === 'function') { return callback(err, r); } // Perform another sweep self.s.inquireServerStateTimeout = setTimeout(inquireServerState(self), self.s.haInterval); }); }; }; // // Clone the options var cloneOptions = function(options) { var opts = {}; for (var name in options) { opts[name] = options[name]; } return opts; }; function Interval(fn, time) { var timer = false; this.start = function() { if (!this.isRunning()) { timer = setInterval(fn, time); } return this; }; this.stop = function() { clearInterval(timer); timer = false; return this; }; this.isRunning = function() { return timer !== false; }; } function Timeout(fn, time) { var timer = false; this.start = function() { if (!this.isRunning()) { timer = setTimeout(fn, time); } return this; }; this.stop = function() { clearTimeout(timer); timer = false; return this; }; this.isRunning = function() { if (timer && timer._called) return false; return timer !== false; }; } function diff(previous, current) { // Difference document var diff = { servers: [] }; // Previous entry if (!previous) { previous = { servers: [] }; } // Check if we have any previous servers missing in the current ones for (var i = 0; i < previous.servers.length; i++) { var found = false; for (var j = 0; j < current.servers.length; j++) { if (current.servers[j].address.toLowerCase() === previous.servers[i].address.toLowerCase()) { found = true; break; } } if (!found) { // Add to the diff diff.servers.push({ address: previous.servers[i].address, from: previous.servers[i].type, to: 'Unknown' }); } } // Check if there are any severs that don't exist for (j = 0; j < current.servers.length; j++) { 
found = false; // Go over all the previous servers for (i = 0; i < previous.servers.length; i++) { if (previous.servers[i].address.toLowerCase() === current.servers[j].address.toLowerCase()) { found = true; break; } } // Add the server to the diff if (!found) { diff.servers.push({ address: current.servers[j].address, from: 'Unknown', to: current.servers[j].type }); } } // Got through all the servers for (i = 0; i < previous.servers.length; i++) { var prevServer = previous.servers[i]; // Go through all current servers for (j = 0; j < current.servers.length; j++) { var currServer = current.servers[j]; // Matching server if (prevServer.address.toLowerCase() === currServer.address.toLowerCase()) { // We had a change in state if (prevServer.type !== currServer.type) { diff.servers.push({ address: prevServer.address, from: prevServer.type, to: currServer.type }); } } } } // Return difference return diff; } /** * Shared function to determine clusterTime for a given topology * * @param {*} topology * @param {*} clusterTime */ function resolveClusterTime(topology, $clusterTime) { if (topology.clusterTime == null) { topology.clusterTime = $clusterTime; } else { if ($clusterTime.clusterTime.greaterThan(topology.clusterTime.clusterTime)) { topology.clusterTime = $clusterTime; } } } // NOTE: this is a temporary move until the topologies can be more formally refactored // to share code. const SessionMixins = { endSessions: function(sessions, callback) { if (!Array.isArray(sessions)) { sessions = [sessions]; } // TODO: // When connected to a sharded cluster the endSessions command // can be sent to any mongos. When connected to a replica set the // endSessions command MUST be sent to the primary if the primary // is available, otherwise it MUST be sent to any available secondary. // Is it enough to use: ReadPreference.primaryPreferred ? 
this.command(
      'admin.$cmd',
      { endSessions: sessions },
      { readPreference: ReadPreference.primaryPreferred },
      () => {
        // intentionally ignored, per spec
        if (typeof callback === 'function') callback();
      }
    );
  }
};

// Maps a topology instance to its SDAM TopologyType
function topologyType(topology) {
  if (topology.description) {
    return topology.description.type;
  }

  if (topology.type === 'mongos') {
    return TopologyType.Sharded;
  } else if (topology.type === 'replset') {
    return TopologyType.ReplicaSetWithPrimary;
  }

  return TopologyType.Single;
}

// Minimum wire version (MongoDB 3.6) required for retryable writes
const RETRYABLE_WIRE_VERSION = 6;

/**
 * Determines whether the provided topology supports retryable writes
 *
 * @param {Mongos|Replset} topology
 */
const isRetryableWritesSupported = function(topology) {
  const maxWireVersion = topology.lastIsMaster().maxWireVersion;
  if (maxWireVersion < RETRYABLE_WIRE_VERSION) {
    return false;
  }

  // Sessions (and therefore txnNumbers) require logical session support
  if (!topology.logicalSessionTimeoutMinutes) {
    return false;
  }

  // Standalone servers never support retryable writes
  if (topologyType(topology) === TopologyType.Single) {
    return false;
  }

  return true;
};

module.exports.SessionMixins = SessionMixins;
module.exports.resolveClusterTime = resolveClusterTime;
module.exports.inquireServerState = inquireServerState;
module.exports.getTopologyType = getTopologyType;
module.exports.emitServerDescriptionChanged = emitServerDescriptionChanged;
module.exports.emitTopologyDescriptionChanged = emitTopologyDescriptionChanged;
module.exports.cloneOptions = cloneOptions;
module.exports.createClientInfo = createClientInfo;
module.exports.createCompressionInfo = createCompressionInfo;
module.exports.clone = clone;
module.exports.diff = diff;
module.exports.Interval = Interval;
module.exports.Timeout = Timeout;
module.exports.isRetryableWritesSupported = isRetryableWritesSupported;
package/lib/transactions.js000644 0000011561 3560116604 013100 0ustar00000000 000000
'use strict';

const MongoError = require('./error').MongoError;

// Transaction state constants and the legal-transition table, built inside an
// IIFE so the state names stay scoped while TxnState/stateMachine are exported
let TxnState;
let stateMachine;

(() => {
  const NO_TRANSACTION = 'NO_TRANSACTION';
  const STARTING_TRANSACTION = 'STARTING_TRANSACTION';
  const TRANSACTION_IN_PROGRESS
= 'TRANSACTION_IN_PROGRESS';
  const TRANSACTION_COMMITTED = 'TRANSACTION_COMMITTED';
  const TRANSACTION_COMMITTED_EMPTY = 'TRANSACTION_COMMITTED_EMPTY';
  const TRANSACTION_ABORTED = 'TRANSACTION_ABORTED';

  TxnState = {
    NO_TRANSACTION,
    STARTING_TRANSACTION,
    TRANSACTION_IN_PROGRESS,
    TRANSACTION_COMMITTED,
    TRANSACTION_COMMITTED_EMPTY,
    TRANSACTION_ABORTED
  };

  // For each state, the set of states it may legally transition to
  stateMachine = {
    [NO_TRANSACTION]: [NO_TRANSACTION, STARTING_TRANSACTION],
    [STARTING_TRANSACTION]: [
      TRANSACTION_IN_PROGRESS,
      TRANSACTION_COMMITTED,
      TRANSACTION_COMMITTED_EMPTY,
      TRANSACTION_ABORTED
    ],
    [TRANSACTION_IN_PROGRESS]: [
      TRANSACTION_IN_PROGRESS,
      TRANSACTION_COMMITTED,
      TRANSACTION_ABORTED
    ],
    [TRANSACTION_COMMITTED]: [
      TRANSACTION_COMMITTED,
      TRANSACTION_COMMITTED_EMPTY,
      STARTING_TRANSACTION,
      NO_TRANSACTION
    ],
    [TRANSACTION_ABORTED]: [STARTING_TRANSACTION, NO_TRANSACTION],
    [TRANSACTION_COMMITTED_EMPTY]: [TRANSACTION_COMMITTED_EMPTY, NO_TRANSACTION]
  };
})();

/**
 * The MongoDB ReadConcern, which allows for control of the consistency and isolation properties
 * of the data read from replica sets and replica set shards.
 * @typedef {Object} ReadConcern
 * @property {'local'|'available'|'majority'|'linearizable'|'snapshot'} level The readConcern Level
 * @see https://docs.mongodb.com/manual/reference/read-concern/
 */

/**
 * A MongoDB WriteConcern, which describes the level of acknowledgement
 * requested from MongoDB for write operations.
 * @typedef {Object} WriteConcern
 * @property {number|'majority'|string} [w=1] requests acknowledgement that the write operation has
 * propagated to a specified number of mongod hosts
 * @property {boolean} [j=false] requests acknowledgement from MongoDB that the write operation has
 * been written to the journal
 * @property {number} [wtimeout] a time limit, in milliseconds, for the write concern
 * @see https://docs.mongodb.com/manual/reference/write-concern/
 */

/**
 * Configuration options for a transaction.
 * @typedef {Object} TransactionOptions
 * @property {ReadConcern} [readConcern] A default read concern for commands in this transaction
 * @property {WriteConcern} [writeConcern] A default writeConcern for commands in this transaction
 * @property {ReadPreference} [readPreference] A default read preference for commands in this transaction
 */

/**
 * A class maintaining state related to a server transaction. Internal Only
 * @ignore
 */
class Transaction {
  /**
   * Create a transaction
   *
   * @ignore
   * @param {TransactionOptions} [options] Optional settings
   */
  constructor(options) {
    options = options || {};

    this.state = TxnState.NO_TRANSACTION;
    this.options = {};

    if (options.writeConcern || typeof options.w !== 'undefined') {
      const w = options.writeConcern ? options.writeConcern.w : options.w;
      // Transactions must be acknowledged (w >= 1)
      if (w <= 0) {
        throw new MongoError('Transactions do not support unacknowledged write concern');
      }

      this.options.writeConcern = options.writeConcern ? options.writeConcern : { w: options.w };
    }

    if (options.readConcern) this.options.readConcern = options.readConcern;
    if (options.readPreference) this.options.readPreference = options.readPreference;

    // TODO: This isn't technically necessary
    this._pinnedServer = undefined;
    this._recoveryToken = undefined;
  }

  // The mongos this transaction is pinned to, if any
  get server() {
    return this._pinnedServer;
  }

  // Recovery token used when committing/aborting a sharded transaction
  get recoveryToken() {
    return this._recoveryToken;
  }

  get isPinned() {
    return !!this.server;
  }

  /**
   * @ignore
   * @return Whether this session is presently in a transaction
   */
  get isActive() {
    return (
      [TxnState.STARTING_TRANSACTION, TxnState.TRANSACTION_IN_PROGRESS].indexOf(this.state) !== -1
    );
  }

  /**
   * Transition the transaction in the state machine
   * @ignore
   * @param {TxnState} nextState The new state to transition to
   */
  transition(nextState) {
    const nextStates = stateMachine[this.state];
    if (nextStates && nextStates.indexOf(nextState) !== -1) {
      this.state = nextState;
      // Leaving (or restarting) a transaction drops any mongos pinning
      if (this.state === TxnState.NO_TRANSACTION || this.state === TxnState.STARTING_TRANSACTION) {
        this.unpinServer();
      }
      return;
    }

    throw new MongoError(
      `Attempted illegal state transition from [${this.state}] to [${nextState}]`
    );
  }

  pinServer(server) {
    if (this.isActive) {
      this._pinnedServer = server;
    }
  }

  unpinServer() {
    this._pinnedServer = undefined;
  }
}

// True when the command commits or aborts a transaction
function isTransactionCommand(command) {
  return !!(command.commitTransaction || command.abortTransaction);
}

module.exports = { TxnState, Transaction, isTransactionCommand };
package/lib/uri_parser.js000644 0000047116 3560116604 012550 0ustar00000000 000000
'use strict';
const URL = require('url');
const qs = require('querystring');
const dns = require('dns');
const MongoParseError = require('./error').MongoParseError;
const ReadPreference = require('./topologies/read_preference');

/**
 * The following regular expression validates a connection string and breaks the
 * provide string into the following capture groups: [protocol, username, password, hosts]
 */
// NOTE(review): the embedded spaces in this regex look like extraction artifacts —
// upstream has no spaces inside the groups; verify against the published 3.2.7 source
const HOSTS_RX = /(mongodb(?:\+srv|)):\/\/(?: (?:[^:]*) (?: : ([^@]*) )? @ )?([^/?]*)(?:\/|)(.*)/;

/**
 * Determines whether a provided address matches the provided parent domain in order
 * to avoid certain attack vectors.
 *
 * @param {String} srvAddress The address to check against a domain
 * @param {String} parentDomain The domain to check the provided address against
 * @return {Boolean} Whether the provided address matches the parent domain
 */
function matchesParentDomain(srvAddress, parentDomain) {
  const regex = /^.*?\./;
  const srv = `.${srvAddress.replace(regex, '')}`;
  const parent = `.${parentDomain.replace(regex, '')}`;
  return srv.endsWith(parent);
}

/**
 * Lookup a `mongodb+srv` connection string, combine the parts and reparse it as a normal
 * connection string.
 *
 * @param {string} uri The connection string to parse
 * @param {object} options Optional user provided connection string options
 * @param {function} callback
 */
function parseSrvConnectionString(uri, options, callback) {
  const result = URL.parse(uri, true);

  // SRV host must be at least host.domain.tld
  if (result.hostname.split('.').length < 3) {
    return callback(new MongoParseError('URI does not have hostname, domain name and tld'));
  }

  result.domainLength = result.hostname.split('.').length;

  if (result.pathname && result.pathname.match(',')) {
    return callback(new MongoParseError('Invalid URI, cannot contain multiple hostnames'));
  }

  if (result.port) {
    return callback(new MongoParseError(`Ports not accepted with '${PROTOCOL_MONGODB_SRV}' URIs`));
  }

  // Resolve the SRV record and use the result as the list of hosts to connect to.
  const lookupAddress = result.host;
  dns.resolveSrv(`_mongodb._tcp.${lookupAddress}`, (err, addresses) => {
    if (err) return callback(err);

    if (addresses.length === 0) {
      return callback(new MongoParseError('No addresses found at host'));
    }

    for (let i = 0; i < addresses.length; i++) {
      // NOTE(review): matchesParentDomain only declares two parameters; the
      // third argument (domainLength) is ignored — verify intent upstream
      if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
        return callback(
          new MongoParseError('Server record does not share hostname with parent URI')
        );
      }
    }

    // Convert the original URL to a non-SRV URL.
    result.protocol = 'mongodb';
    result.host = addresses.map(address => `${address.name}:${address.port}`).join(',');

    // Default to SSL true if it's not specified.
    if (
      !('ssl' in options) &&
      (!result.search || !('ssl' in result.query) || result.query.ssl === null)
    ) {
      result.query.ssl = true;
    }

    // Resolve TXT record and add options from there if they exist.
dns.resolveTxt(lookupAddress, (err, record) => {
      if (err) {
        // ENODATA simply means no TXT record exists; anything else is fatal
        if (err.code !== 'ENODATA') {
          return callback(err);
        }
        record = null;
      }

      if (record) {
        if (record.length > 1) {
          return callback(new MongoParseError('Multiple text records not allowed'));
        }

        record = qs.parse(record[0].join(''));
        if (Object.keys(record).some(key => key !== 'authSource' && key !== 'replicaSet')) {
          return callback(
            new MongoParseError('Text record must only set `authSource` or `replicaSet`')
          );
        }

        Object.assign(result.query, record);
      }

      // Set completed options back into the URL object.
      result.search = qs.stringify(result.query);

      const finalString = URL.format(result);
      parseConnectionString(finalString, options, callback);
    });
  });
}

/**
 * Parses a query string item according to the connection string spec
 *
 * @param {string} key The key for the parsed value
 * @param {Array|String} value The value to parse
 * @return {Array|Object|String} The parsed value
 */
function parseQueryStringItemValue(key, value) {
  if (Array.isArray(value)) {
    // deduplicate and simplify arrays
    value = value.filter((v, idx) => value.indexOf(v) === idx);
    if (value.length === 1) value = value[0];
  } else if (STRING_OPTIONS.has(key)) {
    // TODO: refactor function to make this early return not
    //       stand out
    return value;
  } else if (value.indexOf(':') > 0) {
    // `key:value,key:value` pairs become an object
    value = value.split(',').reduce((result, pair) => {
      const parts = pair.split(':');
      result[parts[0]] = parseQueryStringItemValue(key, parts[1]);
      return result;
    }, {});
  } else if (value.indexOf(',') > 0) {
    value = value.split(',').map(v => {
      return parseQueryStringItemValue(key, v);
    });
  } else if (value.toLowerCase() === 'true' || value.toLowerCase() === 'false') {
    value = value.toLowerCase() === 'true';
  } else if (!Number.isNaN(value)) {
    // NOTE(review): Number.isNaN on a string is always false, so this guard is
    // always true; the inner parseFloat check does the real numeric filtering
    const numericValue = parseFloat(value);
    if (!Number.isNaN(numericValue)) {
      value = parseFloat(value);
    }
  }

  return value;
}

// Options that are known boolean types
const BOOLEAN_OPTIONS = new Set([
  'slaveok',
  'slave_ok',
  'sslvalidate',
  'fsync',
'safe',
  'retrywrites',
  'j'
]);

// Known string options
// TODO: Do this for more types
const STRING_OPTIONS = new Set(['authsource', 'replicaset', 'appname']);

// Supported text representations of auth mechanisms
// NOTE: this list exists in native already, if it is merged here we should deduplicate
const AUTH_MECHANISMS = new Set([
  'GSSAPI',
  'MONGODB-X509',
  'MONGODB-CR',
  'DEFAULT',
  'SCRAM-SHA-1',
  'SCRAM-SHA-256',
  'PLAIN'
]);

// Lookup table used to translate normalized (lower-cased) forms of connection string
// options to their expected camelCase version
const CASE_TRANSLATION = {
  replicaset: 'replicaSet',
  connecttimeoutms: 'connectTimeoutMS',
  sockettimeoutms: 'socketTimeoutMS',
  maxpoolsize: 'maxPoolSize',
  minpoolsize: 'minPoolSize',
  maxidletimems: 'maxIdleTimeMS',
  waitqueuemultiple: 'waitQueueMultiple',
  waitqueuetimeoutms: 'waitQueueTimeoutMS',
  wtimeoutms: 'wtimeoutMS',
  readconcern: 'readConcern',
  readconcernlevel: 'readConcernLevel',
  readpreference: 'readPreference',
  maxstalenessseconds: 'maxStalenessSeconds',
  readpreferencetags: 'readPreferenceTags',
  authsource: 'authSource',
  authmechanism: 'authMechanism',
  authmechanismproperties: 'authMechanismProperties',
  gssapiservicename: 'gssapiServiceName',
  localthresholdms: 'localThresholdMS',
  serverselectiontimeoutms: 'serverSelectionTimeoutMS',
  serverselectiontryonce: 'serverSelectionTryOnce',
  heartbeatfrequencyms: 'heartbeatFrequencyMS',
  retrywrites: 'retryWrites',
  uuidrepresentation: 'uuidRepresentation',
  zlibcompressionlevel: 'zlibCompressionLevel',
  tlsallowinvalidcertificates: 'tlsAllowInvalidCertificates',
  tlsallowinvalidhostnames: 'tlsAllowInvalidHostnames',
  tlsinsecure: 'tlsInsecure',
  tlscafile: 'tlsCAFile',
  tlscertificatekeyfile: 'tlsCertificateKeyFile',
  tlscertificatekeyfilepassword: 'tlsCertificateKeyFilePassword',
  wtimeout: 'wTimeoutMS',
  j: 'journal'
};

/**
 * Sets the value for `key`, allowing for any required translation
 *
 * @param {object} obj The object to set the key on
 * @param {string} key The key to
set the value for
 * @param {*} value The value to set
 * @param {object} options The options used for option parsing
 */
function applyConnectionStringOption(obj, key, value, options) {
  // simple key translation
  if (key === 'journal') {
    key = 'j';
  } else if (key === 'wtimeoutms') {
    key = 'wtimeout';
  }

  // more complicated translation
  if (BOOLEAN_OPTIONS.has(key)) {
    value = value === 'true' || value === true;
  } else if (key === 'appname') {
    value = decodeURIComponent(value);
  } else if (key === 'readconcernlevel') {
    obj['readConcernLevel'] = value;
    key = 'readconcern';
    value = { level: value };
  }

  // simple validation
  if (key === 'compressors') {
    value = Array.isArray(value) ? value : [value];

    if (!value.every(c => c === 'snappy' || c === 'zlib')) {
      throw new MongoParseError(
        'Value for `compressors` must be at least one of: `snappy`, `zlib`'
      );
    }
  }

  if (key === 'authmechanism' && !AUTH_MECHANISMS.has(value)) {
    throw new MongoParseError(
      'Value for `authMechanism` must be one of: `DEFAULT`, `GSSAPI`, `PLAIN`, `MONGODB-X509`, `SCRAM-SHA-1`, `SCRAM-SHA-256`'
    );
  }

  if (key === 'readpreference' && !ReadPreference.isValid(value)) {
    throw new MongoParseError(
      'Value for `readPreference` must be one of: `primary`, `primaryPreferred`, `secondary`, `secondaryPreferred`, `nearest`'
    );
  }

  if (key === 'zlibcompressionlevel' && (value < -1 || value > 9)) {
    throw new MongoParseError('zlibCompressionLevel must be an integer between -1 and 9');
  }

  // special cases
  if (key === 'compressors' || key === 'zlibcompressionlevel') {
    // compression options nest under obj.compression rather than the top level
    obj.compression = obj.compression || {};
    obj = obj.compression;
  }

  if (key === 'authmechanismproperties') {
    if (typeof value.SERVICE_NAME === 'string') obj.gssapiServiceName = value.SERVICE_NAME;
    if (typeof value.SERVICE_REALM === 'string') obj.gssapiServiceRealm = value.SERVICE_REALM;
    if (typeof value.CANONICALIZE_HOST_NAME !== 'undefined') {
      obj.gssapiCanonicalizeHostName = value.CANONICALIZE_HOST_NAME;
    }
  }

  if (key === 'readpreferencetags' && Array.isArray(value)) {
value = splitArrayOfMultipleReadPreferenceTags(value);
  }

  // set the actual value
  if (options.caseTranslate && CASE_TRANSLATION[key]) {
    obj[CASE_TRANSLATION[key]] = value;
    return;
  }

  obj[key] = value;
}

// Auth mechanisms that cannot be used without a username
const USERNAME_REQUIRED_MECHANISMS = new Set([
  'GSSAPI',
  'MONGODB-CR',
  'PLAIN',
  'SCRAM-SHA-1',
  'SCRAM-SHA-256'
]);

// Expands ["dc:ny,rack:1", ...] into [{ dc: 'ny', rack: '1' }, ...]
function splitArrayOfMultipleReadPreferenceTags(value) {
  const parsedTags = [];

  for (let i = 0; i < value.length; i++) {
    parsedTags[i] = {};
    value[i].split(',').forEach(individualTag => {
      const splitTag = individualTag.split(':');
      parsedTags[i][splitTag[0]] = splitTag[1];
    });
  }

  return parsedTags;
}

/**
 * Modifies the parsed connection string object taking into account expectations we
 * have for authentication-related options.
 *
 * @param {object} parsed The parsed connection string result
 * @return The parsed connection string result possibly modified for auth expectations
 */
function applyAuthExpectations(parsed) {
  if (parsed.options == null) {
    return;
  }

  const options = parsed.options;
  const authSource = options.authsource || options.authSource;
  if (authSource != null) {
    parsed.auth = Object.assign({}, parsed.auth, { db: authSource });
  }

  const authMechanism = options.authmechanism || options.authMechanism;
  if (authMechanism != null) {
    if (
      USERNAME_REQUIRED_MECHANISMS.has(authMechanism) &&
      (!parsed.auth || parsed.auth.username == null)
    ) {
      throw new MongoParseError(`Username required for mechanism \`${authMechanism}\``);
    }

    if (authMechanism === 'GSSAPI') {
      if (authSource != null && authSource !== '$external') {
        throw new MongoParseError(
          `Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
        );
      }

      parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
    }

    if (authMechanism === 'MONGODB-X509') {
      if (parsed.auth && parsed.auth.password != null) {
        throw new MongoParseError(`Password not allowed for mechanism \`${authMechanism}\``);
      }

      if (authSource != null && authSource !== '$external') {
        throw new MongoParseError(
          `Invalid
source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
        );
      }

      parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
    }

    if (authMechanism === 'PLAIN') {
      if (parsed.auth && parsed.auth.db == null) {
        parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
      }
    }
  }

  // default to `admin` if nothing else was resolved
  if (parsed.auth && parsed.auth.db == null) {
    parsed.auth = Object.assign({}, parsed.auth, { db: 'admin' });
  }

  return parsed;
}

/**
 * Parses a query string according the connection string spec.
 *
 * @param {String} query The query string to parse
 * @param {object} [options] The options used for options parsing
 * @return {Object|Error} The parsed query string as an object, or an error if one was encountered
 */
function parseQueryString(query, options) {
  const result = {};
  let parsedQueryString = qs.parse(query);

  // Fail fast on contradictory tls/ssl combinations before per-key parsing
  checkTLSOptions(parsedQueryString);

  for (const key in parsedQueryString) {
    const value = parsedQueryString[key];
    if (value === '' || value == null) {
      throw new MongoParseError('Incomplete key value pair for option');
    }

    const normalizedKey = key.toLowerCase();
    const parsedValue = parseQueryStringItemValue(normalizedKey, value);
    applyConnectionStringOption(result, normalizedKey, parsedValue, options);
  }

  // special cases for known deprecated options
  if (result.wtimeout && result.wtimeoutms) {
    delete result.wtimeout;
    console.warn('Unsupported option `wtimeout` specified');
  }

  return Object.keys(result).length ? result : null;
}

/**
 * Checks a query string for invalid tls options according to the URI options spec.
* * @param {string} queryString The query string to check * @throws {MongoParseError} */ function checkTLSOptions(queryString) { const queryStringKeys = Object.keys(queryString); if ( queryStringKeys.indexOf('tlsInsecure') !== -1 && (queryStringKeys.indexOf('tlsAllowInvalidCertificates') !== -1 || queryStringKeys.indexOf('tlsAllowInvalidHostnames') !== -1) ) { throw new MongoParseError( 'The `tlsInsecure` option cannot be used with `tlsAllowInvalidCertificates` or `tlsAllowInvalidHostnames`.' ); } const tlsValue = assertTlsOptionsAreEqual('tls', queryString, queryStringKeys); const sslValue = assertTlsOptionsAreEqual('ssl', queryString, queryStringKeys); if (tlsValue != null && sslValue != null) { if (tlsValue !== sslValue) { throw new MongoParseError('All values of `tls` and `ssl` must be the same.'); } } } /** * Checks a query string to ensure all tls/ssl options are the same. * * @param {string} key The key (tls or ssl) to check * @param {string} queryString The query string to check * @throws {MongoParseError} * @return The value of the tls/ssl option */ function assertTlsOptionsAreEqual(optionName, queryString, queryStringKeys) { const queryStringHasTLSOption = queryStringKeys.indexOf(optionName) !== -1; let optionValue; if (Array.isArray(queryString[optionName])) { optionValue = queryString[optionName][0]; } else { optionValue = queryString[optionName]; } if (queryStringHasTLSOption) { if (Array.isArray(queryString[optionName])) { const firstValue = queryString[optionName][0]; queryString[optionName].forEach(tlsValue => { if (tlsValue !== firstValue) { throw new MongoParseError('All values of ${optionName} must be the same.'); } }); } } return optionValue; } const PROTOCOL_MONGODB = 'mongodb'; const PROTOCOL_MONGODB_SRV = 'mongodb+srv'; const SUPPORTED_PROTOCOLS = [PROTOCOL_MONGODB, PROTOCOL_MONGODB_SRV]; /** * Parses a MongoDB connection string * * @param {*} uri the MongoDB connection string to parse * @param {object} [options] Optional settings. 
* @param {boolean} [options.caseTranslate] Whether the parser should translate options back into camelCase after normalization * @param {parseCallback} callback */ function parseConnectionString(uri, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, { caseTranslate: true }, options); // Check for bad uris before we parse try { URL.parse(uri); } catch (e) { return callback(new MongoParseError('URI malformed, cannot be parsed')); } const cap = uri.match(HOSTS_RX); if (!cap) { return callback(new MongoParseError('Invalid connection string')); } const protocol = cap[1]; if (SUPPORTED_PROTOCOLS.indexOf(protocol) === -1) { return callback(new MongoParseError('Invalid protocol provided')); } if (protocol === PROTOCOL_MONGODB_SRV) { return parseSrvConnectionString(uri, options, callback); } const dbAndQuery = cap[4].split('?'); const db = dbAndQuery.length > 0 ? dbAndQuery[0] : null; const query = dbAndQuery.length > 1 ? dbAndQuery[1] : null; let parsedOptions; try { parsedOptions = parseQueryString(query, options); } catch (parseError) { return callback(parseError); } parsedOptions = Object.assign({}, parsedOptions, options); const auth = { username: null, password: null, db: db && db !== '' ? 
qs.unescape(db) : null }; if (parsedOptions.auth) { // maintain support for legacy options passed into `MongoClient` if (parsedOptions.auth.username) auth.username = parsedOptions.auth.username; if (parsedOptions.auth.user) auth.username = parsedOptions.auth.user; if (parsedOptions.auth.password) auth.password = parsedOptions.auth.password; } if (cap[4].split('?')[0].indexOf('@') !== -1) { return callback(new MongoParseError('Unescaped slash in userinfo section')); } const authorityParts = cap[3].split('@'); if (authorityParts.length > 2) { return callback(new MongoParseError('Unescaped at-sign in authority section')); } if (authorityParts.length > 1) { const authParts = authorityParts.shift().split(':'); if (authParts.length > 2) { return callback(new MongoParseError('Unescaped colon in authority section')); } auth.username = qs.unescape(authParts[0]); auth.password = authParts[1] ? qs.unescape(authParts[1]) : null; } let hostParsingError = null; const hosts = authorityParts .shift() .split(',') .map(host => { let parsedHost = URL.parse(`mongodb://${host}`); if (parsedHost.path === '/:') { hostParsingError = new MongoParseError('Double colon in host identifier'); return null; } // heuristically determine if we're working with a domain socket if (host.match(/\.sock/)) { parsedHost.hostname = qs.unescape(host); parsedHost.port = null; } if (Number.isNaN(parsedHost.port)) { hostParsingError = new MongoParseError('Invalid port (non-numeric string)'); return; } const result = { host: parsedHost.hostname, port: parsedHost.port ? 
parseInt(parsedHost.port) : 27017 }; if (result.port === 0) { hostParsingError = new MongoParseError('Invalid port (zero) with hostname'); return; } if (result.port > 65535) { hostParsingError = new MongoParseError('Invalid port (larger than 65535) with hostname'); return; } if (result.port < 0) { hostParsingError = new MongoParseError('Invalid port (negative number)'); return; } return result; }) .filter(host => !!host); if (hostParsingError) { return callback(hostParsingError); } if (hosts.length === 0 || hosts[0].host === '' || hosts[0].host === null) { return callback(new MongoParseError('No hostname or hostnames provided in connection string')); } const result = { hosts: hosts, auth: auth.db || auth.username ? auth : null, options: Object.keys(parsedOptions).length ? parsedOptions : null }; if (result.auth && result.auth.db) { result.defaultDatabase = result.auth.db; } try { applyAuthExpectations(result); } catch (authError) { return callback(authError); } callback(null, result); } module.exports = parseConnectionString; package/lib/utils.js000644 0000006163 3560116604 011532 0ustar00000000 000000 'use strict'; const crypto = require('crypto'); const requireOptional = require('require_optional'); /** * Generate a UUIDv4 */ const uuidV4 = () => { const result = crypto.randomBytes(16); result[6] = (result[6] & 0x0f) | 0x40; result[8] = (result[8] & 0x3f) | 0x80; return result; }; /** * Returns the duration calculated from two high resolution timers in milliseconds * * @param {Object} started A high resolution timestamp created from `process.hrtime()` * @returns {Number} The duration in milliseconds */ const calculateDurationInMs = started => { const hrtime = process.hrtime(started); return (hrtime[0] * 1e9 + hrtime[1]) / 1e6; }; /** * Relays events for a given listener and emitter * * @param {EventEmitter} listener the EventEmitter to listen to the events from * @param {EventEmitter} emitter the EventEmitter to relay the events to */ function 
relayEvents(listener, emitter, events) { events.forEach(eventName => listener.on(eventName, event => emitter.emit(eventName, event))); } function retrieveKerberos() { let kerberos; try { kerberos = requireOptional('kerberos'); } catch (err) { if (err.code === 'MODULE_NOT_FOUND') { throw new Error('The `kerberos` module was not found. Please install it and try again.'); } throw err; } return kerberos; } // Throw an error if an attempt to use EJSON is made when it is not installed const noEJSONError = function() { throw new Error('The `mongodb-extjson` module was not found. Please install it and try again.'); }; // Facilitate loading EJSON optionally function retrieveEJSON() { let EJSON = null; try { EJSON = requireOptional('mongodb-extjson'); } catch (error) {} // eslint-disable-line if (!EJSON) { EJSON = { parse: noEJSONError, deserialize: noEJSONError, serialize: noEJSONError, stringify: noEJSONError, setBSONModule: noEJSONError, BSON: noEJSONError }; } return EJSON; } /** * A helper function for determining `maxWireVersion` between legacy and new topology * instances * * @private * @param {(Topology|Server)} topologyOrServer */ function maxWireVersion(topologyOrServer) { if (topologyOrServer.ismaster) { return topologyOrServer.ismaster.maxWireVersion; } if (topologyOrServer.description) { return topologyOrServer.description.maxWireVersion; } return null; } /* * Checks that collation is supported by server. 
* * @param {Server} [server] to check against * @param {object} [cmd] object where collation may be specified * @param {function} [callback] callback function * @return true if server does not support collation */ function collationNotSupported(server, cmd) { return cmd && cmd.collation && maxWireVersion(server) < 5; } /** * Checks if a given value is a Promise * * @param {*} maybePromise * @return true if the provided value is a Promise */ function isPromiseLike(maybePromise) { return maybePromise && typeof maybePromise.then === 'function'; } module.exports = { uuidV4, calculateDurationInMs, relayEvents, collationNotSupported, retrieveEJSON, retrieveKerberos, maxWireVersion, isPromiseLike }; package/lib/wireprotocol/command.js000644 0000006414 3560116604 014537 0ustar00000000 000000 'use strict'; const Query = require('../connection/commands').Query; const Msg = require('../connection/msg').Msg; const MongoError = require('../error').MongoError; const getReadPreference = require('./shared').getReadPreference; const isSharded = require('./shared').isSharded; const databaseNamespace = require('./shared').databaseNamespace; const isTransactionCommand = require('../transactions').isTransactionCommand; const applySession = require('../sessions').applySession; function command(server, ns, cmd, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; if (cmd == null) { return callback(new MongoError(`command ${JSON.stringify(cmd)} does not return a cursor`)); } const bson = server.s.bson; const pool = server.s.pool; const readPreference = getReadPreference(cmd, options); const shouldUseOpMsg = supportsOpMsg(server); const session = options.session; let clusterTime = server.clusterTime; let finalCmd = Object.assign({}, cmd); if (hasSessionSupport(server) && session) { if ( session.clusterTime && session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime) ) { clusterTime = session.clusterTime; } const 
err = applySession(session, finalCmd, options); if (err) { return callback(err); } } // if we have a known cluster time, gossip it if (clusterTime) { finalCmd.$clusterTime = clusterTime; } if ( isSharded(server) && !shouldUseOpMsg && readPreference && readPreference.preference !== 'primary' ) { finalCmd = { $query: finalCmd, $readPreference: readPreference.toJSON() }; } const commandOptions = Object.assign( { command: true, numberToSkip: 0, numberToReturn: -1, checkKeys: false }, options ); // This value is not overridable commandOptions.slaveOk = readPreference.slaveOk(); const cmdNs = `${databaseNamespace(ns)}.$cmd`; const message = shouldUseOpMsg ? new Msg(bson, cmdNs, finalCmd, commandOptions) : new Query(bson, cmdNs, finalCmd, commandOptions); const inTransaction = session && (session.inTransaction() || isTransactionCommand(finalCmd)); const commandResponseHandler = inTransaction ? function(err) { if ( !cmd.commitTransaction && err && err instanceof MongoError && err.hasErrorLabel('TransientTransactionError') ) { session.transaction.unpinServer(); } return callback.apply(null, arguments); } : callback; try { pool.write(message, commandOptions, commandResponseHandler); } catch (err) { commandResponseHandler(err); } } function hasSessionSupport(topology) { if (topology == null) return false; if (topology.description) { return topology.description.maxWireVersion >= 6; } return topology.ismaster == null ? false : topology.ismaster.maxWireVersion >= 6; } function supportsOpMsg(topologyOrServer) { const description = topologyOrServer.ismaster ? 
topologyOrServer.ismaster : topologyOrServer.description; if (description == null) { return false; } return description.maxWireVersion >= 6 && description.__nodejs_mock_server__ == null; } module.exports = command; package/lib/wireprotocol/compression.js000644 0000003445 3560116604 015463 0ustar00000000 000000 'use strict'; var Snappy = require('../connection/utils').retrieveSnappy(), zlib = require('zlib'); var compressorIDs = { snappy: 1, zlib: 2 }; var uncompressibleCommands = [ 'ismaster', 'saslStart', 'saslContinue', 'getnonce', 'authenticate', 'createUser', 'updateUser', 'copydbSaslStart', 'copydbgetnonce', 'copydb' ]; // Facilitate compressing a message using an agreed compressor var compress = function(self, dataToBeCompressed, callback) { switch (self.options.agreedCompressor) { case 'snappy': Snappy.compress(dataToBeCompressed, callback); break; case 'zlib': // Determine zlibCompressionLevel var zlibOptions = {}; if (self.options.zlibCompressionLevel) { zlibOptions.level = self.options.zlibCompressionLevel; } zlib.deflate(dataToBeCompressed, zlibOptions, callback); break; default: throw new Error( 'Attempt to compress message using unknown compressor "' + self.options.agreedCompressor + '".' ); } }; // Decompress a message using the given compressor var decompress = function(compressorID, compressedData, callback) { if (compressorID < 0 || compressorID > compressorIDs.length) { throw new Error( 'Server sent message compressed using an unsupported compressor. 
(Received compressor ID ' + compressorID + ')' ); } switch (compressorID) { case compressorIDs.snappy: Snappy.uncompress(compressedData, callback); break; case compressorIDs.zlib: zlib.inflate(compressedData, callback); break; default: callback(null, compressedData); } }; module.exports = { compressorIDs: compressorIDs, uncompressibleCommands: uncompressibleCommands, compress: compress, decompress: decompress }; package/lib/wireprotocol/constants.js000644 0000000505 3560116604 015130 0ustar00000000 000000 'use strict'; const MIN_SUPPORTED_SERVER_VERSION = '2.6'; const MAX_SUPPORTED_SERVER_VERSION = '4.2'; const MIN_SUPPORTED_WIRE_VERSION = 2; const MAX_SUPPORTED_WIRE_VERSION = 8; module.exports = { MIN_SUPPORTED_SERVER_VERSION, MAX_SUPPORTED_SERVER_VERSION, MIN_SUPPORTED_WIRE_VERSION, MAX_SUPPORTED_WIRE_VERSION }; package/lib/wireprotocol/get_more.js000644 0000005235 3560116604 014722 0ustar00000000 000000 'use strict'; const GetMore = require('../connection/commands').GetMore; const retrieveBSON = require('../connection/utils').retrieveBSON; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const BSON = retrieveBSON(); const Long = BSON.Long; const collectionNamespace = require('./shared').collectionNamespace; const maxWireVersion = require('../utils').maxWireVersion; const applyCommonQueryOptions = require('./shared').applyCommonQueryOptions; const command = require('./command'); function getMore(server, ns, cursorState, batchSize, options, callback) { options = options || {}; const wireVersion = maxWireVersion(server); function queryCallback(err, result) { if (err) return callback(err); const response = result.message; // If we have a timed out query or a cursor that was killed if (response.cursorNotFound) { return callback(new MongoNetworkError('cursor killed or timed out'), null); } if (wireVersion < 4) { const cursorId = typeof response.cursorId === 'number' ? 
Long.fromNumber(response.cursorId) : response.cursorId; cursorState.documents = response.documents; cursorState.cursorId = cursorId; callback(null, null, response.connection); return; } // We have an error detected if (response.documents[0].ok === 0) { return callback(new MongoError(response.documents[0])); } // Ensure we have a Long valid cursor id const cursorId = typeof response.documents[0].cursor.id === 'number' ? Long.fromNumber(response.documents[0].cursor.id) : response.documents[0].cursor.id; cursorState.documents = response.documents[0].cursor.nextBatch; cursorState.cursorId = cursorId; callback(null, response.documents[0], response.connection); } if (wireVersion < 4) { const bson = server.s.bson; const getMoreOp = new GetMore(bson, ns, cursorState.cursorId, { numberToReturn: batchSize }); const queryOptions = applyCommonQueryOptions({}, cursorState); server.s.pool.write(getMoreOp, queryOptions, queryCallback); return; } const getMoreCmd = { getMore: cursorState.cursorId, collection: collectionNamespace(ns), batchSize: Math.abs(batchSize) }; if (cursorState.cmd.tailable && typeof cursorState.cmd.maxAwaitTimeMS === 'number') { getMoreCmd.maxTimeMS = cursorState.cmd.maxAwaitTimeMS; } const commandOptions = Object.assign( { returnFieldSelector: null, documentsReturnedIn: 'nextBatch' }, options ); command(server, ns, getMoreCmd, commandOptions, queryCallback); } module.exports = getMore; package/lib/wireprotocol/index.js000644 0000001221 3560116604 014217 0ustar00000000 000000 'use strict'; const writeCommand = require('./write_command'); module.exports = { insert: function insert(server, ns, ops, options, callback) { writeCommand(server, 'insert', 'documents', ns, ops, options, callback); }, update: function update(server, ns, ops, options, callback) { writeCommand(server, 'update', 'updates', ns, ops, options, callback); }, remove: function remove(server, ns, ops, options, callback) { writeCommand(server, 'delete', 'deletes', ns, ops, options, callback); }, 
killCursors: require('./kill_cursors'), getMore: require('./get_more'), query: require('./query'), command: require('./command') }; package/lib/wireprotocol/kill_cursors.js000644 0000003622 3560116604 015632 0ustar00000000 000000 'use strict'; const KillCursor = require('../connection/commands').KillCursor; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const collectionNamespace = require('./shared').collectionNamespace; const maxWireVersion = require('../utils').maxWireVersion; const command = require('./command'); function killCursors(server, ns, cursorState, callback) { callback = typeof callback === 'function' ? callback : () => {}; const cursorId = cursorState.cursorId; if (maxWireVersion(server) < 4) { const bson = server.s.bson; const pool = server.s.pool; const killCursor = new KillCursor(bson, ns, [cursorId]); const options = { immediateRelease: true, noResponse: true }; if (typeof cursorState.session === 'object') { options.session = cursorState.session; } if (pool && pool.isConnected()) { try { pool.write(killCursor, options, callback); } catch (err) { if (typeof callback === 'function') { callback(err, null); } else { console.warn(err); } } } return; } const killCursorCmd = { killCursors: collectionNamespace(ns), cursors: [cursorId] }; const options = {}; if (typeof cursorState.session === 'object') options.session = cursorState.session; command(server, ns, killCursorCmd, options, (err, result) => { if (err) { return callback(err); } const response = result.message; if (response.cursorNotFound) { return callback(new MongoNetworkError('cursor killed or timed out'), null); } if (!Array.isArray(response.documents) || response.documents.length === 0) { return callback( new MongoError(`invalid killCursors result returned for cursor id ${cursorId}`) ); } callback(null, response.documents[0]); }); } module.exports = killCursors; package/lib/wireprotocol/query.js000644 0000016452 3560116604 
014271 0ustar00000000 000000 'use strict'; const Query = require('../connection/commands').Query; const MongoError = require('../error').MongoError; const getReadPreference = require('./shared').getReadPreference; const collectionNamespace = require('./shared').collectionNamespace; const isSharded = require('./shared').isSharded; const maxWireVersion = require('../utils').maxWireVersion; const applyCommonQueryOptions = require('./shared').applyCommonQueryOptions; const command = require('./command'); function query(server, ns, cmd, cursorState, options, callback) { options = options || {}; if (cursorState.cursorId != null) { return callback(); } if (cmd == null) { return callback(new MongoError(`command ${JSON.stringify(cmd)} does not return a cursor`)); } if (maxWireVersion(server) < 4) { const query = prepareLegacyFindQuery(server, ns, cmd, cursorState, options); const queryOptions = applyCommonQueryOptions({}, cursorState); if (typeof query.documentsReturnedIn === 'string') { queryOptions.documentsReturnedIn = query.documentsReturnedIn; } server.s.pool.write(query, queryOptions, callback); return; } const readPreference = getReadPreference(cmd, options); const findCmd = prepareFindCommand(server, ns, cmd, cursorState, options); // NOTE: This actually modifies the passed in cmd, and our code _depends_ on this // side-effect. 
Change this ASAP cmd.virtual = false; const commandOptions = Object.assign( { documentsReturnedIn: 'firstBatch', numberToReturn: 1, slaveOk: readPreference.slaveOk() }, options ); if (cmd.readPreference) commandOptions.readPreference = readPreference; command(server, ns, findCmd, commandOptions, callback); } function prepareFindCommand(server, ns, cmd, cursorState) { cursorState.batchSize = cmd.batchSize || cursorState.batchSize; let findCmd = { find: collectionNamespace(ns) }; if (cmd.query) { if (cmd.query['$query']) { findCmd.filter = cmd.query['$query']; } else { findCmd.filter = cmd.query; } } let sortValue = cmd.sort; if (Array.isArray(sortValue)) { const sortObject = {}; if (sortValue.length > 0 && !Array.isArray(sortValue[0])) { let sortDirection = sortValue[1]; if (sortDirection === 'asc') { sortDirection = 1; } else if (sortDirection === 'desc') { sortDirection = -1; } sortObject[sortValue[0]] = sortDirection; } else { for (let i = 0; i < sortValue.length; i++) { let sortDirection = sortValue[i][1]; if (sortDirection === 'asc') { sortDirection = 1; } else if (sortDirection === 'desc') { sortDirection = -1; } sortObject[sortValue[i][0]] = sortDirection; } } sortValue = sortObject; } if (cmd.sort) findCmd.sort = sortValue; if (cmd.fields) findCmd.projection = cmd.fields; if (cmd.hint) findCmd.hint = cmd.hint; if (cmd.skip) findCmd.skip = cmd.skip; if (cmd.limit) findCmd.limit = cmd.limit; if (cmd.limit < 0) { findCmd.limit = Math.abs(cmd.limit); findCmd.singleBatch = true; } if (typeof cmd.batchSize === 'number') { if (cmd.batchSize < 0) { if (cmd.limit !== 0 && Math.abs(cmd.batchSize) < Math.abs(cmd.limit)) { findCmd.limit = Math.abs(cmd.batchSize); } findCmd.singleBatch = true; } findCmd.batchSize = Math.abs(cmd.batchSize); } if (cmd.comment) findCmd.comment = cmd.comment; if (cmd.maxScan) findCmd.maxScan = cmd.maxScan; if (cmd.maxTimeMS) findCmd.maxTimeMS = cmd.maxTimeMS; if (cmd.min) findCmd.min = cmd.min; if (cmd.max) findCmd.max = cmd.max; 
findCmd.returnKey = cmd.returnKey ? cmd.returnKey : false; findCmd.showRecordId = cmd.showDiskLoc ? cmd.showDiskLoc : false; if (cmd.snapshot) findCmd.snapshot = cmd.snapshot; if (cmd.tailable) findCmd.tailable = cmd.tailable; if (cmd.oplogReplay) findCmd.oplogReplay = cmd.oplogReplay; if (cmd.noCursorTimeout) findCmd.noCursorTimeout = cmd.noCursorTimeout; if (cmd.awaitData) findCmd.awaitData = cmd.awaitData; if (cmd.awaitdata) findCmd.awaitData = cmd.awaitdata; if (cmd.partial) findCmd.partial = cmd.partial; if (cmd.collation) findCmd.collation = cmd.collation; if (cmd.readConcern) findCmd.readConcern = cmd.readConcern; // If we have explain, we need to rewrite the find command // to wrap it in the explain command if (cmd.explain) { findCmd = { explain: findCmd }; } return findCmd; } function prepareLegacyFindQuery(server, ns, cmd, cursorState, options) { options = options || {}; const bson = server.s.bson; const readPreference = getReadPreference(cmd, options); cursorState.batchSize = cmd.batchSize || cursorState.batchSize; let numberToReturn = 0; if ( cursorState.limit < 0 || (cursorState.limit !== 0 && cursorState.limit < cursorState.batchSize) || (cursorState.limit > 0 && cursorState.batchSize === 0) ) { numberToReturn = cursorState.limit; } else { numberToReturn = cursorState.batchSize; } const numberToSkip = cursorState.skip || 0; const findCmd = {}; if (isSharded(server) && readPreference) { findCmd['$readPreference'] = readPreference.toJSON(); } if (cmd.sort) findCmd['$orderby'] = cmd.sort; if (cmd.hint) findCmd['$hint'] = cmd.hint; if (cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot; if (typeof cmd.returnKey !== 'undefined') findCmd['$returnKey'] = cmd.returnKey; if (cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan; if (cmd.min) findCmd['$min'] = cmd.min; if (cmd.max) findCmd['$max'] = cmd.max; if (typeof cmd.showDiskLoc !== 'undefined') findCmd['$showDiskLoc'] = cmd.showDiskLoc; if (cmd.comment) findCmd['$comment'] = cmd.comment; if (cmd.maxTimeMS) 
findCmd['$maxTimeMS'] = cmd.maxTimeMS; if (cmd.explain) { // nToReturn must be 0 (match all) or negative (match N and close cursor) // nToReturn > 0 will give explain results equivalent to limit(0) numberToReturn = -Math.abs(cmd.limit || 0); findCmd['$explain'] = true; } findCmd['$query'] = cmd.query; if (cmd.readConcern && cmd.readConcern.level !== 'local') { throw new MongoError( `server find command does not support a readConcern level of ${cmd.readConcern.level}` ); } if (cmd.readConcern) { cmd = Object.assign({}, cmd); delete cmd['readConcern']; } const serializeFunctions = typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false; const ignoreUndefined = typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false; const query = new Query(bson, ns, findCmd, { numberToSkip: numberToSkip, numberToReturn: numberToReturn, pre32Limit: typeof cmd.limit !== 'undefined' ? cmd.limit : undefined, checkKeys: false, returnFieldSelector: cmd.fields, serializeFunctions: serializeFunctions, ignoreUndefined: ignoreUndefined }); if (typeof cmd.tailable === 'boolean') query.tailable = cmd.tailable; if (typeof cmd.oplogReplay === 'boolean') query.oplogReplay = cmd.oplogReplay; if (typeof cmd.noCursorTimeout === 'boolean') query.noCursorTimeout = cmd.noCursorTimeout; if (typeof cmd.awaitData === 'boolean') query.awaitData = cmd.awaitData; if (typeof cmd.partial === 'boolean') query.partial = cmd.partial; query.slaveOk = readPreference.slaveOk(); return query; } module.exports = query; package/lib/wireprotocol/shared.js000644 0000006702 3560116604 014367 0ustar00000000 000000 'use strict'; const ReadPreference = require('../topologies/read_preference'); const MongoError = require('../error').MongoError; const ServerType = require('../sdam/server_description').ServerType; const TopologyDescription = require('../sdam/topology_description').TopologyDescription; const MESSAGE_HEADER_SIZE = 16; const COMPRESSION_DETAILS_SIZE = 9; // 
originalOpcode + uncompressedSize, compressorID // OPCODE Numbers // Defined at https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#request-opcodes var opcodes = { OP_REPLY: 1, OP_UPDATE: 2001, OP_INSERT: 2002, OP_QUERY: 2004, OP_GETMORE: 2005, OP_DELETE: 2006, OP_KILL_CURSORS: 2007, OP_COMPRESSED: 2012, OP_MSG: 2013 }; var getReadPreference = function(cmd, options) { // Default to command version of the readPreference var readPreference = cmd.readPreference || new ReadPreference('primary'); // If we have an option readPreference override the command one if (options.readPreference) { readPreference = options.readPreference; } if (typeof readPreference === 'string') { readPreference = new ReadPreference(readPreference); } if (!(readPreference instanceof ReadPreference)) { throw new MongoError('read preference must be a ReadPreference instance'); } return readPreference; }; // Parses the header of a wire protocol message var parseHeader = function(message) { return { length: message.readInt32LE(0), requestId: message.readInt32LE(4), responseTo: message.readInt32LE(8), opCode: message.readInt32LE(12) }; }; function applyCommonQueryOptions(queryOptions, options) { Object.assign(queryOptions, { raw: typeof options.raw === 'boolean' ? options.raw : false, promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true, promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true, promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false, monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : false, fullResult: typeof options.fullResult === 'boolean' ? 
options.fullResult : false }); if (typeof options.socketTimeout === 'number') { queryOptions.socketTimeout = options.socketTimeout; } if (options.session) { queryOptions.session = options.session; } if (typeof options.documentsReturnedIn === 'string') { queryOptions.documentsReturnedIn = options.documentsReturnedIn; } return queryOptions; } function isSharded(topologyOrServer) { if (topologyOrServer.type === 'mongos') return true; if (topologyOrServer.description && topologyOrServer.description.type === ServerType.Mongos) { return true; } // NOTE: This is incredibly inefficient, and should be removed once command construction // happens based on `Server` not `Topology`. if (topologyOrServer.description && topologyOrServer.description instanceof TopologyDescription) { const servers = Array.from(topologyOrServer.description.servers.values()); return servers.some(server => server.type === ServerType.Mongos); } return false; } function databaseNamespace(ns) { return ns.split('.')[0]; } function collectionNamespace(ns) { return ns .split('.') .slice(1) .join('.'); } module.exports = { getReadPreference, MESSAGE_HEADER_SIZE, COMPRESSION_DETAILS_SIZE, opcodes, parseHeader, applyCommonQueryOptions, isSharded, databaseNamespace, collectionNamespace }; package/lib/wireprotocol/write_command.js000644 0000002616 3560116604 015751 0ustar00000000 000000 'use strict'; const MongoError = require('../error').MongoError; const collectionNamespace = require('./shared').collectionNamespace; const command = require('./command'); function writeCommand(server, type, opsField, ns, ops, options, callback) { if (ops.length === 0) throw new MongoError(`${type} must contain at least one document`); if (typeof options === 'function') { callback = options; options = {}; } options = options || {}; const ordered = typeof options.ordered === 'boolean' ? 
options.ordered : true; const writeConcern = options.writeConcern; const writeCommand = {}; writeCommand[type] = collectionNamespace(ns); writeCommand[opsField] = ops; writeCommand.ordered = ordered; if (writeConcern && Object.keys(writeConcern).length > 0) { writeCommand.writeConcern = writeConcern; } if (options.collation) { for (let i = 0; i < writeCommand[opsField].length; i++) { if (!writeCommand[opsField][i].collation) { writeCommand[opsField][i].collation = options.collation; } } } if (options.bypassDocumentValidation === true) { writeCommand.bypassDocumentValidation = options.bypassDocumentValidation; } const commandOptions = Object.assign( { checkKeys: type === 'insert', numberToReturn: 1 }, options ); command(server, ns, writeCommand, commandOptions, callback); } module.exports = writeCommand;